diff --git "a/1556.jsonl" "b/1556.jsonl" new file mode 100644--- /dev/null +++ "b/1556.jsonl" @@ -0,0 +1,665 @@ +{"seq_id":"450723679","text":"\"\"\" api.tests.test_profile\n\n This module implements a unit test to test the API's profile-handling\n logic.\n\"\"\"\nimport os.path\n\nimport simplejson as json\n\nfrom identityAPI.api.lib.apiTestCase import APITestCase\n\nfrom identityAPI.api.models import *\n\n#############################################################################\n\nclass ProfileTest(APITestCase):\n \"\"\" The unit test to test the API's profile-handling logic.\n \"\"\"\n def test_profile(self):\n \"\"\" Test the API's profile-handling logic.\n \"\"\"\n # Calculate a random username that isn't currently in use.\n\n username = self.random_username()\n\n # Open the photo file we want to use for the user's profile. Note that\n # we have to supply this to the API client as an open file object.\n\n photo_file = file(os.path.join(os.path.dirname(__file__),\n \"data\", \"photo.jpg\"), \"rb\")\n\n # Ask the API to create the new user, including profile information.\n\n response = self.client.post(\"/identity/create\",\n {'username' : username,\n 'password' : \"password\",\n 'photo' : photo_file,\n 'name' : \"Test\",\n 'email' : \"test@test.com\"})\n photo_file.close()\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n\n # Now, without logging in, ask the API for the user's public profile.\n\n response = self.client.post(\"/identity/get_profile\",\n {'username' : username})\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(response['Content-Type'], \"application/json\")\n profile = json.loads(response.content)\n\n # Finally, check that the expected user profile was returned.\n\n self.assertItemsEqual(profile.keys(), [\"photo_url_48x48\",\n \"photo_url_72x72\",\n \"photo_url_128x128\",\n \"name\",\n \"email\"])\n\n self.assertEqual(profile['name'], \"Test\")\n self.assertEqual(profile['email'], \"test@test.com\")\n\n","sub_path":"identityAPI/api/tests/test_profile.py","file_name":"test_profile.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316083210","text":"# pragma pylint: disable=attribute-defined-outside-init\n\n\"\"\"\nThis module load custom objects\n\"\"\"\nimport importlib.util\nimport inspect\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union\n\nfrom finrl.exceptions import OperationalException\n\nlogger = logging.getLogger(__name__)\n\n\nclass IResolver:\n \"\"\"\n This class contains all the logic to load custom classes\n \"\"\"\n # Childclasses need to override this\n object_type: Type[Any]\n object_type_str: str\n user_subdir: Optional[str] = None\n initial_search_path: Optional[Path]\n\n @classmethod\n def build_search_paths(cls, config: Dict[str, Any], user_subdir: Optional[str] = None,\n extra_dir: Optional[str] = None) -> List[Path]:\n\n abs_paths: List[Path] = []\n if cls.initial_search_path:\n abs_paths.append(cls.initial_search_path)\n\n if user_subdir:\n abs_paths.insert(0, config['user_data_dir'].joinpath(user_subdir))\n\n if extra_dir:\n # Add extra directory to the top of the search paths\n abs_paths.insert(0, Path(extra_dir).resolve())\n\n return abs_paths\n\n \n @classmethod\n def _get_valid_object(cls, module_path: Path, object_name: Optional[str],\n enum_failed: bool = False) -> Iterator[Any]:\n \"\"\"\n Generator returning 
objects with matching object_type and object_name in the path given.\n :param module_path: absolute path to the module\n :param object_name: Class name of the object\n :param enum_failed: If True, will return None for modules which fail.\n Otherwise, failing modules are skipped.\n :return: generator containing tuple of matching objects\n Tuple format: (Object, source)\n \"\"\"\n # Generate spec based on absolute path\n # Pass object_name as first argument to have logging print a reasonable name.\n spec = importlib.util.spec_from_file_location(object_name or \"\", str(module_path))\n module = importlib.util.module_from_spec(spec)\n try:\n spec.loader.exec_module(module) # type: ignore # importlib does not use typehints\n except (ModuleNotFoundError, SyntaxError, ImportError) as err:\n # Catch errors in case a specific module is not installed\n logger.warning(f\"Could not import {module_path} due to '{err}'\")\n if enum_failed:\n return iter([None])\n\n valid_objects_gen = ((obj, inspect.getsource(module)) for name, obj in inspect.getmembers(module, \n inspect.isclass) if ((object_name is None or object_name == name)\n and issubclass(obj, cls.object_type)\n and obj is not cls.object_type)\n )\n return valid_objects_gen\n\n \n @classmethod\n def _search_object(cls, directory: Path, *, object_name: str, \n add_source: bool = False) -> Union[Tuple[Any, Path], Tuple[None, None]]:\n \"\"\"\n Search for the object_name in the given directory\n :param directory: relative or absolute directory path\n :param object_name: ClassName of the object to load\n :return: object class\n \"\"\"\n logger.debug(f\"Searching for {cls.object_type.__name__} {object_name} in '{directory}'\")\n for entry in directory.iterdir():\n # Only consider python files\n if not str(entry).endswith('.py'):\n logger.debug('Ignoring %s', entry)\n continue\n module_path = entry.resolve()\n\n obj = next(cls._get_valid_object(module_path, object_name), None)\n if obj:\n obj[0].__file__ = str(entry)\n if add_source:\n obj[0].__source__ = obj[1]\n return (obj[0], module_path)\n return (None, None)\n\n \n @classmethod\n def _load_object(cls, paths: List[Path], *, object_name: str, add_source: bool = False,\n kwargs: dict = {}) -> Optional[Any]:\n \"\"\"\n Try to load object from path list.\n \"\"\"\n for _path in paths:\n try:\n (module, module_path) = cls._search_object(directory=_path, object_name=object_name, add_source=add_source)\n if module:\n logger.info(f\"Using resolved {cls.object_type.__name__.lower()[1:]} {object_name} \"\n f\"from '{module_path}'...\")\n return module(**kwargs)\n except FileNotFoundError:\n logger.warning('Path \"%s\" does not exist.', _path.resolve())\n return None\n\n \n @classmethod\n def load_object(cls, object_name: str, config: dict, *, kwargs: dict,\n extra_dir: Optional[str] = None) -> Any:\n \"\"\"\n Searches for and loads the specified object as configured in the child class.\n :param object_name: name of the module to import\n :param config: configuration dictionary\n :param extra_dir: additional directory to search for the given pairlist\n :raises: OperationalException if the class is invalid or does not exist.\n :return: Object instance or None\n \"\"\"\n abs_paths = cls.build_search_paths(config, user_subdir=cls.user_subdir, extra_dir=extra_dir)\n found_object = cls._load_object(paths=abs_paths, object_name=object_name, kwargs=kwargs)\n if found_object:\n return found_object\n raise OperationalException(f\"Impossible to load {cls.object_type_str} '{object_name}'. 
This class does not exist \"\n \"or contains Python code errors.\"\n )\n\n \n @classmethod\n def search_all_objects(cls, directory: Path,\n enum_failed: bool) -> List[Dict[str, Any]]:\n \"\"\"\n Searches a directory for valid objects\n :param directory: Path to search\n :param enum_failed: If True, will return None for modules which fail.\n Otherwise, failing modules are skipped.\n :return: List of dicts containing 'name', 'class' and 'location' entries\n \"\"\"\n logger.debug(f\"Searching for {cls.object_type.__name__} '{directory}'\")\n objects = []\n for entry in directory.iterdir():\n # Only consider python files\n if not str(entry).endswith('.py'):\n logger.debug('Ignoring %s', entry)\n continue\n module_path = entry.resolve()\n logger.debug(f\"Path {module_path}\")\n for obj in cls._get_valid_object(module_path, object_name=None, enum_failed=enum_failed):\n objects.append({'name': obj[0].__name__ if obj is not None else '',\n 'class': obj[0] if obj is not None else None,\n 'location': entry,\n })\n return objects\n","sub_path":"finrl/resolvers/iresolver.py","file_name":"iresolver.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319033938","text":"regalo = []\r\nprecio = []\r\nentrada = input()\r\nregalos=entrada.split(\" \")\r\nlist(regalos)\r\n# controlRemoto 450.00 hamburguesa 90.00 Switch 6999.99 tarjetaRegalo 500.00 PixelCelular 15000.00\r\nr1 = []\r\nr1 = (regalos[0], regalos[1])\r\nr2 = []\r\nr2 = (regalos[2], regalos[3])\r\nr3 = []\r\nr3 = (regalos[4], regalos[5])\r\nr4 = []\r\nr4 = (regalos[6], regalos[7])\r\nr5 = []\r\nr5 = (regalos[8], regalos[9])\r\n\r\ndata = [[r1, r2, r3, r4, r5]] \r\n\r\n\r\n\r\nprint(r1,\"\\n\",r2,\"\\n\",r3,\"\\n\",r4,\"\\n\",r5)\r\nprint(data)\r\n#print(list(regalos))\r\n","sub_path":"ago-dic-2020/practicas/JoseJovannyFloresReyna/practicas/extra/practica2Ext.py","file_name":"practica2Ext.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266426521","text":"import os\nimport csv\ndirectory = '/Users/yuanshi/Desktop/pythonprojects'\nfileList = []\nfor file in os.listdir(directory):\n if file.endswith(\".csv\"):\n fileList.append(file)\n\nmyList = []\nindList = []\nfor x in range(0,len(fileList)):\n with open(fileList[x],'U') as f:\n count = 0 # used to skip the header row in every file after the first\n for row in f:\n if(x > 0 and count == 0):\n count += 1\n continue\n indList = row.split(\",\")\n myList.append(indList)\n count += 1\n\nwith open('result.csv','w') as f:\n a = csv.writer(f)\n a.writerows(myList)\n","sub_path":"MergeCSVFiles.py","file_name":"MergeCSVFiles.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335155724","text":"\"\"\"\nExercise #2: Shopping cart\n\"\"\"\n\nfrom flask import Flask, render_template, request, redirect, url_for, flash, abort, session\n\napp = Flask(__name__)\napp.debug = True # only for development!\napp.secret_key = \"any random string\"\n\n\nclass ShoppingCart:\n \"\"\"Class representing a shopping cart.\"\"\"\n\n def __init__(self, contents=dict()):\n \"\"\"Initializes a shopping cart with content (if provided).\"\"\"\n self.__cart = contents\n\n def add(self, product_id, qt):\n \"\"\"Adds a product to the shopping cart or increases its quantity if it's already there.\"\"\"\n self.__cart[product_id] = self.__cart.get(product_id, 0) + qt\n\n def set(self, 
product_id, qt):\n \"\"\"Sets a product quantity.\"\"\"\n self.__cart[product_id] = qt\n\n def remove(self, product_id):\n \"\"\"Removes a product from the shopping cart.\"\"\"\n self.__cart.pop(product_id)\n\n def contains(self, product_id):\n \"\"\"Checks if the cart contains a given product.\"\"\"\n return product_id in self.__cart\n\n def contents(self):\n \"\"\"Returns the contents of the cart as a dict.\"\"\"\n return self.__cart\n\n\n@app.route(\"/\")\ndef index():\n cart = ShoppingCart(session.get(\"cart\", dict()))\n return render_template(\"page.html\", cart=cart.contents())\n\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add():\n product_id = request.form.get(\"product_id\", None)\n qt = int(request.form.get(\"qt\", 0))\n\n if product_id and qt:\n cart = ShoppingCart(session.get(\"cart\", dict()))\n cart.add(product_id, qt)\n session[\"cart\"] = cart.contents()\n flash(\"Product added to cart\")\n else:\n abort(400)\n\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/remove\", methods=[\"GET\"])\ndef remove():\n product_id = request.args.get(\"product_id\", None)\n if product_id:\n cart = ShoppingCart(session.get(\"cart\", dict()))\n if cart.contains(product_id):\n cart.remove(product_id)\n session[\"cart\"] = cart.contents()\n flash(\"Product removed from cart\")\n else: # trying to remove a product which is not in the cart\n abort(400)\n else:\n abort(400)\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/mod\", methods=[\"POST\"])\ndef mod():\n product_id = request.form.get(\"product_id\", None)\n qt = int(request.form.get(\"qt\", 0))\n\n if product_id and qt:\n cart = ShoppingCart(session.get(\"cart\", dict()))\n cart.set(product_id, qt)\n session[\"cart\"] = cart.contents()\n flash(\"Quantity modified\")\n else:\n abort(400)\n\n return redirect(url_for(\"index\"))\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n return render_template(\"400.html\"), 400\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"solutions/python/flask4/ex_2b/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"440161233","text":"from pathlib import Path\n\nimport ics\nimport os\nimport jinja2\nimport tomlkit\nimport copy\n\nimport election\n\nos.makedirs(\"site\", exist_ok=True)\n\nenv = jinja2.Environment(loader=jinja2.FileSystemLoader(\"templates\"))\n\nstates = {}\n\nstate_index = env.get_template(\"state/index.html.jinja\")\ncounty_index = env.get_template(\"state/county/index.html.jinja\")\n\nspecific_feed_name = \"{} Election Dates by electioncal.us\"\nall_feed_name = \"All Election Dates in {} by electioncal.us\"\n\n# Load per-state data. 
Directory names are the lower-cased state or county names.\ndbdir = Path(\"states/\")\n\nfor state in dbdir.glob(\"*/info.toml\"):\n state_info = dict(tomlkit.loads(state.read_text()))\n state_info[\"lower_name\"] = state.parent.name\n state_info[\"counties\"] = {}\n states[state.parent.name] = state_info\n\nfor county in dbdir.glob(\"*/*/info.toml\"):\n county_info = dict(tomlkit.loads(county.read_text()))\n county_info[\"lower_name\"] = county.parent.name\n state = county.parent.parent.name\n states[state][\"counties\"][county.parent.name] = county_info\n\n\ndef add_prefix(dates, *, states=None, counties=None):\n result = copy.deepcopy(dates)\n for date in result:\n if date[\"state\"] and states:\n date[\"name\"] = states[date[\"state\"]][\"name\"] + \" \" + date[\"name\"]\n elif date[\"county\"] and counties:\n date[\"name\"] = counties[date[\"county\"]][\"name\"] + \" \" + date[\"name\"]\n return result\n\n\nfor state_lower in states:\n state_info = states[state_lower]\n\n all_state_dates = [\n d for d in election.dates if d[\"state\"] is None or d[\"state\"] == state_lower\n ]\n\n # Load per-county data.\n counties = state_info[\"counties\"]\n for county_lower in counties:\n county_info = counties[county_lower]\n os.makedirs(f\"site/en/{state_lower}/{county_lower}\", exist_ok=True)\n county_dates = [\n d\n for d in all_state_dates\n if d[\"county\"] is None or d[\"county\"] == county_lower\n ]\n county_data = {\n \"language\": \"en\",\n \"state\": state_info,\n \"county\": dict(county_info),\n \"dates\": county_dates,\n }\n ics.generate(county_dates, f\"site/en/{state_lower}/{county_lower}/voter.ics\")\n county_index.stream(county_data).dump(\n f\"site/en/{state_lower}/{county_lower}/index.html\"\n )\n\n county_list = list(counties.values())\n county_list.sort(key=lambda x: x[\"lower_name\"])\n os.makedirs(f\"site/en/{state_lower}\", exist_ok=True)\n state_dates = [d for d in all_state_dates if d[\"county\"] is None]\n state_dates = add_prefix(state_dates, counties=counties)\n state_data = {\n \"language\": \"en\",\n \"state\": state_info,\n \"counties\": county_list,\n \"dates\": state_dates,\n }\n ics.generate(\n state_dates,\n f\"site/en/{state_lower}/voter.ics\",\n name=specific_feed_name.format(state_info[\"name\"]),\n )\n ics.generate(\n all_state_dates,\n f\"site/en/{state_lower}/all-voter.ics\",\n name=all_feed_name.format(state_info[\"name\"]),\n )\n state_index.stream(state_data).dump(f\"site/en/{state_lower}/index.html\")\n\nstate_list = list(states.values())\nstate_list.sort(key=lambda x: x[\"lower_name\"])\n\n# Render the index.\ntop_level = env.get_template(\"index.html.jinja\")\n\nfederal_dates = [d for d in election.dates if d[\"state\"] is None]\ntop = {\"language\": \"en\", \"states\": state_list, \"dates\": federal_dates}\nics.generate(federal_dates, f\"site/en/voter.ics\",\n name=all_feed_name.format(\"United States\"),)\nnational_dates = add_prefix(election.dates, states=states)\nics.generate(national_dates, f\"site/en/all-voter.ics\",\n name=all_feed_name.format(\"United States\"),)\ntop_level.stream(top).dump(\"site/index.html\")\n","sub_path":"scripts/build_site.py","file_name":"build_site.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"272945780","text":"#首先不确定面试允不允许这样调库。。。\n#要说这道题的话个人感觉需要明白两点:1.什么是异位词(排序之后相同的)2.要用什么数据结构保存比较好\n#具体到语言:python字典的Key不能用list要用tuple\n#https://leetcode-cn.com/problems/group-anagrams/solution/49-zi-mu-yi-wei-ci-fen-zu-jie-yong-zi-dian-by-alex/\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n if not strs:\n return []\n tmp = {}\n for each in strs:\n k = tuple(sorted(each))\n tmp[k] = tmp.get(k,[])+[each]\n \n return list(tmp.values())","sub_path":"Week2/Group Anagrams.py","file_name":"Group Anagrams.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590979049","text":"from TwitterAPI import TwitterAPI\nimport re\n\nimport config\n\ndef get_twitter(conf):\n \"\"\" Read the config_file and construct an instance of TwitterAPI.\n Args:\n conf ... A config object name with Twitter credentials\n Returns:\n An instance of TwitterAPI.\n \"\"\"\n twitter = TwitterAPI(\n conf.CONSUMER_KEY,\n conf.CONSUMER_SECRET,\n conf.ACCESS_TOKEN,\n conf.ACCESS_TOKEN_SECRET\n )\n return twitter\n\ndef show_config(conf):\n conf = getattr(config, conf)\n print('CONSUMER_KEY=%(CONSUMER_KEY)s\\nCONSUMER_SECRET=%(CONSUMER_SECRET)s\\nACCESS_TOKEN=%(ACCESS_TOKEN)s\\nACCESS_TOKEN_SECRET=%(ACCESS_TOKEN_SECRET)s\\n' % conf.__dict__)\n return\n\ndef robust_request(twitter, resource, params, timing, max_tries=5):\n \"\"\" If a Twitter request fails, sleep for 15 minutes.\n Do this at most max_tries times before quitting.\n Args:\n twitter .... A TwitterAPI object.\n resource ... A resource string to request.\n params ..... A parameter dictionary for the request.\n max_tries .. The maximum number of tries to attempt.\n Returns:\n A TwitterResponse object, or None if failed.\n \"\"\"\n while True:\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n timing.wait_nextwindow()\n return None\n\n","sub_path":"collector/twitter_wrapper.py","file_name":"twitter_wrapper.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427740619","text":"import string\r\nimport re\r\n\r\nfrom collections import Counter\r\nimport collections\r\nimport sys \r\n\r\ndef first():\r\n infile = open(\"PID count check.txt\", \"r+\")\r\n outfile = open('output.txt', \"w+\")\r\n for line in infile:\r\n words = line.split()\r\n x = words[1]\r\n print(x)\r\n outfile.write(x)\r\n outfile.write(\"\\n\")\r\n print (\"\\n\",\"*********End of PID List***************\") \r\n outfile.close()\r\n\r\ndef second():\r\n lines_seen = set() # holds lines already seen\r\n outfile = open('output.txt', \"r+\").readlines()\r\n ffile = open('result.txt', \"w+\")\r\n\r\n for line in outfile:\r\n #print (line)\r\n if line not in lines_seen: # not a duplicate\r\n ffile.write(line)\r\n lines_seen.add(line)\r\n ffile.close()\r\n for line in open('result.txt', \"r\"):\r\n #print (line)\r\n print(\"\\n\", line)\r\n print (\"\\n\",\"********End of Unique Count***************\")\r\ndef third():\r\n import collections\r\n outfile = open (\"output.txt\", \"r\").readlines()\r\n ffile = open('result.txt', \"w+\")\r\n with open('output.txt') as infile:\r\n counts = collections.Counter(l.strip() for l in infile)\r\n for line, count in counts.most_common():\r\n print (\"\\n\", line, \" - \", count,\"times appeared\")\r\n print (\"\\n\", \"*****End status for Counts of Unique Value********\") \r\n 
stdoutOrigin=sys.stdout \r\nsys.stdout = open(\"log.txt\", \"w\")\r\nfirst()\r\nsecond()\r\nthird()\r\n# restore stdout after logging to file\r\nsys.stdout.close()\r\nsys.stdout = stdoutOrigin\r\n \r\n","sub_path":"PID.py","file_name":"PID.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149813090","text":"from collections import namedtuple\n\nfrom .abstract import IterSeek, Direction\n\n\nclass CursorProxy(IterSeek):\n\n def __init__(self, db, res, cursor, db_name, direction=Direction.F):\n self.db = db\n self.res = res\n self.cursor = cursor\n self.db_name = db_name\n self.direction = direction\n self.can_set = None\n\n self._dupkey = None\n self.seeked = None\n\n self.dupsort = res.db[db_name].flags(res.txn)['dupsort']\n\n def __next__(self):\n if self.dupsort and self.dupkey is None:\n raise RuntimeError(\"dupkey must be set before iteration\")\n\n seeked = self.seeked\n if seeked is None:\n if self.direction is Direction.F:\n if self.dupsort:\n found = self.cursor.first_dup()\n else:\n found = self.cursor.first()\n else:\n if self.dupsort:\n found = self.cursor.last_dup()\n else:\n found = self.cursor.last()\n self.seeked = False\n elif seeked is False:\n if self.direction is Direction.F:\n if self.dupsort:\n found = self.cursor.next_dup()\n else:\n found = self.cursor.next()\n else:\n if self.dupsort:\n found = self.cursor.prev_dup()\n else:\n found = self.cursor.prev()\n else:\n if self.can_set is False:\n raise StopIteration\n\n found = None\n self.seeked = False\n\n if found is False:\n raise StopIteration\n else:\n raw_key, raw_value = self.cursor.item()\n if self.dupsort:\n if raw_value == b'':\n raise StopIteration\n return self._from_value(raw_value)\n else:\n if raw_key == b'':\n raise StopIteration\n return self._from_key(raw_key)\n\n\n @property\n def dupkey(self):\n return self._dupkey\n\n @dupkey.setter\n def dupkey(self, value):\n self._dupkey = value\n if not self.cursor.set_key(self._to_key(value)):\n raise ValueError(\"key not found\")\n\n def seek(self, key):\n self.seeked = key\n if self.dupkey is None:\n self.can_set = self.cursor.set_key(self._to_key(key))\n if not self.can_set:\n self.can_set = self.cursor.set_range(self._to_key(key))\n if self.direction == Direction.B:\n if not self.can_set:\n self.can_set = self.cursor.last()\n else:\n self.cursor.prev()\n else:\n self.can_set = self.cursor.set_key_dup(self._to_key(self.dupkey),\n self._to_value(key))\n if not self.can_set:\n self.can_set = self.cursor.set_range_dup(\n self._to_key(self.dupkey),\n self._to_value(key))\n if self.direction == Direction.B:\n if not self.can_set:\n self.can_set = self.cursor.last_dup()\n else:\n self.cursor.prev_dup()\n\n def _to_key(self, data):\n return self.db.K.db_value(data)\n\n def _from_key(self, data):\n return self.db.K.python_value(data)\n\n def _to_value(self, data):\n return self.db.V.db_value(data)\n\n def _from_value(self, data):\n return self.db.V.python_value(data)\n\n def get(self, key, default=None):\n raw = self.cursor.get(self._to_key(key), default=None)\n if raw is None:\n return default\n else:\n return self._from_value(raw)\n\n def first(self):\n return self.cursor.first()\n\n def last(self):\n return self.cursor.last()\n\n def next(self):\n return self.cursor.next()\n\n def prev(self):\n return self.cursor.prev()\n\n def put(self, key, value, **kwargs):\n return self.cursor.put(self._to_key(key),\n self._to_value(value),\n **kwargs)\n\n def putmulti(self, items, **kwargs):\n def _translate_items():\n for key, value in items:\n yield 
(self._to_key(key), self._to_value(value))\n\n return self.cursor.putmulti(_translate_items(), **kwargs)\n\n def pop(self, key):\n res = self.cursor.pop(self._to_key(key))\n if res is None:\n return None\n else:\n return self._from_value(res)\n\n def _iterate(self, iterator, keys, values):\n if keys and values:\n for raw_key, raw_value in iterator:\n yield (self._from_key(raw_key),\n self._from_value(raw_value))\n elif keys:\n for raw_key in iterator:\n yield self._from_key(raw_key)\n elif values:\n for raw_value in iterator:\n yield self._from_value(raw_value)\n else:\n raise ValueError(\"keys and/or values must be true\")\n\n def iternext(self, keys=True, values=True):\n return self._iterate(self.cursor.iternext(keys, values), keys, values)\n\n def iterprev(self, keys=True, values=True):\n return self._iterate(self.cursor.iterprev(keys, values), keys, values)\n\n def item(self):\n raw_key, raw_value = self.cursor.item()\n return (self._from_key(raw_key), self._from_value(raw_value))\n\n def delete2(self, dupdata=False):\n return self.cursor.delete(dupdata=dupdata)\n\n def delete(self, key, value=None):\n db_handler = self.db.get_db_handler(self.res, self.db_name)\n if value is not None:\n return self.res.txn.delete(self._to_key(key),\n value=self._to_value(value),\n db=db_handler)\n else:\n return self.res.txn.delete(self._to_key(key), db=db_handler)\n\n def set_range(self, key):\n return self.cursor.set_range(self._to_key(key))\n","sub_path":"binlog/cursor.py","file_name":"cursor.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463297856","text":"from django.shortcuts import render, reverse\nfrom django.http import HttpResponseRedirect\nfrom random import choice\nfrom .models import Eye, Nose, Mouth\n\n\ndef index(request):\n eyes = list(Eye.objects.all())\n noses = list(Nose.objects.all())\n mouths = list(Mouth.objects.all())\n emoticon = str(choice(eyes)) + str(choice(noses)) + str(choice(mouths))\n context = {'emoticon_template_var': emoticon}\n return render(request, 'emo_app/index.html', context)\n\n\ndef add(request):\n print('*' * 80)\n print('dictionary:'.ljust(15), request.POST)\n print('*' * 80)\n print('raw:'.ljust(15), request.body)\n print('*' * 80)\n data = request.POST\n face_parts = {\n 'eye': Eye,\n 'nose': Nose,\n 'mouth': Mouth,\n }\n new_part_class = face_parts[data['type']]\n new_part = new_part_class(char=data['new'])\n new_part.save()\n return HttpResponseRedirect(reverse('emo_paths:index_path'))\n","sub_path":"Assignments/Devan/4 Django/emoticon_django/emo_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562834352","text":"# File: Bot.py\r\n# Description: The bot's main logic and entry point\r\n\r\nimport Config\r\nimport Utilities\r\nimport OpOperations\r\nimport socket\r\nimport re\r\nimport thread\r\nimport CommandHandler\r\nfrom time import sleep\r\n\r\n\r\ndef main():\r\n s = socket.socket()\r\n s.connect((Config.HOST, Config.PORT))\r\n s.send(\"PASS {}\\r\\n\".format(Config.PASS).encode(\"utf-8\"))\r\n s.send(\"NICK {}\\r\\n\".format(Config.NICK).encode(\"utf-8\"))\r\n s.send(\"JOIN #{}\\r\\n\".format(Config.CHAN).encode(\"utf-8\"))\r\n\r\n chat_message = re.compile(r\"^:\\w+!\\w+@\\w+\\.tmi\\.twitch\\.tv PRIVMSG #\\w+ :\")\r\n Utilities.chat(s, \"SpotBot has started running!\")\r\n\r\n 
thread.start_new_thread(OpOperations.thread_fill_op_list, ())\r\n\r\n while True:\r\n response = s.recv(1024).decode(\"utf-8\")\r\n if response == \"PING :tmi.twitch.tv\\r\\n\":\r\n s.send(\"PONG :tmi.twitch.tv\\r\\n\".encode(\"utf-8\"))\r\n else:\r\n user = re.search(r\"\\w+\", response).group(0)\r\n message = chat_message.sub(\"\", response)\r\n message = message.strip()\r\n CommandHandler.handle_command(user, message, s)\r\n print(response)\r\n sleep(1)\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555338766","text":"#\n# Copyright 2012 eNovance \n# Copyright 2012 Red Hat, Inc\n# Copyright 2014 Cisco Systems, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport collections\n\nfrom oslo_log import log\n\nfrom ceilometer.compute import pollsters\nfrom ceilometer import sample\n\nLOG = log.getLogger(__name__)\n\n\nclass AggregateDiskPollster(pollsters.GenericComputePollster):\n inspector_method = \"inspect_disks\"\n\n def aggregate_method(self, result):\n fields = list(result[0]._fields)\n fields.remove(\"device\")\n agg_stats = collections.defaultdict(int)\n devices = []\n for stats in result:\n devices.append(stats.device)\n for f in fields:\n agg_stats[f] += getattr(stats, f)\n kwargs = dict(agg_stats)\n kwargs[\"device\"] = devices\n return [result[0].__class__(**kwargs)]\n\n @staticmethod\n def get_additional_metadata(instance, stats):\n return {'device': stats.device}\n\n\nclass PerDeviceDiskPollster(pollsters.GenericComputePollster):\n inspector_method = \"inspect_disks\"\n\n @staticmethod\n def get_resource_id(instance, stats):\n return \"%s-%s\" % (instance.id, stats.device)\n\n @staticmethod\n def get_additional_metadata(instance, stats):\n return {'disk_name': stats.device}\n\n\nclass ReadRequestsPollster(AggregateDiskPollster):\n sample_name = 'disk.read.requests'\n sample_unit = 'request'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'read_requests'\n\n\nclass PerDeviceReadRequestsPollster(PerDeviceDiskPollster):\n sample_name = 'disk.device.read.requests'\n sample_unit = 'request'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'read_requests'\n\n\nclass ReadBytesPollster(AggregateDiskPollster):\n sample_name = 'disk.read.bytes'\n sample_unit = 'B'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'read_bytes'\n\n\nclass PerDeviceReadBytesPollster(PerDeviceDiskPollster):\n sample_name = 'disk.device.read.bytes'\n sample_unit = 'B'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'read_bytes'\n\n\nclass WriteRequestsPollster(AggregateDiskPollster):\n sample_name = 'disk.write.requests'\n sample_unit = 'request'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'write_requests'\n\n\nclass PerDeviceWriteRequestsPollster(PerDeviceDiskPollster):\n sample_name = 'disk.device.write.requests'\n sample_unit = 'request'\n sample_type = 
sample.TYPE_CUMULATIVE\n sample_stats_key = 'write_requests'\n\n\nclass WriteBytesPollster(AggregateDiskPollster):\n sample_name = 'disk.write.bytes'\n sample_unit = 'B'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'write_bytes'\n\n\nclass PerDeviceWriteBytesPollster(PerDeviceDiskPollster):\n sample_name = 'disk.device.write.bytes'\n sample_unit = 'B'\n sample_type = sample.TYPE_CUMULATIVE\n sample_stats_key = 'write_bytes'\n\n\nclass ReadBytesRatePollster(AggregateDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.read.bytes.rate'\n sample_unit = 'B/s'\n sample_stats_key = 'read_bytes_rate'\n\n\nclass PerDeviceReadBytesRatePollster(PerDeviceDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.device.read.bytes.rate'\n sample_unit = 'B/s'\n sample_stats_key = 'read_bytes_rate'\n\n\nclass ReadRequestsRatePollster(AggregateDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.read.requests.rate'\n sample_unit = 'request/s'\n sample_stats_key = 'read_requests_rate'\n\n\nclass PerDeviceReadRequestsRatePollster(PerDeviceDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.device.read.requests.rate'\n sample_unit = 'request/s'\n sample_stats_key = 'read_requests_rate'\n\n\nclass WriteBytesRatePollster(AggregateDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.write.bytes.rate'\n sample_unit = 'B/s'\n sample_stats_key = 'write_bytes_rate'\n\n\nclass PerDeviceWriteBytesRatePollster(PerDeviceDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.device.write.bytes.rate'\n sample_unit = 'B/s'\n sample_stats_key = 'write_bytes_rate'\n\n\nclass WriteRequestsRatePollster(AggregateDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.write.requests.rate'\n sample_unit = 'request/s'\n sample_stats_key = 'write_requests_rate'\n\n\nclass PerDeviceWriteRequestsRatePollster(PerDeviceDiskPollster):\n inspector_method = \"inspect_disk_rates\"\n sample_name = 'disk.device.write.requests.rate'\n sample_unit = 'request/s'\n sample_stats_key = 'write_requests_rate'\n\n\nclass DiskLatencyPollster(AggregateDiskPollster):\n inspector_method = 'inspect_disk_latency'\n sample_name = 'disk.latency'\n sample_unit = 'ms'\n sample_stats_key = 'disk_latency'\n\n\nclass PerDeviceDiskLatencyPollster(PerDeviceDiskPollster):\n inspector_method = 'inspect_disk_latency'\n sample_name = 'disk.device.latency'\n sample_unit = 'ms'\n sample_stats_key = 'disk_latency'\n\n\nclass DiskIOPSPollster(AggregateDiskPollster):\n inspector_method = 'inspect_disk_iops'\n sample_name = 'disk.iops'\n sample_unit = 'count/s'\n sample_stats_key = 'iops_count'\n\n\nclass PerDeviceDiskIOPSPollster(PerDeviceDiskPollster):\n inspector_method = 'inspect_disk_iops'\n sample_name = 'disk.device.iops'\n sample_unit = 'count/s'\n sample_stats_key = 'iops_count'\n\n\nclass CapacityPollster(AggregateDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.capacity'\n sample_unit = 'B'\n sample_stats_key = 'capacity'\n\n\nclass PerDeviceCapacityPollster(PerDeviceDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.device.capacity'\n sample_unit = 'B'\n sample_stats_key = 'capacity'\n\n\nclass AllocationPollster(AggregateDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.allocation'\n sample_unit = 'B'\n sample_stats_key = 'allocation'\n\n\nclass 
PerDeviceAllocationPollster(PerDeviceDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.device.allocation'\n sample_unit = 'B'\n sample_stats_key = 'allocation'\n\n\nclass PhysicalPollster(AggregateDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.usage'\n sample_unit = 'B'\n sample_stats_key = 'physical'\n\n\nclass PerDevicePhysicalPollster(PerDeviceDiskPollster):\n inspector_method = 'inspect_disk_info'\n sample_name = 'disk.device.usage'\n sample_unit = 'B'\n sample_stats_key = 'physical'\n","sub_path":"ceilometer/compute/pollsters/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158497235","text":"import collections\n\n\ndef _599_findRestaurant(self, list1, list2):\n \"\"\"\n :type list1: List[str]\n :type list2: List[str]\n :rtype: List[str]\n \"\"\"\n dict1 = {s: i for i, s in enumerate(list1)}\n minSum = len(list1) + len(list2)\n ans = []\n\n for i, s in enumerate(list2):\n if s in dict1:\n currentSum = i + dict1[s]\n if minSum > currentSum:\n ans = [s]\n minSum = currentSum\n elif minSum == currentSum:\n ans.append(s)\n return ans\n\n\ndef _594_findLHS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n\n cnums = collections.Counter(nums)\n snums = sorted(cnums.items(), key=lambda x: x[0], reverse=True)\n\n ans = 0\n for item in range(1, len(snums)):\n\n if abs(snums[item - 1][0] - snums[item][0]) == 1:\n ans = max(ans, snums[item][1] + snums[item - 1][1])\n\n return ans\n\n\ndef _1_twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for i in range(len(nums)):\n for j in range(i, len(nums)):\n if (nums[i] + nums[j]) == target:\n if i != j:\n return [i, j]\n\n\ndef _575_distributeCandies(self, candies):\n \"\"\"\n :type candies: List[int]\n :rtype: int\n \"\"\"\n n = len(candies)\n candies = collections.Counter(candies)\n x = len(candies)\n if x >= n / 2:\n return n / 2\n else:\n return x\n\n\ndef _500_findWords(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n\n first = ('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p')\n second = ('a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l')\n third = ('z', 'x', 'c', 'b', 'v', 'n', 'm')\n\n ans = []\n\n for word in words:\n temp = word\n word = word.lower()\n if word:\n group = first\n if word[0] in second:\n group = second\n elif word[0] in third:\n group = third\n\n flag = 1\n for s in word:\n if s not in group:\n flag = 0\n break\n if flag:\n ans.append(temp)\n return ans\n\n\ndef _409_longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n size = len(s)\n\n if s == s[::-1]: return size\n\n s = collections.Counter(s)\n flag = 0\n cnt = 0\n\n for key, val in s.items():\n cnt += val\n if val % 2 == 1:\n cnt -= 1\n flag = 1\n\n return cnt + flag\n\n\ndef _136_singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = nums[0]\n for i in nums[1:]:\n res = res ^ i\n return res\n\n\ndef _290_wordPattern(self, pattern, str1):\n \"\"\"\n :type pattern: str\n :type str: str\n :rtype: bool\n \"\"\"\n str1 = list(str1.split(' '))\n pattern = list(pattern)\n dict1, dict2 = {}, {}\n\n if len(str1) != len(pattern):\n return False\n\n for i in range(len(str1)):\n if not dict1.has_key(str1[i]) and not dict2.has_key(pattern[i]):\n dict1[str1[i]] = pattern[i]\n dict2[pattern[i]] = str1[i]\n elif 
dict1.get(str1[i]) != pattern[i] or dict2.get(pattern[i]) != str1[i]:\n return False\n\n return True\n\n\ndef _204_countPrimes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 1:\n return 0\n\n isPrime = [True] * n\n isPrime[0], isPrime[1] = False, False\n x = 2\n\n while x * x < n:\n if isPrime[x]:\n p = x * x\n while p < n:\n isPrime[p] = False\n p += x\n x += 1\n\n return sum(isPrime)\n\n\ndef _202_isHappy(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n s = set()\n\n while n != 1:\n t = 0\n while n:\n t += (n % 10) ** 2\n n = n / 10\n\n if t in s:\n break\n else:\n s.add(t)\n n = t\n\n return n == 1\n\n\ndef _349_intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n return list(set(nums1) & set(nums2))\n\n\ndef _645_findErrorNums(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n if not nums:\n return [0, 0]\n\n n = len(nums)\n\n dup = sum(nums) - sum(set(nums))\n miss = n * (n + 1) / 2 - (sum(nums) - dup)\n return [dup, miss]\n\n\ndef _350_intersect(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n if not nums1 or not nums2:\n return []\n\n dic1 = collections.Counter(nums1)\n res = []\n\n for i in range(len(nums2)):\n if dic1[nums2[i]] > 0:\n res.append(nums2[i])\n dic1[nums2[i]] -= 1\n return res\n\n\ndef _463_islandPerimeter(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid:\n return 0\n h, w = len(grid), len(grid[0])\n ans = 0\n for i in range(h):\n for j in range(w):\n if grid[i][j] == 1:\n if (j == 0 or grid[i][j - 1] == 0):\n ans += 1\n if (i == 0 or grid[i - 1][j] == 0):\n ans += 1\n if (j == w - 1 or grid[i][j + 1] == 0):\n ans += 1\n if (i == h - 1 or grid[i + 1][j] == 0):\n ans += 1\n return ans\n\n\n\ndef _447_numberOfBoomerangs(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n \"\"\"\n res = 0\n for x1, y1 in points:\n d = collections.defaultdict(int)\n for x2, y2 in points:\n d[(x1 - x2) ** 2 + (y1 - y2) ** 2] += 1\n\n for item in d:\n res += d[item] * (d[item] - 1)\n return res\n\n\ndef _347_topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n dic = collections.Counter(nums)\n return [x[0] for x in dic.most_common(k)]\n\n\ndef _299_getHint(self, secret, guess):\n \"\"\"\n :type secret: str\n :type guess: str\n :rtype: str\n \"\"\"\n bulls, cows, digits = 0, 0, [0] * 10\n\n for i in range(len(guess)):\n if guess[i] == secret[i]:\n bulls += 1\n else:\n digits[int(secret[i])] += 1\n\n for i in range(len(guess)):\n if guess[i] != secret[i] and digits[int(guess[i])] != 0:\n cows += 1\n digits[int(guess[i])] -= 1\n\n return str(bulls) + 'A' + str(cows) + 'B'","sub_path":"leet_hush_table.py","file_name":"leet_hush_table.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630946339","text":"from .serializers import (RegionListSerializer, RegionNameSerializer,\n CityNameSerializer, CityListSerializer,\n PDKSerializer)\nfrom .models import Region, City, PDK\nfrom rest_framework.generics import (ListCreateAPIView, ListAPIView,\n RetrieveUpdateDestroyAPIView)\nfrom django.shortcuts import render\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\nclass RegionNameList(ListAPIView):\n queryset = Region.objects.only('id', 'name')\n serializer_class 
= RegionNameSerializer\n\n\nclass RegionList(ListCreateAPIView):\n serializer_class = RegionListSerializer\n\n def get_queryset(self):\n queryset = Region.objects.all()\n id_region = self.request.query_params.get('region', None)\n if id_region is not None:\n queryset = queryset.filter(id=id_region)\n return queryset\n\n\nclass RegionDetail(RetrieveUpdateDestroyAPIView):\n queryset = Region.objects.all()\n serializer_class = RegionListSerializer\n\n\nclass CityNameList(ListAPIView):\n queryset = City.objects.only('id', 'name')\n serializer_class = CityNameSerializer\n\n\nclass CityList(ListCreateAPIView):\n queryset = City.objects.all()\n serializer_class = CityListSerializer\n\n\nclass CityDetail(RetrieveUpdateDestroyAPIView):\n queryset = City.objects.all()\n serializer_class = CityListSerializer\n\n\nclass PDKList(ListAPIView):\n queryset = PDK.objects.all()\n serializer_class = PDKSerializer\n","sub_path":"service_ecol_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43830951","text":"class Solution:\n def sortArrayByParity(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n \"\"\"\n from collections import deque\n queue = deque();\n for i in A:\n if i%2==0: queue.appendleft(i);\n else: queue.append(i);\n return list(queue);","sub_path":"Leetcode/Others/python/arrays/905_SortArrayByParity.py","file_name":"905_SortArrayByParity.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83885345","text":"#\n# GeoAPI - Java interfaces for OGC/ISO standards\n# http://www.geoapi.org\n#\n# Copyright (C) 2018-2019 Open Geospatial Consortium, Inc.\n# All Rights Reserved. http://www.opengeospatial.org/ogc/legal\n#\n# Permission to use, copy, and modify this software and its documentation, with\n# or without modification, for any purpose and without fee or royalty is hereby\n# granted, provided that you include the following on ALL copies of the software\n# and documentation or portions thereof, including modifications, that you make:\n#\n# 1. The full text of this NOTICE in a location viewable to users of the\n# redistributed or derivative work.\n# 2. 
Notice of any changes or modifications to the OGC files, including the\n# date changes were made.\n#\n# THIS SOFTWARE AND DOCUMENTATION IS PROVIDED \"AS IS,\" AND COPYRIGHT HOLDERS MAKE\n# NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n# TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT\n# THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY\n# PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.\n#\n# COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR\n# CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION.\n#\n# The name and trademarks of copyright holders may NOT be used in advertising or\n# publicity pertaining to the software without specific, written prior permission.\n# Title to copyright in this software and any associated documentation will at all\n# times remain with copyright holders.\n#\n\n##########################################################\n# Python Wheel build script\n# Author: Johann Sorel (Geomatys)\n#\n# Usage: python3 build.py sdist\n##########################################################\n\nimport setuptools\nimport glob\nimport shutil\nimport os\n\n\ndef createInit(path):\n f = open(path+\"/__init__.py\", \"w+\")\n f.close()\n\n\nshutil.rmtree('dist/', ignore_errors=True)\nshutil.rmtree('temp/', ignore_errors=True)\nshutil.rmtree('opengis/', ignore_errors=True)\n\n##########################################################\n# Copy python files located in other modules.\n##########################################################\n\n# Create java library folder\ndirectory = 'opengis/'\nif not os.path.exists(directory):\n os.makedirs(directory)\n\n# API\ndir = 'opengis/metadata/'\nos.makedirs(dir)\nfiles = glob.glob('../geoapi/src/main/python/opengis/metadata/*.py')\nfor f in files:\n shutil.copy2(f, dir)\n\n# Java bridge\ndir = 'opengis/bridge/java/'\nos.makedirs(dir)\nfiles = glob.glob('../geoapi-java-python/src/main/python/opengis/bridge/java/*.py')\nfor f in files:\n shutil.copy2(f, dir)\n\n# GDAL bridge\ndir = 'opengis/wrapper/'\nos.makedirs(dir)\nfiles = glob.glob('../geoapi-gdal/src/main/python/opengis/wrapper/*.py')\nfor f in files:\n shutil.copy2(f, dir)\n\n# __init__.py in each folder to include\ncreateInit(\"opengis\")\ncreateInit(\"opengis/bridge\")\ncreateInit(\"opengis/bridge/java\")\n\n\n##########################################################\n# Generate pip package.\n##########################################################\n\nimport setup\n\n##########################################################\n# Cleaning, setup tool makes a lot of side files.\n##########################################################\nshutil.rmtree(\"opengis.egg-info\", ignore_errors=True)\nshutil.rmtree(\"__pycache__\", ignore_errors=True)\nshutil.rmtree(\"opengis\", ignore_errors=True)\n","sub_path":"geoapi-python-wheel/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248688482","text":"from datetime import datetime, timedelta\n\nimport pandas as pd\nfrom driver_repo import driver, driver_stats_fv\n\nfrom feast import FeatureStore\n\n\ndef main():\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.width\", 1000)\n\n # Load the feature store from the current path\n fs = FeatureStore(repo_path=\".\")\n\n # Deploy the feature store to AWS\n print(\"Deploying feature store to AWS...\")\n 
fs.apply([driver, driver_stats_fv])\n\n # Select features\n features = [\"driver_hourly_stats:conv_rate\", \"driver_hourly_stats:acc_rate\"]\n\n # Create an entity dataframe. This is the dataframe that will be enriched with historical features\n entity_df = pd.DataFrame(\n {\n \"event_timestamp\": [\n pd.Timestamp(dt, unit=\"ms\", tz=\"UTC\").round(\"ms\")\n for dt in pd.date_range(\n start=datetime.now() - timedelta(days=3),\n end=datetime.now(),\n periods=3,\n )\n ],\n \"driver_id\": [1001, 1002, 1003],\n }\n )\n\n print(\"Retrieving training data...\")\n\n # Retrieve historical features by joining the entity dataframe to the Redshift table source\n training_df = fs.get_historical_features(\n features=features, entity_df=entity_df\n ).to_df()\n\n print()\n print(training_df)\n\n print()\n print(\"Loading features into the online store...\")\n fs.materialize_incremental(end_date=datetime.now())\n\n print()\n print(\"Retrieving online features...\")\n\n # Retrieve features from the online store (Firestore)\n online_features = fs.get_online_features(\n features=features, entity_rows=[{\"driver_id\": 1001}, {\"driver_id\": 1002}],\n ).to_dict()\n\n print()\n print(pd.DataFrame.from_dict(online_features))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sdk/python/feast/templates/aws/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"259377186","text":"import pygame\nfrom pygame.locals import *\n\nfrom bounds import check\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"media/small.png\")\n self.rect = self.image.get_rect()\n self.image = pygame.transform.scale(self.image, (int(self.rect.w/2), int(self.rect.h/2)))\n self.rect = self.image.get_rect()\n self.jumping = False\n self.falling = False\n\n def tick(self):\n keys = pygame.key.get_pressed()\n if keys[K_RIGHT] and check(self.rect.x + 5, self.rect.y):\n self.rect.x += 5\n if keys[K_LEFT] and check(self.rect.x - 5, self.rect.y):\n self.rect.x -= 5\n if keys[K_UP] and not self.jumping and not self.falling:\n self.jumping = True\n self.jumpsLeft = 25\n\n if self.jumping:\n if self.jumpsLeft == 0 or not check(self.rect.x, self.rect.y - 5):\n self.jumping = False\n self.falling = True\n else:\n self.rect.y -= 5\n self.jumpsLeft -= 1\n\n if not self.jumping and check(self.rect.x, self.rect.y + 5): # gravity\n self.rect.y += 5\n self.falling = True\n\n if not self.jumping and not check(self.rect.x, self.rect.y + 5):\n self.falling = False\n","sub_path":"killer-queen/one-player/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480632478","text":"import datetime\n\nimport attr\nfrom marshmallow.fields import Field\n\nfrom korbenware.dbus.service import Service # noqa\nfrom korbenware.dbus.marshmallow.fields import (\n DBusField, Bytes, Bool, Int16, UInt16, Int32, UInt32, Int64, UInt64,\n Double, Str, ObjectPath, Signature, List, Tuple, Nested, Variant,\n DateTime\n)\nfrom korbenware.dbus.marshmallow.schema import DBUS_FIELD, DBUS_NESTED\n\n__all__ = [\n 'Field', 'Service', 'DBusField', 'Bytes', 'Bool', 'Int16', 'UInt16',\n 'Int32', 'UInt32', 'Int64', 'UInt64', 'Double', 'Str', 'ObjectPath',\n 'Signature', 'List', 'Tuple', 'Nested', 'Variant', 'DateTime',\n 'DBUS_FIELD', 'DBUS_NESTED', 
'dbus_attr'\n]\n\n\nSENSIBLE_DEFAULTS = {\n Bool: False,\n Int16: -1,\n UInt16: 0,\n Int32: -1,\n UInt32: 0,\n Int64: -1,\n UInt64: 0,\n Double: -1,\n Str: '',\n List: attr.Factory(list),\n Tuple: attr.Factory(tuple),\n DateTime: datetime.datetime.fromtimestamp(0)\n}\n\n\ndef dbus_attr(field=None, metadata=None, default=None, **kwargs):\n if not default:\n default = SENSIBLE_DEFAULTS.get(field, attr.Factory(field))\n\n metadata = metadata or dict()\n\n if isinstance(field, Field):\n metadata[DBUS_FIELD] = field\n else:\n metadata[DBUS_NESTED] = field\n\n return attr.ib(metadata=metadata, default=default, **kwargs)\n","sub_path":"korbenware/dbus/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"515012396","text":"import csv\nimport random\ndef read_graph(filename):\n tsv = csv.reader(open(filename), delimiter='\\t')\n G = {}\n actors = set()\n for (actor, movie, year) in tsv:\n make_link(G, actor, movie + '(' + year + ')')\n actors.add(actor)\n return G, actors\ndef centrality(G, v):\n marked_node={}\n marked_node[v]=True\n distance_to_node={}\n distance_to_node[v]=0\n open_list=[v]\n while len(open_list)>0:\n current=open_list.pop(0)\n for neighbor in G[current].keys():\n if neighbor not in marked_node:\n marked_node[neighbor]=True\n open_list.append(neighbor)\n distance_to_node[neighbor]=distance_to_node[current]+1\n \n return (float(sum(distance_to_node.values())))/len(distance_to_node) \ndef make_link(G, node1, node2):\n if node1 not in G:\n G[node1] = {}\n (G[node1])[node2] = 1\n if node2 not in G:\n G[node2] = {}\n (G[node2])[node1] = 1\n return G\ndef compute_centralities(G, actors):\n C = {}\n for actor in actors:\n C[actor] = centrality(G, actor)\n return C\n\ndef partition(C, v):\n smaller={}\n bigger={}\n middle={}\n for val in C.keys():\n if C[val] < C[v]:\n smaller[val]=C[val]\n \n if C[val] > C[v]:\n bigger[val]=C[val]\n if C[val]==C[v]:\n middle[val]=C[val] \n return smaller,middle,bigger\n\ndef Kth_element(C,k):\n v=random.choice(C.keys())\n (left,middle,right)=partition(C, v)\n if len(left)>=k:\n return Kth_element(left,k) \n elif len(left)+len(middle)>=k: return v, C[v]\n \n \n return Kth_element(right,k-len(left)-len(middle))\n \nactors_graph, actors = read_graph(\"imdb-1.tsv\")\ncentrality_of_actors=compute_centralities(actors_graph, actors)\nprint (Kth_element(centrality_of_actors,20))\n\n","sub_path":"actor centrality.py","file_name":"actor centrality.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426569512","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\nif sys.version_info[0] > 2:\n from urllib import request as urllib2\n from urllib.parse import urlencode\n import http.cookiejar as cookiejar\nelse:\n import urllib2\n from urllib import urlencode\n import cookielib as cookiejar\n\nimport os\nimport logging as l\nimport random\nimport time\n\nfrom rod import exception as exc\nfrom rod.util import Color\nfrom rod.parser import Parser\nfrom rod.config import GlobalConf as Cf\n\n\nclass Automator:\n \"\"\"Automator is a class to crawl a website and execute some actions.\"\"\"\n\n def __init__(self, cj_filename, user_conf=None):\n \"\"\"Initialize the automator.\n\n :param user_conf: filename for configuration\n :param cj_filename: filename for cookiejar\n :returns: None\n \"\"\"\n\n Cf()\n if user_conf is not None:\n if not os.path.exists(user_conf):\n raise 
exc.FileNotFoundError(\"configuration file not found.\")\n else:\n Cf.update(user_conf)\n\n if not os.path.exists(cj_filename):\n raise exc.FileNotFoundError(\"cookie-jar file not found.\")\n\n self._parser = Parser()\n\n self._cj_file = cj_filename\n self._cj = cookiejar.LWPCookieJar()\n if os.path.isfile(self._cj_file):\n self._cj.load(self._cj_file)\n\n cookie_proc = urllib2.HTTPCookieProcessor(self._cj)\n self.opener = urllib2.build_opener(cookie_proc)\n\n self._header = {\n \"Accept-Language\": Cf.get('user.lang'),\n \"Accept\": Cf.get('user.mime'),\n \"Accept-Charset\": Cf.get('user.charset'),\n \"User-Agent\": Cf.get('user.ua'),\n }\n\n self.url_prefix = Cf.get('service.url_prefix')\n self.path_prefix = self.url_prefix + self.__get_prefix()\n Cf.set('service.path_prefix', self.path_prefix)\n\n def __get_prefix(self):\n req = urllib2.Request(self.url_prefix, None, self._header)\n res = self.opener.open(req)\n conts = res.read()\n return self._parser.parse('init', conts)\n\n def get_info(self):\n conts = self._get('mypage')\n return self._parser.parse('mypage', conts)\n\n def get_mission_status(self):\n conts = self._get('mission')\n return self._parser.parse('mission_check', conts)\n\n def do_recent_mission(self):\n conts = self._get('mission')\n res = self._parser.parse('mission_check', conts)\n\n if res['info']['st_flag']:\n l.debug('post req')\n self._post(res['req_data'], res['req_action'])\n\n return res\n\n def list_players(self):\n conts = self._get('battle')\n user_id, user_hash, form = self._parser.parse('battle', conts)\n if len(user_hash.keys()) <= 3:\n res = self._post(form['req_data'], form['action'])\n user_id, user_hash, form = self._parser.parse('battle', res)\n print(user_hash)\n\n def loop(self, display):\n init_info = self.get_info()\n display.show_info(init_info)\n\n sleep_base = int(Cf.get('automator.sleep_base'))\n sleep_rand = int(Cf.get('automator.sleep_rand'))\n interval = int(Cf.get('automator.interval'))\n iv_range = int(Cf.get('automator.interval_range'))\n time.sleep(sleep_base + random.randrange(sleep_rand))\n\n st_flag = True\n if Cf.get('automator.event_priority') == '0':\n # mission\n status = self.do_recent_mission()\n st_flag = status['info']['st_flag']\n display.show_mission_status(status, action='do_mission')\n time.sleep(sleep_base + random.randrange(sleep_rand))\n\n info = self.get_info()\n\n # battle\n if info['power_cur'] == info['power_max']:\n l.info(\"%sbattle%s\" % (Color.YELLOW, Color.END_COLOR))\n self.battle()\n time.sleep(sleep_base + random.randrange(sleep_rand))\n after_info = self.get_info()\n gain = after_info['energy'] - info['energy']\n l.info(\" - gain:%d\" % gain)\n\n display.show_info(after_info)\n display.show_guild_info(self.get_guild_info())\n\n # investment\n time.sleep(sleep_base + random.randrange(sleep_rand))\n if info['energy'] > int(Cf.get('automator.investment_threshold')):\n l.info(' - investment 1k')\n self.get_investment1k()\n\n if not st_flag:\n interval_sec = interval + random.randrange(iv_range)\n time.sleep(interval_sec)\n else:\n l.warn('Not implemented: event_priority > 0')\n time.sleep(600)\n\n def battle(self):\n conts = self._get('battle')\n user_id, user_hash, form = self._parser.parse('battle', conts)\n if len(user_hash.keys()) <= 10:\n # change gear\n conts = self._post(form['req_data'], form['action'])\n user_id, user_hash, form = self._parser.parse('battle', conts)\n\n conts = self._get('battle%2Fcheck%2F' + str(user_id) + '%2F0')\n req_data, form_action = self._parser.parse('battle_check', 
conts)\n\n self._post(req_data, form_action)\n\n def get_guild_info(self):\n conts = self._get('guild%2Fjoinindex')\n res = self._parser.parse('guild', conts)\n\n return res\n\n def get_investment1k(self):\n conts = self._get('guild%2Fchkinvestment%2F1000')\n req_data, form_action = self._parser.parse('investment1k', conts)\n res = self._post(req_data, form_action)\n return res\n\n def run(self, name, display):\n if name == 'quest_info':\n display.show_mission_status(self.get_mission_status())\n elif name == 'battle':\n self.list_players()\n elif name == 'quest':\n display.show_mission_status(self.do_recent_mission(),\n action='do_mission')\n display.show_mission_status(self.get_mission_status())\n elif name == 'info':\n display.show_info(self.get_info())\n elif name == 'guild':\n display.show_guild_info(self.get_guild_info())\n elif name == 'investment1k':\n self.get_investment1k()\n elif name == 'loop':\n while True:\n self.loop(display)\n\n def _post(self, param, full_url):\n req = urllib2.Request(full_url, urllib.urlencode(param), self._header)\n res = self.opener.open(req)\n conts = res.read()\n return conts\n\n def _get(self, page):\n url = self.path_prefix + page\n req = urllib2.Request(url, None, self._header)\n res = self.opener.open(req)\n conts = res.read()\n return conts\n","sub_path":"rod/automator.py","file_name":"automator.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207075947","text":"def solution(n):\n answer = 0\n \n threeEx = ''\n while n > 0:\n if n % 3 == 0: threeEx += '0'\n else: threeEx += str(n % 3)\n n = n//3\n \n count = 1\n for value in reversed(threeEx):\n answer += int(value) * count\n print(answer)\n count*=3\n \n return answer","sub_path":"Level1/16.3진법뒤집기.py","file_name":"16.3진법뒤집기.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"406957294","text":"#define function\ndef extend(parameter,filename):\n with open(filename,'r',encoding='UTF-8') as fy:\n Htext=fy.readlines()\n code_num=0\n blank_num=0\n annotate_num=0\n for content in Htext:\n content = content.strip()\n # 统计空行\n if content == '':\n blank_num += 1\n # 统计注释行\n elif content.startswith('#'):\n annotate_num += 1\n # 统计代码行\n else:\n code_num += 1\n if parameter=='k':\n return blank_num\n elif parameter=='z':\n return annotate_num\n elif parameter=='d':\n return code_num\nparameter=input(\"parameter is:\")\nprint\nfilename=input(\"filename is:\")\nprint\na=extend(parameter,filename)\nprint(a)\nparameter=input(\"parameter is:\")\nprint\nfilename=input(\"filename is:\")\nprint\nb=extend(parameter,filename)\nprint(b)\nparameter=input(\"parameter is:\")\nprint\nfilename=input(\"filename is:\")\nprint\nc=extend(parameter,filename)\nprint(c)\n","sub_path":"v03.py","file_name":"v03.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231991120","text":"#!/usr/bin/python\n# -*- encoding: utf-8; py-indent-offst: 4 -*-\n\ngroup = \"agents/\" + _(\"Agent Plugins\")\n\nregister_rule(group,\n \"agent_config:hyperv_new\",\n DropdownChoice(\n title = _(\"HyperV Cluster Plugins (Windows)\"),\n help = _('This plugin checks the status of HyperV Cluster'),\n choices = [\n ( True, _(\"Deploy plugin for HyperV Cluster plugin\") ),\n ( None, _(\"Do not deploy plugin for HyperV Cluster plugin\") ),\n ]\n 
)\n)\n\n","sub_path":"checks/hyperv_cluster/1.x/web/plugins/wato/hyperv_new_deploy.py","file_name":"hyperv_new_deploy.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"217352412","text":"# 辅助函数:计算列表中所有区间所覆盖的时间范围\ndef support(x: list) -> int:\n tmp = []\n for i in x:\n tmp += i\n left, right = min(tmp), max(tmp)\n count = 0\n for i in range(left, right):\n for j in x:\n if i in range(j[0], j[1]):\n count += 1\n break\n return count\n\n\nn = int(input())\ndata = []\nfor i in range(n):\n data.append(list(map(int, input().split(' '))))\nresult = 0\nfor i in data:\n tmp = data[:]\n tmp.remove(i)\n if support(tmp) > result:\n result = support(tmp)\nprint(result)\n","sub_path":"Code/CodeRecords/2554/60632/257500.py","file_name":"257500.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64729325","text":"import time\t#debug\nfrom math import log\nimport random\ndata = {}\ndef readfile(file) :\n\tf = open(file, \"r\")\n\tfor line in f :\n\t\t# parsing lines with whitespaces\n\t\tligne = line.split(\" \")\n\t\t# stocking the first element of the line\n\t\tclasse=int(ligne[0])\n\t\tdel(ligne[0])\n\t\t# first time that we find this class\n\t\tif classe not in data.keys() :\n\t\t\tdata[classe] = []\t# dictionary (key : value) = (class : list of docs)\n\t\tdata[classe].append({})\t# doc = dictionary(word : nb of occurrences)\n\t\tfor element in ligne :\n\t\t\tcouple = element.split(\":\")\n\t\t\tif (element != '\\n' ) :\n\t\t\t\tdata[classe][-1][int(couple[0])] = int(couple[1])\t\t# add in last\n\n\n\t#for each word detected through all the docs, listing the number of docs containing it.\n\twordpresence = {}\t#dictionary(class : dictionary(word : nb of docs containing it))\n\tnbdocs = {}\t\t\t#number of documents in each class\n\tbaseapprentissage = [] #Liste des mots dans le texte\n\tonfaitbazap=True\n\twordtest=[]\n\tfor k, v in data.items() :\n\t\trng=random.randint(1,100)\n\t\tif rng < 31 : \n\t\t\twordtest.append((k,[]))\n\t\t\tonfaitbazap=False\n\t\telse : \n\t\t\twordpresence[k] = {}\n\t\t\tonfaitbazap=True\n\t\tfor elem in v :\n\t\t\tfor k2 in elem.keys() :\n\t\t\t\t#print(k2, \"\\n\")\n\t\t\t\tif onfaitbazap :\t\t\t\t\t\t\t\n\t\t\t\t\tif k2 not in baseapprentissage :\t\t# if the current word is not known yet\n\t\t\t\t\t\tbaseapprentissage.append(k2)\n\t\t\t\t\tif k2 not in (wordpresence[k]).keys() :\t\n\t\t\t\t\t\t(wordpresence[k])[k2] = 1\n\t\t\t\t\t\tif k not in nbdocs :\n\t\t\t\t\t\t\tnbdocs[k] = 3\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tnbdocs[k] += 1\n\t\t\t\t\telse :\n\t\t\t\t\t\t(wordpresence[k])[k2] += 1\n\t\t\t\telse :\n\t\t\t\t\tif k2 not in wordtest[-1][1] :\n\t\t\t\t\t\twordtest[-1][1].append(k2)\n\tprint(\"WT\", wordtest)\t\t\t\t\t\n\t#for i in baseapprentissage :\n\t#\tprint(i)\n\t#for i,j in wordpresence.items() :\n\t#\tprint(i)\n\t#\tprint(j)\n\t#print(nbdocs)\n\treturn wordpresence,nbdocs,baseapprentissage,wordtest\n\n\t'''\n\t#verification qu'on est pas dans le dernier élément\n\tif (element != ligne[-1]) :\n\t\t#recuperation de la clef\n\t\tprint(couple[0])\n\t\t#recup nb occur\n\t\tprint(couple[1])'''\t\n\t'''\n\t#Suppression du premier element que l'on devra avoir stocke avant. 
\n\tdel (ligne[0])\n\t#division de clef & nombre occurence\t\t\n\tfor element in ligne :\n\t\tcouple = element.split(\":\")\n\t\t#verification qu'on est pas dans le dernier élément\nprint(la)\nprint(pasla)\n\t\tif (element != ligne[-1]) :\n\t\t\t#recuperation de la clef\n\t\t\tprint(couple[0])\n\t\t\t#recup nb occur\n\t\t\tprint(couple[1])\n\t'''\n\n\t#return data\n\t#affichage de data\n\t'''for k, v in data.items() :\n\t\tprint(k,\"\\n\")\n\t\tfor elem in v :\t\t\n\t\t\tprint(elem,\"\\n\")\n\t'''\n\ndef probappari(dicomot,nbdocs,baza) :\n apari={}\n absence={}\n for i in dicomot :\n print(i)\n apari[i]={}\n absence[i]=(log(1/nbdocs[i]),log(1-(1/nbdocs[i])))\n print(absence[i])\n for j,k in dicomot[i].items() :\n #print(j, k)\n nbmaux=int(nbdocs[i])\n apari[i][j]=(k+1)/nbmaux\n #if j in prezdansdoc :\n #prezdansdoc.remove(j)\n #print(prezdansdoc)\n #for h in prezdansdoc :\n \n #for i,j in apari.items() : \n #print(i,j)\n\t#for i,j in baseapprentissage.items() :\n\t#\tprint(i,j)\n #for i,j in absence.items() :\n #print (i,j)\n return apari,absence\n\ndef computeProbasK(data) :\n\tnbtotaldocs = sum(len(v) for v in data.values())\t\t\t\t# total number of documents in the learning base\n\tdictprobasK = {}\n\tfor k, v in data.items() :\t\t\t\t\t\t\t\t\t\t# compute and store p(k) for each class k in the learning base\n\t\tdictprobasK[k] = len(v)/nbtotaldocs\n\treturn dictprobasK\n\n\n\n#wordintestfile,nbdocsintestfile,baseapprentissage=readfile(\"BaseReuters-29\")\nwordininputfile,nbdocsininputfile,baseapprentissage,motdanstest=readfile(\"test\")\n\nla,pasla=probappari(wordininputfile,nbdocsininputfile,baseapprentissage)\nprobasK = computeProbasK(data)\nprint(\"P : \", probasK)\n\n'''\nallwords = []\nfor k, v in wordininputfile.items() :\t\t# for each class detected in the input file\n\tfor k2, v2 in v.items() :\t\t\t\t# for each word found existing in this class\n\t\tif k2 not in allwords :\t\t\t\t# if this word hasn't been seen yet amongst previous classes\n\t\t\tallwords.append(k2)\t\t\t\t# add the word to the words already seen in the input file\n\nbigsums = []\nindex = 0\t\t\t\t\t\t\t\t\t# dictionary (class : log(p(k|current doc in testbase))\nfor doctest in motdanstest :\t\t\t\t# for each document of the test base\n\tbigsums.append({})\n\tfor k, v in data.items():\t\t\t\t# for each class k detected in the input file \n\t\ts = 0\t\t\t\t\t\t\t\t# initialise log(p|k) to zero\n\t\tfor word in allwords :\t\t\t\t# for each word amongst all the different words in the learning base\n\t\t\tif word in doctest[1] :\t\t\t# if the word of the learning base exists in the current document of the test base\n\t\t\t\ts += pasla[word][0]\t\t# add log(p(word|k)) to log(p|k)\n\t\t\telse :\t\t\t\t\t\t\t# else\n\t\t\t\ts += pasla[word][1]\t\t# add the log of the opposite probability 1-p(word|k) to log(p|k)\n\t\tbigsums[-1] = s + log(probasK[k])\t# store dictionary(k : log(p(k|doc))\n\nprint(bigsums)\n'''\n\n","sub_path":"projet3.py","file_name":"projet3.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"196957313","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\n\nimport json\nimport re\nimport requests\nfrom distutils.version import LooseVersion\nimport click\n\n\ndef scanFile(composer_obj, verbose, token):\n\n headers = {\n \"Authorization\": \"Token token={}\".format(token)\n }\n\n globalFound = 0\n\n for package in composer_obj['packages']:\n if re.match(\"wordpress-[plugin|theme]\", 
package[\"type\"]):\n _type = package[\"type\"].split(\"-\")[-1]\n name = package[\"name\"].split(\"/\")[-1]\n version = package['version']\n\n try:\n r = requests.get(\"https://wpvulndb.com/api/v3/{}/{}\".format(\n _type+\"s\",\n name\n ), headers=headers)\n r.raise_for_status()\n except Exception:\n if r.status_code == 404:\n if verbose:\n click.secho(\"{} not found on WPVulnDB\".format(name), fg=\"yellow\")\n click.echo()\n continue\n else:\n click.secho(\"API request for {} failed\".format(name), fg=\"yellow\")\n click.echo(r.status_code)\n click.echo()\n continue\n\n if len(json.loads(r.text)[name]['vulnerabilities']) > 0:\n found = 0\n titlePrint = False\n for v in json.loads(r.text)[name]['vulnerabilities']:\n try:\n if not LooseVersion(version) >= LooseVersion(v[\"fixed_in\"]):\n if not titlePrint:\n click.echo(\"{} - {} - {}\".format(name, version, _type))\n titlePrint = True\n click.secho(\"VULNERABILITY FOUND!!!\", fg=\"red\")\n click.echo(\"{}\".format(v[\"title\"]))\n click.echo(\"https://wpvulndb.com/vulnerabilities/{}\".format(v[\"id\"]))\n # set found to 1 so we can exit\n found = 1\n except TypeError:\n if not titlePrint:\n click.echo(\"{} - {} - {}\".format(name, version, _type))\n titlePrint = True\n click.secho(\"Cannot properly compare versions {} (from composer.lock) to {} for https://wpvulndb.com/vulnerabilities/{}\".format(version, v[\"fixed_in\"], v[\"id\"]))\n if not found:\n if verbose:\n click.echo(\"{} - {} - {}\".format(name, version, _type))\n click.secho(\"{} {} has reported vulnerabilities, but they are all fixed in version {}\".format(_type, name, version), fg=\"green\")\n click.echo()\n else:\n globalFound = 1\n click.echo()\n\n else:\n if verbose:\n click.echo(\"{} - {} - {}\".format(name, version, _type))\n click.secho(\"No vulnerabilities found for {} {}\".format(_type, name), fg=\"green\")\n click.echo()\n\n return globalFound\n","sub_path":"composer_scan/composer_scan.py","file_name":"composer_scan.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273258442","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(db_index=True, max_length=100, default='')),\n ('slug', models.CharField(db_index=True, max_length=100, default='')),\n ('index', models.IntegerField(default=0)),\n ('is_dropdown', models.BooleanField(default=False)),\n ('parent', models.ForeignKey(to='blog.Category')),\n ],\n options={\n 'verbose_name': 'Post Category',\n 'ordering': ['index'],\n 'verbose_name_plural': 'Post Categories',\n },\n ),\n migrations.CreateModel(\n name='Entry',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=100, unique=True)),\n ('slug', models.SlugField(max_length=100, unique=True)),\n ('body_md', models.TextField(default='')),\n ('body_html', models.TextField(blank=True, default='')),\n ('cover', models.CharField(blank=True, max_length=100, default='')),\n ('excerpt', models.TextField(blank=True, default='')),\n ('is_published', models.BooleanField(db_index=True, default=True)),\n ('auto_cover', models.BooleanField(db_index=True, 
default=True)),\n ('auto_excerpt', models.BooleanField(db_index=True, default=True)),\n ('created_date', models.DateTimeField(auto_now_add=True, db_index=True)),\n ('last_modified', models.DateTimeField(db_index=True, auto_now=True)),\n ('category', models.ForeignKey(to='blog.Category')),\n ],\n options={\n 'verbose_name': 'Blog Entry',\n 'ordering': ['-created_date'],\n 'verbose_name_plural': 'Blog Entries',\n },\n ),\n ]\n","sub_path":"blog/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97674996","text":"\r\ndef minimumCost(cost, persons):\r\n \r\n cost = sorted(cost)\r\n totalCost = 0\r\n\r\n for i in range(persons - 1, 1, -2):\r\n if i == 2:\r\n totalCost += cost[2] + cost[0]\r\n else:\r\n \r\n price_first = cost[i] + cost[0] + 2 * cost[1]\r\n price_second = cost[i] + cost[i - 1] + 2 * cost[0]\r\n totalCost += min(price_first, price_second)\r\n\r\n \r\n if persons == 1:\r\n totalCost += cost[0]\r\n else:\r\n totalCost += cost[1]\r\n\r\n return totalCost\r\n\r\ntest_cases = int(input())\r\nfor test_case in range(test_cases):\r\n persons = int(input())\r\n cost = list(int(num) for num in input(\" \").strip().split())[:persons]\r\n print(minimumCost(cost, persons))\r\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375523614","text":"# Bot written to play the game using what seems to be the best strategy according to the authors. \n\nimport MLModifiedSpaceShooter as game\nimport numpy as np\n\n\ndef playGame():\n game_state = game.GameState()\n todo = 0\n counter = 0\n\n while True:\n if counter > 12:\n counter = 0\n if todo == 0:\n todo = 1\n else:\n todo = 0\n counter = counter + 1\n # shoot\n action = np.zeros(4)\n action[2] = 1\n game_state.frame_step(action)\n\n # shoot\n action = np.zeros(4)\n action[2] = 1\n game_state.frame_step(action)\n\n # move\n action = np.zeros(4)\n action[todo] = 1\n game_state.frame_step(action)\n\n # shoot\n action = np.zeros(4)\n action[2] = 1\n game_state.frame_step(action)\n\n # shoot\n action = np.zeros(4)\n action[2] = 1\n game_state.frame_step(action)\n\n\ndef main():\n playGame()\n\nif __name__ == \"__main__\":\n main()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320039043","text":"from prime_tools import prime_table\n\n\ndef isCalcByFunction(n):\t\n\tchecklist = [x for x in prime_table(n) if x not in (2,3,5)]\n\tfor p in checklist:\n\t\tif n%p == 0:\n\t\t\treturn False\n\treturn True\n\n\ndef test(k):\n\ti = 0\n\tvalue = 0\n\tvalues =[]\n\twhile i List[int]:\n d: Dict[int, int] = {}\n for i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n else:\n d[target-num] = i\n return []\n","sub_path":"solutions/two_sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492768434","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 5 14:37:24 2020\n\n@author: corkep\n\"\"\"\n\nfrom spatialmath.geom3d import *\n\nimport unittest\nimport numpy.testing as nt\nimport spatialmath.base as base\n\nclass PluckerTest(unittest.TestCase):\n \n\n # 
Primitives\n def test_constructor1(self):\n \n # construct from 6-vector\n L = Plucker([1, 2, 3, 4, 5, 6])\n self.assertIsInstance(L, Plucker)\n nt.assert_array_almost_equal(L.v, np.r_[1, 2, 3])\n nt.assert_array_almost_equal(L.w, np.r_[4, 5, 6])\n \n # construct from object\n L2 = Plucker(L)\n self.assertIsInstance(L, Plucker)\n nt.assert_array_almost_equal(L2.v, np.r_[1, 2, 3])\n nt.assert_array_almost_equal(L2.w, np.r_[4, 5, 6])\n \n # construct from point and direction\n L = Plucker.PointDir([1, 2, 3], [4, 5, 6])\n self.assertTrue(L.contains([1, 2, 3]))\n nt.assert_array_almost_equal(L.uw, base.unitvec([4, 5, 6]))\n \n \n def test_vec(self):\n # verify double\n L = Plucker([1, 2, 3, 4, 5, 6])\n nt.assert_array_almost_equal(L.vec, np.r_[1, 2, 3, 4, 5, 6])\n \n def test_constructor2(self):\n # 2, point constructor\n P = np.r_[2, 3, 7]\n Q = np.r_[2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n nt.assert_array_almost_equal(L.w, P-Q)\n nt.assert_array_almost_equal(L.v, np.cross(P-Q, Q))\n \n # TODO, all combos of list and ndarray\n # test all possible input shapes\n # L2, = Plucker(P, Q)\n # self.assertEqual(double(L2), double(L))\n # L2, = Plucker(P, Q')\n # self.assertEqual(double(L2), double(L))\n # L2, = Plucker(P', Q')\n # self.assertEqual(double(L2), double(L))\n # L2, = Plucker(P, Q)\n # self.assertEqual(double(L2), double(L))\n \n # # planes constructor\n # P = [10, 11, 12]'; w = [1, 2, 3]\n # L = Plucker.PointDir(P, w)\n # self.assertEqual(double(L), [cross(w,P) w]'); %FAIL\n # L2, = Plucker.PointDir(P', w)\n # self.assertEqual(double(L2), double(L))\n # L2, = Plucker.PointDir(P, w')\n # self.assertEqual(double(L2), double(L))\n # L2, = Plucker.PointDir(P', w')\n # self.assertEqual(double(L2), double(L))\n \n \n def test_pp(self):\n # validate pp and ppd\n L = Plucker.TwoPoints([-1, 1, 2], [1, 1, 2])\n nt.assert_array_almost_equal(L.pp, np.r_[0, 1, 2])\n self.assertEqual(L.ppd, math.sqrt(5))\n \n # validate pp\n self.assertTrue( L.contains(L.pp) )\n \n \n def test_contains(self):\n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n # validate contains\n self.assertTrue( L.contains([2, 3, 7]) )\n self.assertTrue( L.contains([2, 1, 0]) )\n self.assertFalse( L.contains([2, 1, 4]) )\n \n \n def test_closest(self):\n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n out = L.closest_to_point(P)\n nt.assert_array_almost_equal(out.p, P)\n self.assertAlmostEqual(out.d, 0)\n \n # validate closest with given points and origin\n out = L.closest_to_point(Q)\n nt.assert_array_almost_equal(out.p, Q)\n self.assertAlmostEqual(out.d, 0)\n \n L = Plucker.TwoPoints([-1, 1, 2], [1, 1, 2])\n out = L.closest_to_point([0, 1, 2])\n nt.assert_array_almost_equal(out.p, np.r_[0, 1, 2])\n self.assertAlmostEqual(out.d, 0)\n \n out = L.closest_to_point([5, 1, 2])\n nt.assert_array_almost_equal(out.p, np.r_[5, 1, 2])\n self.assertAlmostEqual(out.d, 0)\n \n out = L.closest_to_point([0, 0, 0])\n nt.assert_array_almost_equal(out.p, L.pp)\n self.assertEqual(out.d, L.ppd)\n \n out = L.closest_to_point([5, 1, 0])\n nt.assert_array_almost_equal(out.p, [5, 1, 2])\n self.assertAlmostEqual(out.d, 2)\n \n def test_plot(self):\n \n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d', proj_type='ortho')\n ax.set_xlim3d(-10, 10)\n ax.set_ylim3d(-10, 10)\n ax.set_zlim3d(-10, 10)\n \n L.plot(color='red', linewidth=2)\n \n def test_eq(self):\n w = np.r_[1, 2, 3]\n P = np.r_[-2, 4, 3]\n \n L1 = Plucker.TwoPoints(P, P + w)\n L2 = 
Plucker.TwoPoints(P + 2 * w, P + 5 * w)\n L3 = Plucker.TwoPoints(P + np.r_[1, 0, 0], P + w)\n \n self.assertTrue(L1 == L2)\n self.assertFalse(L1 == L3)\n \n self.assertFalse(L1 != L2)\n self.assertTrue(L1 != L3)\n \n def test_skew(self):\n \n P = [2, 3, 7]; Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n m = L.skew\n \n self.assertEqual(m.shape, (4,4))\n nt.assert_array_almost_equal(m + m.T, np.zeros((4,4)))\n \n def test_mtimes(self):\n P = [1, 2, 0]\n Q = [1, 2, 10] # vertical line through (1,2)\n L = Plucker.TwoPoints(P, Q)\n \n # check transformation by SE3\n \n L2 = SE3() * L\n nt.assert_array_almost_equal(L.vec, L2.vec)\n \n L2 = SE3(2, 0, 0) * L # shift line in the x direction\n nt.assert_array_almost_equal(L2.vec, np.r_[20, -30, 0, 0, 0, -10])\n L2 = SE3(0, 2, 0) * L # shift line in the y direction\n nt.assert_array_almost_equal(L2.vec, np.r_[40, -10, 0, 0, 0, -10])\n \n def test_parallel(self):\n \n L1 = Plucker.PointDir([4, 5, 6], [1, 2, 3])\n L2 = Plucker.PointDir([5, 5, 6], [1, 2, 3])\n L3 = Plucker.PointDir([4, 5, 6], [3, 2, 1])\n \n # L1, || L2, but doesnt intersect\n # L1, intersects L3\n \n self.assertTrue( L1.isparallel(L1) )\n self.assertTrue(L1 | L1)\n \n self.assertTrue( L1.isparallel(L2) )\n self.assertTrue(L1 | L2)\n self.assertTrue( L2.isparallel(L1) )\n self.assertTrue(L2 | L1)\n self.assertFalse( L1.isparallel(L3) )\n self.assertFalse(L1 | L3)\n \n \n def test_intersect(self):\n \n \n L1 = Plucker.PointDir([4, 5, 6], [1, 2, 3])\n L2 = Plucker.PointDir([5, 5, 6], [1, 2, 3])\n L3 = Plucker.PointDir( [4, 5, 6], [0, 0, 1])\n L4 = Plucker.PointDir([5, 5, 6], [1, 0, 0])\n \n # L1, || L2, but doesnt intersect\n # L3, intersects L4\n self.assertFalse( L1^L2, )\n \n self.assertTrue( L3^L4, )\n \n \n def test_commonperp(self):\n L1 = Plucker.PointDir([4, 5, 6], [0, 0, 1])\n L2 = Plucker.PointDir([6, 5, 6], [0, 1, 0])\n \n self.assertFalse( L1|L2)\n self.assertFalse( L1^L2)\n \n self.assertEqual( L1.distance(L2), 2)\n \n L = L1.commonperp(L2) # common perp intersects both lines\n \n self.assertTrue( L^L1)\n self.assertTrue( L^L2)\n \n \n def test_line(self):\n \n # mindist\n # intersect\n # char\n # intersect_volume\n # mindist\n # mtimes\n # or\n # side\n pass\n \n def test_contains(self):\n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n self.assertTrue( L.contains(L.point(0)) )\n self.assertTrue( L.contains(L.point(1)) )\n self.assertTrue( L.contains(L.point(-1)) )\n\n def test_point(self):\n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n nt.assert_array_almost_equal(L.point(0).flatten(), L.pp)\n\n for x in (-2, 0, 3):\n nt.assert_array_almost_equal(L.lam(L.point(x)), x)\n \n def test_char(self):\n P = [2, 3, 7]\n Q = [2, 1, 0]\n L = Plucker.TwoPoints(P, Q)\n \n s = str(L)\n self.assertIsInstance(s, str)\n\n\n def test_plane(self):\n \n xyplane = [0, 0, 1, 0]\n xzplane = [0, 1, 0, 0]\n L = Plucker.TwoPlanes(xyplane, xzplane) # x axis\n nt.assert_array_almost_equal(L.vec, np.r_[0, 0, 0, -1, 0, 0])\n \n L = Plucker.TwoPoints([-1, 2, 3], [1, 2, 3]); # line at y=2,z=3\n x6 = [1, 0, 0, -6] # x = 6\n \n # plane_intersect\n p, lam = L.intersect_plane(x6)\n nt.assert_array_almost_equal(p, np.r_[6, 2, 3])\n nt.assert_array_almost_equal(L.point(lam).flatten(), np.r_[6, 2, 3])\n \n\n x6s = Plane.PN(n=[1, 0, 0], p=[6, 0, 0])\n p, lam = L.intersect_plane(x6s)\n nt.assert_array_almost_equal(p, np.r_[6, 2, 3])\n \n nt.assert_array_almost_equal(L.point(lam).flatten(), np.r_[6, 2, 3])\n \n def test_methods(self):\n # intersection\n px = 
Plucker.TwoPoints([0, 0, 0], [1, 0, 0]); # x-axis\n py = Plucker.TwoPoints([0, 0, 0], [0, 1, 0]); # y-axis\n px1 = Plucker.TwoPoints([0, 1, 0], [1, 1, 0]); # offset x-axis\n \n self.assertEqual(px.ppd, 0)\n self.assertEqual(px1.ppd, 1)\n nt.assert_array_almost_equal(px1.pp, [0, 1, 0])\n\n px.intersects(px)\n px.intersects(py)\n px.intersects(px1)\n \n \n # def test_intersect(self):\n # px = Plucker([0, 0, 0], [1, 0, 0]); # x-axis\n # py = Plucker([0, 0, 0], [0, 1, 0]); # y-axis\n # \n # plane.d = [1, 0, 0]; plane.p = 2; # plane x=2\n # \n # px.intersect_plane(plane)\n # py.intersect_plane(plane)\n\nif __name__ == \"__main__\":\n\n unittest.main()\n","sub_path":"tests/test_geom3d.py","file_name":"test_geom3d.py","file_ext":"py","file_size_in_byte":9584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472609174","text":"import flask\nimport pytest, secrets, re, json, sys\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nfrom app import create_app\nfrom app.api.auth import verification_required\nfrom config import Config\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass TestConfig(Config):\n TESTING = True\n FLASK_APP = \"pap.py\"\n FLASK_DEBUG = 1\n FLASK_ENV = \"development\"\n MAIL_SERVER = \"smtp.googlemail.com\"\n MAIL_PORT=587\n MAIL_USE_TLS = 1\n MAIL_USERNAME = \"peerassesspromailer@gmail.com\"\n MAIL_PASSWORD = \"*********\"\n DATABASE_URL = \"mysql+pymysql://f5hnnik3dka6mr99:dkc1sz78jb0engyj@blonze2d5mrbmcgf.cbetxkdyhwsb.us-east-1.rds.amazonaws.com:3306/g66ricd94wwu8746\"\n\n\napp = create_app(TestConfig)\n\n@pytest.fixture\ndef crypto_bundle():\n def _make_bundle(key):\n backend = default_backend()\n cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)\n return {\n 'backend': backend,\n 'digest': hashes.Hash(hashes.SHA256(), backend=backend),\n 'encryptor': cipher.encryptor()\n }\n return _make_bundle\n\ndef test_auth_only_url(crypto_bundle):\n test_auth_key = '34c38b9bae87cc0d0513a04452ed71f4ee9e4f70f7d63cd9698ea6f9e203df5c'\n test_url = '/api/surveys/relaunch?survey_id=xorro_2'\n\n crypto = crypto_bundle(bytes.fromhex(test_auth_key))\n\n crypto['digest'].update(test_url.encode('utf-8'))\n hashed_url = crypto['digest'].finalize()\n\n signature_bytes = crypto['encryptor'].update(hashed_url) + crypto['encryptor'].finalize()\n signature = bytes.hex(signature_bytes)\n\n with app.test_request_context(f'{test_url}&signature={signature}'):\n\n @verification_required\n def mock_fun():\n return True\n\n assert mock_fun()==True\n\n\ndef test_auth_url_json(crypto_bundle):\n test_auth_key = '34c38b9bae87cc0d0513a04452ed71f4ee9e4f70f7d63cd9698ea6f9e203df5c'\n test_url = '/api/surveys/relaunch?survey_id=xorro_2'\n test_body={\n \"testKey\": 1, \n \"anotherTestKey\": \"test_value\"\n }\n print(json.dumps(test_body))\n test_text = test_url + json.dumps(test_body)\n\n crypto = crypto_bundle(bytes.fromhex(test_auth_key))\n\n crypto['digest'].update(test_text.encode('utf-8'))\n hashed_url = crypto['digest'].finalize()\n\n signature_bytes = crypto['encryptor'].update(hashed_url) + crypto['encryptor'].finalize()\n signature = bytes.hex(signature_bytes)\n\n with app.test_request_context(f'{test_url}&signature={signature}', data=json.dumps(test_body), content_type='application/json'):\n @verification_required\n def mock_fun():\n return True\n\n assert mock_fun()==True\n\n\ndef 
test_auth_url_random_data(crypto_bundle):\n test_auth_key = '34c38b9bae87cc0d0513a04452ed71f4ee9e4f70f7d63cd9698ea6f9e203df5c'\n test_url = '/api/surveys/relaunch?survey_id=xorro_2'\n test_body=\"bla bla bla\".encode('utf-8')\n\n test_data = test_url.encode('utf-8') + test_body\n\n crypto = crypto_bundle(bytes.fromhex(test_auth_key))\n\n crypto['digest'].update(test_data)\n hashed_url = crypto['digest'].finalize()\n\n signature_bytes = crypto['encryptor'].update(hashed_url) + crypto['encryptor'].finalize()\n signature = bytes.hex(signature_bytes)\n\n with app.test_request_context(f'{test_url}&signature={signature}', data=test_body, content_type='text/plain'):\n @verification_required\n def mock_fun():\n return True\n\n assert mock_fun()==True","sub_path":"tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293502978","text":"import numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import cross_validation\n\nfrom regression_classification.utilities import visualize_classifier\n\nif __name__ == '__main__':\n # Input file containing data\n input_file = 'data_multivar_nb.txt'\n\n data = np.loadtxt(input_file, delimiter=',')\n X, y = data[:, :-1], data[:, -1]\n\n # Create Naïve Bayes classifier\n classifier = GaussianNB()\n\n # Train the classifier\n classifier.fit(X, y)\n\n # Predict the values for training data\n y_pred = classifier.predict(X)\n\n # Split data into training and test data\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,\n test_size=0.2, random_state=3)\n classifier_new = GaussianNB()\n classifier_new.fit(X_train, y_train)\n y_test_pred = classifier_new.predict(X_test)\n\n # compute accuracy of the classifier\n accuracy = 100.0 * (y_test == y_test_pred).sum() / X_test.shape[0]\n print(\"Accuracy of the new classifier =\", round(accuracy, 2), \"%\")\n\n # Visualize the performance of the classifier\n visualize_classifier(classifier_new, X_test, y_test)\n\n num_folds = 3\n accuracy_values = cross_validation.cross_val_score(classifier,\n X, y, scoring='accuracy', cv=num_folds)\n print(\"Accuracy: \" + str(round(100 * accuracy_values.mean(), 2)) + \"%\")\n precision_values = cross_validation.cross_val_score(classifier,\n X, y, scoring='precision_weighted', cv=num_folds)\n print(\"Precision: \" + str(round(100 * precision_values.mean(), 2)) + \"%\")\n recall_values = cross_validation.cross_val_score(classifier,\n X, y, scoring='recall_weighted', cv=num_folds)\n print(\"Recall: \" + str(round(100 * recall_values.mean(), 2)) + \"%\")\n f1_values = cross_validation.cross_val_score(classifier,\n X, y, scoring='f1_weighted', cv=num_folds)\n print(\"F1: \" + str(round(100 * f1_values.mean(), 2)) + \"%\")\n","sub_path":"machine/regression_classification/naives_probability.py","file_name":"naives_probability.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63160294","text":"import pandas as pd\r\nimport math\r\nimport statistics\r\nfrom techstaff import TechStaff\r\n\r\n# File path to tech_labour_hours.xlsx\r\ntech_labour_hours_path = \"model_inputs/labour_reports/tech_labour_hours.xlsx\"\r\n\r\n# Read \"General Summary\" into a dataframe\r\ngen_sum_df = pd.read_excel(tech_labour_hours_path, sheet_name=\"General Summary\")\r\n\r\n# File path to staff_salaries.xlsx\r\nstaff_salaries_file_path = 
\"model_inputs/labour_reports/staff_salaries.xlsx\"\r\n\r\n\"\"\"\r\n########################################################################################################################\r\n##################################### MODULE SCOPE FUNCTIONS BELOW #####################################################\r\n########################################################################################################################\r\n\"\"\"\r\n\r\n\r\ndef read_tech_staff_ref():\r\n \"\"\"\r\n Reads data from \"Tech Staff\" sheet in staff_salaries.xlsx, which gives us information about the staffing level of\r\n techs at all the cost centres.\r\n\r\n :return: DataFrame with columns \"cost_centre_name\", \"health_auth\", \"function\", \"level8\", \"level9\", \"level10\",\r\n \"level12\".\r\n\r\n Note: The \"levelx\" columns contain a float indicating the number of techs of that level at the\r\n corresponding cost centre.\r\n \"\"\"\r\n\r\n tech_staff_df = pd.read_excel(staff_salaries_file_path, sheet_name=\"Tech Staff\")\r\n\r\n return tech_staff_df\r\n\r\n\r\ndef read_tech_vac_summary():\r\n \"\"\"\r\n Reads into a dict the \"level\" and \"avg_vac\" columns from \"Vacation Summary\" sheet in tech_labour_hours.xlsx.\r\n\r\n :return: Dict in the following form:\r\n Key: level (int: 8, 9, 10, 12)\r\n Value: avg_vac (int)\r\n \"\"\"\r\n\r\n vac_sum_df = pd.read_excel(tech_labour_hours_path, sheet_name=\"Vacation Summary\")\r\n annual_vac_days_by_level_dict = vac_sum_df.set_index(\"level\")[\"avg_vac\"].to_dict()\r\n\r\n return annual_vac_days_by_level_dict\r\n\r\n\r\ndef read_hours_paid_per_year():\r\n \"\"\"\r\n Reads in the number of hours for which techs get paid in a year from \"General Summary\" sheet of\r\n tech_labour_hours.xlsx.\r\n\r\n :return: Float representing number of hours for which a tech is paid in a year.\r\n \"\"\"\r\n\r\n hours_paid_per_year = gen_sum_df[\"hours_paid_per_year\"]\r\n\r\n return hours_paid_per_year\r\n\r\n\r\ndef read_semi_prod_days():\r\n \"\"\"\r\n Reads in \"semi_prod_days_per_year\" from \"General Summary\" sheet of tech_labour_hours.xlsx.\r\n\r\n :return: Float representing number of days in a year less weekends, stats, and sick days.\r\n \"\"\"\r\n\r\n semi_prod_days_per_year = gen_sum_df[\"semi_prod_days_per_year\"]\r\n\r\n return semi_prod_days_per_year\r\n\r\n\r\ndef read_hours_per_day():\r\n \"\"\"\r\n Reads in average hours worked per day from \"General Summary\" sheet of tech_labour_hours.xlsx.\r\n\r\n :return: Float representing average hours worked per day by a tech.\r\n \"\"\"\r\n\r\n avg_hours_per_day = gen_sum_df[\"avg_hours_per_day\"]\r\n\r\n return avg_hours_per_day\r\n\r\n\r\ndef read_tech_staff_salary_sched():\r\n \"\"\"\r\n Reads data from \"Tech Staff Salary Sched\" sheet in staff_salaries.xlsx, which gives us information about tech staff\r\n hourly wage by hour and years of experience---function currently only looks at the \"year6_hourly_wage\" column.\r\n\r\n :return: Dict with the following format:\r\n Key: level (int: 8, 9, 10, 12)\r\n Value: year6_hourly_wage (float)\r\n \"\"\"\r\n\r\n tech_staff_salary_df = pd.read_excel(staff_salaries_file_path, sheet_name=\"Tech Staff Salary Sched\")\r\n tech_staff_salary_dict = tech_staff_salary_df.set_index(\"level\")[\"year6_hourly_wage\"].to_dict()\r\n\r\n return tech_staff_salary_dict\r\n\r\n\r\n\"\"\"\r\n########################################################################################################################\r\n######################################## COSTCENTRE CLASS BELOW 
########################################################\r\n########################################################################################################################\r\n\"\"\"\r\n\r\n\r\nclass CostCentre:\r\n \"\"\"\r\n This class handles data and behaviour specific to each cost centre. This includes information about staffing levels\r\n in the cost centre, the amount of OH incurred, etc.\r\n \"\"\"\r\n\r\n # Updated in __init__ to store list of RegionalStaff objects\r\n regional_staff = None\r\n\r\n # DataFrame with information about staffing levels at each cost centre\r\n tech_staff_df = read_tech_staff_ref()\r\n\r\n # Dict with tech staff hourly wages by level\r\n tech_staff_salary_dict = read_tech_staff_salary_sched()\r\n\r\n # Number of hours techs get paid for each year\r\n hours_paid_per_year = read_hours_paid_per_year()\r\n\r\n # % of time that techs spend doing non-device related work (i.e. attending meetings, assisting clinical staff, etc.)\r\n OH_TECH_TIME_PERCENTAGE = 0.35\r\n\r\n # Path to directory containing financial reports\r\n financial_reports_folder_path = \"model_inputs/financial_reports/\"\r\n\r\n # Average number of hours a tech works in a day\r\n hours_worked_per_day = read_hours_per_day()\r\n\r\n # Number of days in a year less weekends, stat holidays, and average annual sick days\r\n semi_prod_days_per_year = read_semi_prod_days()\r\n\r\n # Average number of vacation days granted to a tech by level\r\n annual_vac_days_by_level = read_tech_vac_summary()\r\n\r\n def __init__(self, asset, budget_report):\r\n \"\"\"\r\n :param asset: Asset object used to initialize the list of Asset objects associated with this cost centre\r\n :param budget_report: BudgetReport object\r\n \"\"\"\r\n # Cost centre name\r\n self.name = asset.cost_centre\r\n # List of asset objects in this cost centre\r\n self.assets = [asset]\r\n # Health authority under which this cost centre falls\r\n self.health_auth = asset.health_auth\r\n # Function of this cost centre (clinical, renal, imaging)\r\n self.function = asset.function\r\n # List of all RegionalStaff objects\r\n CostCentre.regional_staff = self.get_regional_staff_objects(budget_report)\r\n # Contribution to cost centre OH from regional staff total compensation\r\n self.regional_staff_oh = self.compute_regional_staff_oh(self.regional_staff)\r\n # List of TechStaff objects\r\n self.tech_staff = self.create_tech_staff_objects()\r\n # Contribution to cost centre OH from tech staff total compensation\r\n self.tech_staff_oh = self.compute_tech_staff_oh()\r\n # Contribution to cost centre OH from non-labour accounts in financial reports\r\n self.non_labour_oh = self.compute_non_labour_oh()\r\n # Predetermined overhead rate\r\n self.pohr = self.compute_pohr()\r\n # Weighted average hourly tech wage\r\n self.weighted_avg_tech_hourly_wage = self.compute_weighted_avg_tech_hourly_wage()\r\n\r\n def compute_regional_staff_oh(self, regional_staff):\r\n \"\"\"\r\n Iterates through each regional staff and checks to see if this cost centre is overseen by them. 
If it is, then\r\n add the staff's oh_cost_per_cc to this cost centre's regional staff OH.\r\n\r\n :return: Regional staff OH for this cost centre.\r\n \"\"\"\r\n\r\n total_oh = 0\r\n\r\n for staff in regional_staff:\r\n if self.name in staff.cost_centre_responsibility:\r\n total_oh += staff.oh_cost_per_cc\r\n\r\n return total_oh\r\n\r\n def get_regional_staff_objects(self, budget_report):\r\n \"\"\"\r\n Function call to create RegionalStaff objects.\r\n\r\n :return: List of RegionalStaff objects.\r\n \"\"\"\r\n\r\n return budget_report.create_regional_staff_objects()\r\n\r\n def create_tech_staff_objects(self):\r\n \"\"\"\r\n Reads \"Tech Staff\" worksheet from staff_salaries.xlsx to pull the row corresponding to this cost centre. Creates\r\n tech staff objects using pulled data.\r\n\r\n :return: List of TechStaff objects (Maximum 4 objects, one for each level. Each TechStaff object will have a\r\n \"qty\" field that indicates the number of staff of that level working at the cost centre.)\r\n \"\"\"\r\n\r\n # Pull row from df with information relevant to this cost centre\r\n tech_staff_df = self.tech_staff_df[self.tech_staff_df[\"cost_centre_name\"] == self.name]\r\n\r\n # Create a list of floats indicating the quantity of techs for each level:\r\n # - Level 8 techs at tech_level_qty[0]\r\n # - Level 9 techs at tech_level_qty[1]\r\n # - Level 10 techs at tech_level_qty[2]\r\n # - Level 12 techs at tech_level_qty[3]\r\n tech_level_qty = tech_staff_df[[\"level8\", \"level9\", \"level10\", \"level12\"]].values.tolist()[0]\r\n\r\n # Create TechStaff objects and append them to tech_staff\r\n tech_staff = []\r\n\r\n for qty in tech_level_qty:\r\n if qty != 0 and not math.isnan(qty):\r\n\r\n # Create level 8 techs\r\n if tech_level_qty.index(qty) == 0:\r\n tech_staff.append(TechStaff(8,\r\n qty,\r\n self.tech_staff_salary_dict.get(8),\r\n self.hours_paid_per_year,\r\n self))\r\n\r\n # Create level 9 techs\r\n if tech_level_qty.index(qty) == 1:\r\n tech_staff.append(TechStaff(9,\r\n qty,\r\n self.tech_staff_salary_dict.get(9),\r\n self.hours_paid_per_year,\r\n self))\r\n\r\n # Create level 10 techs\r\n if tech_level_qty.index(qty) == 2:\r\n tech_staff.append(TechStaff(10,\r\n qty,\r\n self.tech_staff_salary_dict.get(10),\r\n self.hours_paid_per_year,\r\n self))\r\n\r\n # Create level 12 techs\r\n if tech_level_qty.index(qty) == 3:\r\n tech_staff.append(TechStaff(12,\r\n qty,\r\n self.tech_staff_salary_dict.get(12),\r\n self.hours_paid_per_year,\r\n self))\r\n\r\n return tech_staff\r\n\r\n def compute_tech_staff_oh(self):\r\n \"\"\"\r\n Iterates through TechStaff objects assigned to this cost centre and adds their OH contribution to this cost\r\n centre's tech staff OH cost.\r\n\r\n :return: Tech staff OH for this cost centre.\r\n \"\"\"\r\n\r\n total_tech_labour_cost = 0\r\n\r\n for staff in self.tech_staff:\r\n total_tech_labour_cost += staff.total_compensation\r\n\r\n return self.OH_TECH_TIME_PERCENTAGE * total_tech_labour_cost\r\n\r\n def compute_non_labour_oh(self):\r\n \"\"\"\r\n Compute an estimated non-labour OH for this cost centre based on historical OH amounts from previous years.\r\n\r\n :return: Non-labour OH for this cost centre.\r\n \"\"\"\r\n\r\n # Find the appropriate financial report Excel workbook and worksheet to parse\r\n file_path = self.financial_reports_folder_path + \"{function}/{health_auth}.xlsx\".format(function=self.function, health_auth=self.health_auth)\r\n financials_df = pd.read_excel(file_path, sheet_name=self.name)\r\n\r\n # Pull actual and budgeted partial OH 
for each fiscal year into a list\r\n # For clinical and renal cost centres, partial OH is total expenses less labour expense.\r\n # For imaging cost centres, partial OH is total expenses less labour and contracts expense.\r\n actual_historical_non_labour_oh = financials_df[\"actual_partial_oh\"].tolist()\r\n budgeted_historical_non_labour_oh = financials_df[\"budgeted_partial_oh\"].tolist()\r\n\r\n # Create a list max_historical_non_labour_oh that contains the larger OH value out of actual and budgeted\r\n # partial OH for each fiscal year\r\n max_historical_non_labour_oh = []\r\n index = 0\r\n\r\n while index < len(actual_historical_non_labour_oh):\r\n if actual_historical_non_labour_oh[index] > budgeted_historical_non_labour_oh[index]:\r\n max_historical_non_labour_oh.append(actual_historical_non_labour_oh[index])\r\n else:\r\n max_historical_non_labour_oh.append(budgeted_historical_non_labour_oh[index])\r\n index += 1\r\n\r\n # Compute the mean OH of max_historical_non_labour_oh\r\n mean_max_historical_non_labour_oh = statistics.fmean(max_historical_non_labour_oh)\r\n\r\n return mean_max_historical_non_labour_oh\r\n\r\n def compute_pohr(self):\r\n \"\"\"\r\n Compute POHR for this cost centre by taking the estimated annual OH for this cost centre and dividing it by the\r\n estimated annual tech labour hours for this cost centre.\r\n\r\n :return: Pre-determined overhead rate for this cost centre.\r\n \"\"\"\r\n\r\n annual_labour_hours = 0\r\n\r\n for staff in self.tech_staff:\r\n days_per_staff = self.semi_prod_days_per_year - self.annual_vac_days_by_level.get(staff.level)\r\n annual_labour_hours += (staff.qty * days_per_staff * self.hours_worked_per_day)\r\n\r\n total_oh = self.non_labour_oh + self.regional_staff_oh + self.tech_staff_oh\r\n\r\n # Take 80% of annual labour hours because 80% is the standard productivity rate cited in literature after\r\n # accounting for idle time\r\n return total_oh / (0.8 * annual_labour_hours)\r\n\r\n def compute_weighted_avg_tech_hourly_wage(self):\r\n \"\"\"\r\n Compute weighted average hourly wage for techs at this cost centre based on the number of techs of each level\r\n and the wages they earn.\r\n\r\n :return: A float computed by computing the following for each TechStaff object:\r\n Hourly wage for staff at this level * (Num staff at this level / Total num staff)\r\n \"\"\"\r\n\r\n total_num_staff = 0\r\n weighted_avg_hourly_wage = 0\r\n\r\n for staff in self.tech_staff:\r\n total_num_staff += staff.qty\r\n\r\n if total_num_staff > 0:\r\n for staff in self.tech_staff:\r\n weighted_avg_hourly_wage += self.tech_staff_salary_dict.get(staff.level) * (staff.qty / total_num_staff)\r\n\r\n return weighted_avg_hourly_wage\r\n","sub_path":"costcentre.py","file_name":"costcentre.py","file_ext":"py","file_size_in_byte":14646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248441639","text":"from flask import render_template, request, jsonify\nimport numpy as np\n\nfrom webserver import app, celery\n\nfrom helpers import result_exists\nimport tasks\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n start_angle = int(request.values.get(\"start_angle\"))\n end_angle = int(request.values.get(\"end_angle\"))\n n_angles = int(request.values.get(\"n_angles\"))\n\n angles = np.linspace(start_angle, end_angle, n_angles, dtype=int)\n\n res = celery.send_task('tasks.solve_angles', [list(angles)])\n\n 
print(celery.AsyncResult(res.task_id))\n\n return render_template(\"home.html\", start_angle = start_angle, end_angle = end_angle, n_angles = n_angles, tasks=[celery.AsyncResult(res.task_id).task_id])\n\n return render_template(\"home.html\")\n\n\n@app.route('/results/', methods=['GET'])\ndef result_angle(angle):\n res = result_exists(angle)\n\n if res == None:\n return jsonify({'result': \"None\", 'value':\"\"})\n\n ## Render result?\n return jsonify({'result': angle, 'value': res})","sub_path":"project/application/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568431354","text":"n = int(input())\ndist = [list(map(int, input().split())) for _ in range(n)]\ncheck = [[True] * n for _ in range(n)]\n\nans = 0\nfor k in range(n):\n for i in range(n):\n for j in range(n):\n if i == j or j == k or k == i:\n continue\n\n # 다른 곳을 거치는 최단거리가 존재하는 경우 다리 제거\n if dist[i][j] == dist[i][k] + dist[k][j]:\n check[i][j] = False\n # 잘못된 플로이드 워셜\n elif dist[i][j] > dist[i][k] + dist[k][j]:\n ans = -1\n\nif ans == -1:\n print(ans)\nelse:\n for i in range(n):\n for j in range(i, n):\n if check[i][j]:\n ans += dist[i][j]\n print(ans)\n","sub_path":"problem_solving/2021/210617/210617_6.py","file_name":"210617_6.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"203055558","text":"#Author: timcdoc Feb 2019 (1st python code ever)\n#Needs python3\nimport socket, struct,time,sys\nfrom calc import calcdict\nfrom emu import emudict\nfrom vhdlsim import vhdlsimdict\nfrom marbles import marblesdict\n\nappdict = {}\nappdict[\"calc\"] = calcdict\nappdict[\"emu\"] = emudict\nappdict[\"vhdlsim\"] = vhdlsimdict\nappdict[\"marbles\"] = marblesdict\n\ndef mouse_click(name):\n global ghost, gport, gapp\n x,y = appdict[gapp][name]\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((ghost, gport))\n client_socket.send(struct.pack(\"iii\",x,y,1))\n client_socket.close()\n\ndef do_key_stuff(ovr,sym,ch):\n global ghost, gport\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((ghost, gport))\n client_socket.send(struct.pack(\"iii\",ovr,sym,ch))\n client_socket.close()\n\ndef resize(ovr,width,height):\n global ghost, gport\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((ghost, gport))\n client_socket.send(struct.pack(\"iii\",ovr,width,height))\n client_socket.close()\n\ndef move(ovr,x,y):\n global ghost, gport\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((ghost, gport))\n client_socket.send(struct.pack(\"iii\",ovr,x,y))\n client_socket.close()\n\n# do_key_stuff(\"boyspc\",80,-2,97,97)\n\nghost, gport, gapp = sys.argv[1], int(sys.argv[2], 10), sys.argv[3]\n","sub_path":"py/automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320746596","text":"import numpy as np\n\nxedges = np.linspace(-180, 180, num=181)\nyedges = np.linspace(-180, 180, num=181)\n\nphi_all = np.load('phi.npy')\npsi_all = np.load('psi.npy')\n\nphi = []\npsi = []\n\ndata = []\n\nfor frame in range(len(phi_all)):\n phi.append(phi_all[frame])\n psi.append(psi_all[frame])\n H, xedges, yedges = np.histogram2d(phi, psi, bins=(xedges, yedges))\n 
H0 = np.unique(np.concatenate(H))\n n_states = len(H0)-1\n data.append([frame, n_states])\n\nnp.save('n_discoveredS_time.npy', data)\n\n#####\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams.update({'font.size':20})\nplt.rc('xtick', labelsize=20)\nplt.rc('ytick', labelsize=20)\n \ndata_rl = np.array(np.load('RL/n_discoveredS_time.npy')) \ndata_sl = np.array(np.load('SL/n_discoveredS_time.npy'))\n\nplt.plot(0.005*data_sl[:,0], data_sl[:, 1], lw=1.5, color=\"orangered\", label='Single long trajectory')\nplt.fill_between(0.005*data_sl[:,0], data_sl[:, 1], color=\"orangered\", linewidth=0.0, alpha=0.4)\n\nplt.plot(0.005*data_rl[:,0], data_rl[:, 1], lw=1.5, color=\"midnightblue\", label='REAP trajectories')\nplt.fill_between(0.005*data_rl[:,0], data_rl[:, 1], color=\"midnightblue\", linewidth=0.0, alpha=0.4)\n\nplt.legend(loc=0, fontsize=18)\nplt.xlim([0, 15])\nplt.xticks([0, 5, 10, 15])\nplt.yticks([0, 400, 800])\n\nplt.ylabel('Number of discovered states')\nplt.xlabel('Time ('+ r'$\\mu$'+'s)')\n\nplt.savefig('n_discoveredS_time.png')\nplt.show()\n\n","sub_path":"MDSimulation/Alanine dipeptide/timeVsState.py","file_name":"timeVsState.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171155295","text":"# Python program to find number of distinct \n# permutations of a string. \n \nMAX_CHAR = 26\n \n# Utility function to find factorial of n. \ndef factorial(n) : \n \n fact = 1; \n for i in range(2, n + 1) : \n fact = fact * i; \n return fact \n \n# Returns count of distinct permutations \n# of str. \ndef countDistinctPermutations(st) : \n \n length = len(st) \n freq = [0] * MAX_CHAR \n \n # finding frequency of all the lower \n # case alphabet and storing them in \n # array of integer \n for i in range(0, length) : \n if (st[i] >= 'a') : \n freq[(ord)(st[i]) - 97] = freq[(ord)(st[i]) - 97] + 1; \n \n # finding factorial of number of \n # appearances and multiplying them \n # since they are repeating alphabets \n fact = 1\n for i in range(0, MAX_CHAR) : \n print(fact * factorial(freq[i]))\n fact = fact * factorial(freq[i]) \n \n # finding factorial of size of string \n # and dividing it by factorial found \n # after multiplying \n return factorial(length) / fact \n \n# Driver code \nst = \"abcdefghijklmabcdefghijklm\"\nprint (countDistinctPermutations(st)) \n \n# This code is contributed by Nikita Tiwari. \n","sub_path":"permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145394882","text":"import torch\nimport numpy as np\nfrom .. 
import load_tensor_and_image_from_file, load_image_buffer_to_tensor\nfrom ..params import COCO_INSTANCE_CATEGORY_NAMES as COCO\n\n\nrecognition_nets = ['deeplabv3_resnet101', 'fcn_resnet101']\ndetection_nets = [\n 'alexnet', 'densenet121', 'densenet161', 'densenet169',\n 'densenet201', 'googlenet', 'inception_v3', 'mobilenet_v2', 'resnet101',\n 'resnet152', 'resnet18', 'resnet34', 'resnet50', 'resnext101_32x8d',\n 'resnext50_32x4d', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',\n 'squeezenet1_0', 'squeezenet1_1', 'vgg11', 'vgg11_bn', 'vgg13',\n 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', 'wide_resnet101_2',\n 'wide_resnet50_2'\n]\n\n\ndef get_available_models_names(force_reload=False):\n \"\"\"Gives list of model names available on torch hub\n\n Return:\n models (list[string]): list of pytorch/vision models available\n on pytorch hub\n \"\"\"\n return torch.hub.list('pytorch/vision', force_reload=force_reload)\n\n\ndef check_if_new_models_in_hub():\n \"\"\"Checks if there are new models available on torch hub\n\n Return:\n boolean: True if new models are available, False otherwise\n \"\"\"\n hub_list = get_available_models_names(False)\n local_list = recognition_nets + detection_nets\n if len(set(hub_list) & set(local_list)) == len(local_list):\n for i in local_list:\n if i not in hub_list:\n return True\n return False\n return True\n\n\nclass YoloVision:\n \"\"\"Trained deep neuron network vision interface\n\n Attributes:\n _models: Pretrained net models torch hub instance name\n \"\"\"\n\n _models = 'pytorch/vision',\n\n def __init__(self, device='cpu'):\n \"\"\"YoloVisionRecognition constructor\n\n Args:\n device (string): Name of device used for calculation\n \"\"\"\n if device not in ['cuda', 'cpu']:\n raise Exception('Wrong device type passed to constructor')\n self._device = device\n\n\nclass YoloVisionRecognition(YoloVision):\n \"\"\"Trained deep neuron network vision recognition interface\n \"\"\"\n\n def __init__(self, nn_model=None, device='cpu'):\n \"\"\"YoloVisionRecognition constructor\n\n Args:\n nn_module (string): Deep net vision model\n device (string): Name of device used for calculation\n \"\"\"\n super().__init__(device)\n if nn_model not in recognition_nets:\n raise Exception('Wrong net model for recognition')\n self._nn_module = torch.hub.load('pytorch/vision', nn_model,\n pretrained=True).eval()\n\n @staticmethod\n def find_boxes(output_predictions):\n \"\"\"Sets boxed boundaries for predicted parameters\n\n Args:\n output_predictions (Tensor): AI model output predictions tensor\n\n Returns:\n prediction_boxed (object) Object with keys of predicted object\n names paired with boundary\n boxes of each prediction type\n \"\"\"\n prediction_boxed = {}\n for (x, y), t in np.ndenumerate(output_predictions\n .byte()\n .cpu()\n .numpy()):\n for _, v in np.ndenumerate(t):\n value = v.item()\n if value in prediction_boxed.keys():\n if prediction_boxed[value]['x_min'] > x:\n prediction_boxed[value]['x_min'] = x\n if prediction_boxed[value]['y_min'] > y:\n prediction_boxed[value]['y_min'] = y\n if prediction_boxed[value]['x_max'] < x:\n prediction_boxed[value]['x_max'] = x\n if prediction_boxed[value]['y_max'] < y:\n prediction_boxed[value]['y_max'] = y\n else:\n prediction_boxed[value] = {\n 'x_min': x or 0,\n 'y_min': y or 0,\n 'x_max': x,\n 'y_max': y\n }\n named = {}\n for k in prediction_boxed.keys():\n named[str(COCO[k])] = prediction_boxed[k]\n return named\n\n def recognize_local_file(self, image_path):\n \"\"\"Performs image recognition of local file\n\n 
Args:\n image_path (string): path to image\n\n Returns:\n tuple (input_image, output_predictions) Vectorized image\n to numpy array and AI model\n output predictions tensor\n \"\"\"\n input_batch, input_image = load_tensor_and_image_from_file(\n image_path, self._device\n )\n with torch.no_grad():\n output = self._nn_module(input_batch)\n output = output['out'][0]\n output_predictions = output.argmax(0)\n return input_image, output_predictions\n\n def recognize_buffer(self, image_buf):\n \"\"\"Performs image recognition of given image buffer\n\n Args:\n image_buf (bytes buffer): The image to recognize\n\n Returns:\n output_predictions: AI model output predictions tensor\n \"\"\"\n input_batch = load_image_buffer_to_tensor(image_buf, self._device)\n with torch.no_grad():\n output = self._nn_module(input_batch)\n output = output['out'][0]\n output_predictions = output.argmax(0)\n return output_predictions\n\n\nclass YoloVisionDetection(YoloVision):\n \"\"\"Trained deep neuron network vision recognition interface\n \"\"\"\n\n def __init__(self, nn_model=None, device='cpu'):\n \"\"\"YoloVisionRecognition constructor\n\n Args:\n nn_module (string): Deep net vision model\n device (string): Name of device used for calculation\n \"\"\"\n super().__init__(device)\n if nn_model not in detection_nets:\n raise Exception('Wrong net model for detection')\n self._nn_module = torch.hub.load('pytorch/vision', nn_model,\n pretrained=True).eval()\n\n @staticmethod\n def find_most_probable(output_predictions):\n \"\"\"Looks in to output predictions for most probable detection\n\n Args:\n output_predictions: Tensor with probabilities of Yolo detection\n\n Returns:\n tuple (float, int): tuple of recognition probability and number\n representing Yolo class of the probability\n \"\"\"\n max_probability = torch.max(output_predictions)\n yolo_class_index = (output_predictions ==\n max_probability).nonzero().item()\n return max_probability.item(), yolo_class_index\n\n def detect_local_file(self, image_path):\n \"\"\"Performs image recognition of local file\n\n Args:\n image_path (string): path to image\n\n Returns:\n output_predictions: AI model output probabilities tensor of\n of Yolo classes\n \"\"\"\n input_batch, input_image = load_tensor_and_image_from_file(\n image_path, self._device\n )\n with torch.no_grad():\n output = self._nn_module(input_batch)\n output_predictions = torch.nn.functional.softmax(output[0], dim=0)\n return output_predictions\n\n def detect_buffer(self, image_buf):\n \"\"\"Performs object detection on given image buffer\n\n Args:\n image_buf (bytes buffer): The image to recognize\n\n Returns:\n output_predictions: AI model output probabilities tensor\n of Yolo classes\n \"\"\"\n input_batch = load_image_buffer_to_tensor(image_buf, self._device)\n with torch.no_grad():\n output = self._nn_module(input_batch)\n output_predictions = torch.nn.functional.softmax(output[0], dim=0)\n return output_predictions\n","sub_path":"src/ai_callable/pretrained.py","file_name":"pretrained.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89264491","text":"\nimport pandas as pd\n\n\ndef get_first_last_internal(exon_id, db):\n exon_i = None\n\n exon = db[exon_id]\n reverse = exon.strand=='-'\n\n first_internals = []\n last_internals = []\n for transcript in db.parents(exon, featuretype='transcript'):\n children = db.children(transcript, featuretype='exon',\n order_by='start', reverse=reverse)\n for i, child in 
enumerate(children):\n if child == exon:\n exon_i = i\n first_internal = (exon_i == 1)\n last_internal = (exon_i == (i - 1))\n first_internals.append(first_internal)\n last_internals.append(last_internal)\n return pd.Series(dict(last_internal=any(last_internals),\n first_internal=any(first_internals)))\n","sub_path":"poshsplice/exons.py","file_name":"exons.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324859696","text":"\"\"\"\nDemo on mnist dataset.\n\"\"\"\nimport gzip\nfrom sklearn.metrics import classification_report\nfrom logistic_regression import LogisticRegression\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nfrom utils import get_script_root\n\n__author__ = 'kensk8er'\n\nPROJECT_ROOT = get_script_root(__file__).split('/demos')[0]\n\n\ndef load_data(file_path):\n with gzip.open(file_path, 'rb') as file_:\n (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = pickle.load(file_)\n return X_train, y_train, X_valid, y_valid, X_test, y_test\n\n\nif __name__ == '__main__':\n (X_train, y_train, X_valid, y_valid, X_test, y_test) = load_data(file_path=\"{}/data/mnist.pkl.gz\"\n .format(PROJECT_ROOT))\n clf = LogisticRegression()\n clf.fit(X_train=X_train, X_valid=X_valid, y_train=y_train, y_valid=y_valid, verbose=True)\n y_pred = clf.predict(X=X_test)\n print(classification_report(y_test, y_pred, range(10)))\n clf.save(\"{}/models/logistic_regression.pkl\".format(PROJECT_ROOT))\n clf = LogisticRegression.load(\"{}/models/logistic_regression.pkl\".format(PROJECT_ROOT))\n y_pred = clf.predict(X=X_test)\n print(classification_report(y_test, y_pred, range(10)))\n","sub_path":"demos/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"420902896","text":"import pandas as pd\nimport pylab as pl\nimport glob\nimport os\n\nimport hoehe_kolbenfuss\n\nbasedir = \\\n \"/home/ab/Dokumente/promotion/auswertungen_speichersimulationen/\"\n\n# ATTENTION: order is important section fig_comp_plug_flow !\n\nsims = [\"m1_dt25\", \"m035_dt25\", \"m1_dt15\", \"m035_dt15\"]\n\nfilling_levels = pl.array([30, 60, 90, 120, 150, 180, 210, 240])\nmassflows = pl.array([1.0, 0.35, 1.0, 0.35])\ndt = pl.array([25.0, 25.0, 15.0, 15.0])\n\nmeans = []\nstds = []\n\nlines_list = [ \\\n\n [\"Lines_table_3.000060e+01.csv\", \"Lines_table_6.000005e+01.csv\", \\\n \"Lines_table_9.000015e+01.csv\", \"Lines_table_1.200002e+02.csv\", \\\n \"Lines_table_1.500003e+02.csv\", \"Lines_table_1.800004e+02.csv\", \\\n \"Lines_table_2.100006e+02.csv\", \"Lines_table_2.400000e+02.csv\"], \\\n\n [\"Lines_table_8.600030e+01.csv\", \"Lines_table_1.710008e+02.csv\", \\\n \"Lines_table_2.570012e+02.csv\", \"Lines_table_3.430016e+02.csv\", \\\n \"Lines_table_4.290002e+02.csv\", \"Lines_table_5.140016e+02.csv\", \\\n \"Lines_table_6.000002e+02.csv\", \"Lines_table_6.850016e+02.csv\"], \\\n\n [\"Lines_table_3.000015e+01.csv\", \"Lines_table_6.000025e+01.csv\", \\\n \"Lines_table_9.000035e+01.csv\", \"Lines_table_1.200004e+02.csv\", \\\n \"Lines_table_1.500005e+02.csv\", \"Lines_table_1.800000e+02.csv\", \\\n \"Lines_table_2.100001e+02.csv\", \"Lines_table_2.400002e+02.csv\"], \\\n\n [\"Lines_table_8.600040e+01.csv\", \"Lines_table_1.710011e+02.csv\", \\\n \"Lines_table_2.570015e+02.csv\", \"Lines_table_3.430001e+02.csv\", \\\n \"Lines_table_4.290005e+02.csv\", \"Lines_table_5.140001e+02.csv\", 
\\\n \"Lines_table_6.000005e+02.csv\", \"Lines_table_6.850001e+02.csv\"]\n ]\n\ncwd = os.getcwd()\n\nos.chdir(basedir)\n\nfor k, sim in enumerate(sims):\n\n os.chdir(sim)\n\n # -- fig_mean_per_sim --\n\n fl = sorted(glob.glob(\"q*.csv\"))\n\n means.append(pd.read_table(fl[0], sep = \",\"))\n means[k].columns = [\"Time\", fl[0][3:8]]\n\n for e in fl[1:]:\n\n data_add = pd.read_table(e, sep = \",\")\n data_add.columns = [\"Time\", e[3:8]]\n means[k] = pd.merge(means[k], data_add, on = \"Time\")\n\n means[k][\"0.435\"] -= 273.15\n means[k][\"0.455\"] -= 273.15\n\n heights = pl.array(means[k].keys())[1:].astype(float)\n\n pl.figure()\n\n for level in filling_levels:\n\n index = pl.argmin(abs(means[k][\"Time\"] - (level / massflows[k])))\n\n pl.plot(means[k].loc[index][1:][0:].values, heights, \\\n label = \"filling level = \" + str(level) + \" kg\" )\n\n pl.xlim([58.0, 76.0])\n pl.ylim([0.0, 2.4])\n pl.grid(True)\n\n pl.ylabel(\"Height (m)\")\n pl.xlabel(\"Temperature ($^{\\circ}$C)\")\n\n pl.legend(loc=\"lower right\")\n pl.title(\"Mean temperature per stratum in \" + sim)\n\n pl.savefig(basedir + \"fig_mean_per_sim/\" + sim + \".png\", bbox_inches='tight')\n pl.close()\n\n # pl.show()\n\n\n # -- fig_plug_flow --\n\n for j, dia in enumerate(lines_list[k]):\n\n lines = pd.read_table(dia, sep = \",\")\n lines[\"Temperature (K)\"] -= 273.15\n\n gb = lines.groupby([\"Y (m)\", \"X (m)\"])\n\n grps = dict(list(gb))\n\n # pairs = [(0.22, 0.0), (0.0, 0.22), (-0.22, 0.0), (0.0, -0.22), \\\n # (0.44, 0.0), (0.0, 0.44), (-0.44, 0.0), (0.0, -0.44)]\n\n pairs = [(0.33, 0.0), (0.0, 0.33), (-0.33, 0.0), (0.0, -0.33)]\n\n pl.figure()\n\n for pair in pairs:\n\n dataset = grps[pair].sort_index(0, \"Z (m)\")\n pl.plot(dataset[\"Temperature (K)\"], dataset[\"Z (m)\"], label = str(pair))\n \n index = pl.argmin(abs(means[k][\"Time\"] - (filling_levels[j] / massflows[k])))\n pl.plot(means[k].loc[index][1:][0:].values, heights, label = \"Mean\")\n\n h_kolbenfuss = hoehe_kolbenfuss.hoehe_kolbenfuss(filling_levels[j] / 1000.0)\n h_speicher = 2.335\n\n pl.plot([60.0, 60.0, 60.0 + dt[k], 60.0 + dt[k]], \\\n [0.0, h_speicher - h_kolbenfuss, h_speicher - h_kolbenfuss, h_speicher], \\\n label = \"Plug-flow\")\n\n pl.xlim([58.0, 86.0])\n pl.ylim([0.0, 2.4])\n pl.grid(True)\n\n pl.ylabel(\"Height (m)\")\n pl.xlabel(\"Temperature ($^{\\circ}$C)\")\n\n pl.legend(loc=\"lower right\")\n pl.title(\"Temperature distribution in \" + sim + \\\n \" (filling level = \" + str(filling_levels[j]) + \" kg)\")\n \n pl.savefig(basedir + \"fig_plug_flow/plug_flow_\" + sim + \"_fl\" + \\\n str(filling_levels[j]) + \\\n \"kg.png\", bbox_inches='tight')\n pl.close()\n \n # pl.show()\n\n \n os.chdir(\"..\")\n\n\n# -- fig_comp_plug_flow --\n\nfor level in filling_levels:\n\n for k in range(len(sims) / 2):\n\n pl.figure()\n\n index = pl.argmin(abs(means[2*k][\"Time\"] - (level / massflows[2*k])))\n pl.plot(means[2*k].loc[index][1:][0:].values, heights, \\\n label = sims[2*k])\n \n index = pl.argmin(abs(means[2*k+1][\"Time\"] - (level / massflows[2*k+1])))\n pl.plot(means[2*k+1].loc[index][1:][0:].values, heights, \\\n label = sims[2*k+1])\n\n h_kolbenfuss = hoehe_kolbenfuss.hoehe_kolbenfuss(level / 1000.0)\n h_speicher = 2.335\n\n pl.plot([60.0, 60.0, 60.0 + dt[2*k], 60.0 + dt[2*k]], \\\n [0.0, h_speicher - h_kolbenfuss, h_speicher - h_kolbenfuss, h_speicher], \\\n label = \"Plug-flow \") \n\n pl.xlim([58.0, 86.0])\n pl.ylim([0.0, 2.4])\n pl.grid(True)\n\n pl.ylabel(\"Height (m)\")\n pl.xlabel(\"Temperature ($^{\\circ}$C)\")\n\n 
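# The 'Plug-flow' curve drawn above is the idealised step profile:\n        # 60 °C from the tank bottom up to the piston foot (at height\n        # h_speicher - h_kolbenfuss) and 60 + dT above it, up to h_speicher.\n\n        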
pl.legend(loc=\"lower right\")\n pl.title(\"Mean per stratum for $\\Delta$T = \" + \\\n str(dt[2*k]) + \" K (filling level = \" + str(level) + \\\n \" kg)\")\n\n pl.savefig(basedir + \"fig_comp_plug_flow/comp_plug_flow_dt\" + \\\n str(int(dt[2*k])) + \\\n \"_fl_\" + str(level) + \"kg.png\", bbox_inches='tight')\n\n pl.close()\n\n\n# -- fig_mean_per_stratum ---\n\nfor level in filling_levels:\n\n pl.figure()\n\n for k, sim in enumerate(sims):\n\n index = pl.argmin(abs(means[k][\"Time\"] - (level / massflows[k])))\n pl.plot(means[k].loc[index][1:][0:].values, heights, label = sim)\n\n pl.legend(loc=\"lower right\")\n pl.title(\"Mean temperature per stratum (filling level = \" + \\\n str(level) + \" kg)\")\n\n pl.xlim(58.0, 76.0)\n pl.ylim(0.0, 2.4)\n pl.grid(True)\n\n pl.ylabel(\"Height (m)\")\n pl.xlabel(\"Temperature ($^{\\circ}$C)\")\n\n pl.savefig(basedir + \"fig_mean_per_stratum/mean_per_stratum_fl\" + \\\n str(level) + \"kg.png\", bbox_inches='tight')\n pl.close()\n\n # pl.show()\n\n\n# -- fig_standard_deviation --\n\nheights = []\n\nfor k, sim in enumerate(sims):\n\n os.chdir(sim)\n\n fl = sorted(glob.glob(\"s*.csv\"))\n\n stds.append(pd.read_table(fl[0], sep = \",\"))\n stds[k].columns = [\"Time\", fl[0][6:11]]\n\n for e in fl[1:]:\n\n data_add = pd.read_table(e, sep = \",\")\n data_add.columns = [\"Time\", e[6:11]]\n stds[k] = pd.merge(stds[k], data_add, on = \"Time\")\n\n heights.append(pl.array(stds[k].keys())[1:].astype(float))\n\n os.chdir(\"..\")\n\n\nfor level in filling_levels:\n\n pl.figure()\n\n for k, sim in enumerate(sims):\n\n index = pl.argmin(abs(stds[k][\"Time\"] - (level / massflows[k])))\n pl.plot(stds[k].loc[index][1:][0:].values, heights[k], label = sim )\n\n pl.legend(loc=\"lower right\")\n pl.title(\"Standard deviation per stratum (filling level = \" + str(level) + \" kg)\")\n\n pl.xlim(0.0, 3.0)\n pl.ylim(0.715, 2.4)\n pl.grid(True)\n\n pl.ylabel(\"Height (m)\")\n pl.xlabel(\"Standard deviation (K)\")\n\n pl.savefig(basedir + \"fig_standard_deviation/standard_deviation_fl\" + \\\n str(level) + \"kg.png\", bbox_inches='tight')\n pl.close()\n\n # pl.show()\n\nos.chdir(cwd)\n","sub_path":"comp_m035-1_dt15-25.py","file_name":"comp_m035-1_dt15-25.py","file_ext":"py","file_size_in_byte":7597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536570500","text":"from rest_framework import serializers\nfrom rest_framework.serializers import (\n\tModelSerializer,\n\tHyperlinkedIdentityField,\n\tSerializerMethodField\n\t)\n\nfrom component.models import Module,Component,Assembled\nfrom serialnumber.api.serializers import SerialNumberUrlSerializer\n\n\nclass ModuleSerialUrlSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Module\n\t\tfields = ['number','url']\n\t\tlookup_field = 'number'\n\t\textra_kwargs = {\n\t\t\t'url': {'lookup_field': 'number'}\n\t\t}\n\t\t\nclass ModuleSerializer(serializers.ModelSerializer):\n\t# parent \t\t\t= SerialNumberUrlSerializer(many=False,read_only=True)\n\t# reserved_for \t= SerialNumberUrlSerializer(many=False,read_only=True)\n\tclass Meta:\n\t\tmodel = Module\n\t\tfields = ['id','number','parent','reserved_for','slug','title','category1','category2',\n\t\t\t\t'description','pn','rev','datecode','lotcode','supcode',\n\t\t\t\t'registered_date','last_operation','last_modified_date','status',\n\t\t\t\t'user','pn_type','url']\n\t\tlookup_field = 'number'\n\t\textra_kwargs = {\n\t\t\t'url': {'lookup_field': 'number'}\n\t\t}\n\n\n\nclass 
ComponentSerializer(serializers.ModelSerializer):\n\n\tclass Meta:\n\t\tmodel = Component\n\t\tfields = ['number','barcode', 'slug','title','category1','category2','description',\n\t\t\t\t'pn','rev','datecode','lotcode','supcode','qty','carrier',\n\t\t\t\t'msl','floor_life','shelf_life','met','exp_date','baking_start_date',\n\t\t\t\t'baking_finish_date','registered_date','last_modified_date','status','user','url']\n\t\t# lookup_field = 'slug'\n\t\t# extra_kwargs = {\n\t\t# \t'url': {'lookup_field': 'slug'}\n\t\t# }\nclass ComponentUrlSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Component\n\t\tfields = ['number','url']\n\n\nclass AssembledSerializer(serializers.ModelSerializer):\n\t# number \t\t\t= SerialNumberUrlSerializer(many=False,read_only=False)\n\t# module_number \t= ModuleSerialUrlSerializer(many=False,read_only=False)\n\t# component_number = ComponentUrlSerializer(many=False,read_only=False)\n\tclass Meta:\n\t\tmodel = Assembled\n\t\tfields = ['number','pn','rev','pn_type','refdes',\n\t\t\t\t'module_number','component_number','operation',\n\t\t\t\t'note','action_date','action_status','user','url']\n\t\tlookup_field = 'slug'\n\t\textra_kwargs = {\n\t\t\t'url': {'lookup_field': 'slug'}\n\t\t}","sub_path":"wmp/component/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"310661988","text":"# coding=utf-8\nimport time\nimport unittest\nfrom framework.logger import Logger\nlogger = Logger(logger=\"test_1\").getlog()\n\nclass Relicl(unittest.TestCase):\n\n    #@classmethod\n    def setUp(self):\n\n        print (\"----------SetUp -----\\n\")\n    #@classmethod\n    def tearDown(self):\n\n        print (\"-----------TearDown----\\n\")\n\n    def test_1(self):\n\n        a=1\n        b=1\n        c = 2\n        print('判断a+b=c吗?')\n        logger.info(str(a)+\"+\"+ str(b)+ \"=\"+ str(a + b))\n        self.assertEqual(a+b, c) # this assertion passes\n\n\n\n    def test_2(self):\n        a=1\n        b=1\n        c=2\n        print('判断a+b=c吗?')\n        logger.info(str(a)+\"+\"+ str(b)+ \"=\"+ str(a + b))\n        self.assertEqual(a+b, c) # this assertion also passes (1+1 == 2)\n\n\n\n\n\n\n\n\n\nif __name__=='__main__':\n    # unittest.main() # running via main() executes the cases in name order\n    suite=unittest.TestSuite()\n    #suite.addTest(Relicl(\"test_1\"))# add the cases to run to the TestSuite; cases that are not added will not be executed\n    #suite.addTest(Relicl(\"test_2\"))\n    unittest.TextTestRunner().run(suite) # runs the cases in the order they were added\n\n\n","sub_path":"test_case/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"321947758","text":"# Databricks notebook source\n# MAGIC %md \n# MAGIC ## Maple Lake ITT465_ML_ATTRIBUTE\n# MAGIC * version 2\n# MAGIC \n# MAGIC [MAPPING DOCUMENT](https://columbiaoffice.sharepoint.com/:x:/r/sites/pwa/GRPP/_layouts/15/Doc.aspx?sourcedoc=%7Bdf4f6c62-4d14-486a-b930-d19ef035b65a%7D&action=default&uid=%7BDF4F6C62-4D14-486A-B930-D19EF035B65A%7D&ListItemId=5966&ListId=%7BCBB0CE02-27B3-474D-9BEE-F2B0F0B8BCD3%7D&odsp=1&env=prod)\n# MAGIC ```\n# MAGIC - Input Table : entity_buyer_groups , Approved vendor list by products , Released product variants\n# MAGIC - Transforming the Data : Union all three entities' required fields (entity_buyer_groups , Approved vendor list by products & Released product variants)\n# MAGIC ```\n# MAGIC #### Expected Output\n\n# COMMAND ----------\n\n# Set job start time\nimport datetime\nExecutionStartTime=datetime.datetime.now()\n\n\n# setup for some runtime 
parameters\n\ndbutils.widgets.text('debugLevel', 'trite', 'Debug Level: none, trite, verbose')\ndebugLevel = dbutils.widgets.get('debugLevel')\n\n\n# COMMAND ----------\n\n# MAGIC %run ../includes/ml_helper_v2 \n\n# COMMAND ----------\n\n# familyName = \"CscMapleLakeDataBricks\"\n# operationName = \"ITT465_ML_ATTRIBUTE\"\n# instumentationKey = dbutils.secrets.get(scope=\"ML_automation_scope\", key=\"instrumentation-key\")\n# operationId = \"ITT458_ML_ATTRIBUTE\"\n# parentOperationId = \"_ML_ATTRIBUTE_ITT462\"\n# instanceId = str(uuid.uuid4())\n\n# #setup other needed variables\n# # Shouldn't need to alter below\n# loggingState = LoggingState(familyName, operationName, instanceId, operationId, parentOperationId)\n# tc = NewTelemetryClient(instumentationKey, operationName, operationId, parentOperationId)\n\n# COMMAND ----------\n\n# DBTITLE 1,Load the Buyer Groups data\nml_buyer_groups = FetchEntityByHandle(\"buyer_groups\")\nif debugLevel == 'verbose':\n print(\"ml_buyer_groups record counts (orginal/noDupicates): \" + str(ml_buyer_groups.count()) + \"/\" + str(ml_buyer_groups.dropDuplicates().count()))\nml_buyer_groups = ml_buyer_groups.dropDuplicates()\n\n# COMMAND ----------\n\n# DBTITLE 1,Load the ApprovedVendorListByProducts file and AlternateApprovedVendorListByProducts\nml_approved_vendorlist = FetchEntityByHandle(\"approved_vendor_list\")\nif debugLevel == 'verbose':\n print(\"ml_approved_vendorlist record counts (orginal/noDupicates): \" + str(ml_approved_vendorlist.count()) + \"/\" + str(ml_approved_vendorlist.dropDuplicates().count())) \n \nml_alternateVendor_df_temp = FetchEntityByHandle(\"approved_multivendors\") \n\n# COMMAND ----------\n\nsqlContext.registerDataFrameAsTable(FetchEntityByHandle(\"approved_vendor_list_raw\"), 'Tvendorlist')\nsqlContext.registerDataFrameAsTable(ml_alternateVendor_df_temp, 'Talternateavendorlisttemp')\n\nml_alternateVendor_df = sqlContext.sql(\"\"\"SELECT * FROM Tvendorlist where ITEMNUMBER IN (SELECT ITEMNUMBER FROM Talternateavendorlisttemp) \"\"\")\nif debugLevel == 'verbose':\n print(\"ml_alternateVendor_df record counts (orginal/noDupicates): \" + str(ml_alternateVendor_df.count()) + \"/\" + str(ml_alternateVendor_df.dropDuplicates().count())) \n\n# COMMAND ----------\n\n# DBTITLE 1,entity_released_product_variants\nml_released_productvariants = FetchEntityByHandle(\"released_product_variants\")\n\n# COMMAND ----------\n\n# DBTITLE 1,Transformation logic\nsqlContext.registerDataFrameAsTable(ml_buyer_groups, 'Tbuyergroup')\nsqlContext.registerDataFrameAsTable(ml_approved_vendorlist, 'Tavendorlist')\nsqlContext.registerDataFrameAsTable(ml_released_productvariants, 'Trpvariants')\nsqlContext.registerDataFrameAsTable(ml_alternateVendor_df, 'Talternateavendorlist')\n\ndf_listno10001 = sqlContext.sql(\"\"\" \nSELECT '10001' AS LISTNO , GROUPID AS ATTRIBUTE_ID , GROUPDESCRIPTION AS ATTRIBUTE_NAME\nFROM Tbuyergroup\nWHERE GROUPID != '' AND GROUPDESCRIPTION != ''\nGROUP BY ATTRIBUTE_ID, ATTRIBUTE_NAME \"\"\")\n\ndf_listno10002 = sqlContext.sql(\"\"\"\nSELECT '10002' AS LISTNO , PRODUCTCOLORID AS ATTRIBUTE_ID , PRODUCTCOLORID AS ATTRIBUTE_NAME\nFROM Trpvariants\nWHERE PRODUCTCOLORID != '' AND PRODUCTCOLORID != ''\nGROUP BY ATTRIBUTE_ID, ATTRIBUTE_NAME \"\"\")\n\ndf_listno10003 = sqlContext.sql(\"\"\"\nSELECT '10003' AS LISTNO , VENDORACCOUNTNUMBER AS ATTRIBUTE_ID , MCSVENDORNAME AS ATTRIBUTE_NAME\nFROM Tavendorlist\nGROUP BY ATTRIBUTE_ID, ATTRIBUTE_NAME \"\"\")\n\ndf_listno10004 = sqlContext.sql(\"\"\" SELECT '10004' AS LISTNO , VENDORACCOUNTNUMBER AS 
ATTRIBUTE_ID , MCSVENDORNAME AS ATTRIBUTE_NAME\nFROM Talternateavendorlist\nGROUP BY ATTRIBUTE_ID, ATTRIBUTE_NAME \"\"\")\n\ndf_listno10005 = sqlContext.sql(\"\"\" SELECT '10005' AS LISTNO , MCSSEASON AS ATTRIBUTE_ID , MCSSEASONNAME AS ATTRIBUTE_NAME\nFROM Trpvariants\nWHERE MCSSEASON != '' AND MCSSEASONNAME != ''\nGROUP BY ATTRIBUTE_ID, ATTRIBUTE_NAME \"\"\")\n\nml_attribute_df = df_listno10001.union(df_listno10002).union(df_listno10003).union(df_listno10004).union(df_listno10005)\n\nml_attribute_df = ml_attribute_df.dropDuplicates()\n\nfreeDF(df_listno10005)\nfreeDF(df_listno10004)\nfreeDF(df_listno10003)\nfreeDF(df_listno10002)\nfreeDF(df_listno10001)\n\n# COMMAND ----------\n\n# DBTITLE 1, Write out the ML_ATTRIBUTE data\n# Configuration - Database Connection Info\ntableName = \"ML_ATTRIBUTE_D365_v2\"\n\nprint(\"Sending \" + str(ml_attribute_df.count()) + \" records to the database.\")\nresult = WriteFrameToMapleLakeDeltaTables(tableName = tableName, df_in = ml_attribute_df)\n\nif result != \"Success\":\n print(\"Failed\")\n #print(loggingState.notebookError)\nelse:\n freeDF(ml_attribute_df)\n print(\"Completed Successfully\")\n","sub_path":"C1-SIT3/edw_intg/maplelake/allocations/ML_ATTRIBUTE.py","file_name":"ML_ATTRIBUTE.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424232419","text":"import math\nimport Image\n\n\ndef resize_and_crop(image, width, height):\n image_filename = image.filename\n actual_width, actual_height = image.size\n width, height = float(width), float(height)\n new_width, new_height = 0, 0\n crop_x, crop_y = 0, 0\n width_ratio = actual_width / width\n height_ratio = actual_height / height\n if width_ratio < height_ratio:\n new_width = width\n new_height = math.ceil(actual_height * new_width / actual_width)\n crop_y = int(math.fabs((new_height / 2) - (height / 2)))\n else:\n new_height = height\n new_width = math.ceil(actual_width * new_height / actual_height)\n crop_x = int(math.fabs((new_width / 2) - (width / 2)))\n new_width = int(new_width)\n new_height = int(new_height)\n resized_image = image.resize((new_width, new_height))\n width, height = int(width), int(height)\n cropped_image = resized_image.crop(\n (crop_x, crop_y, crop_x + width, crop_y + height)\n )\n return cropped_image\n\n\ndef grayscale(image):\n return image.convert('L')\n\n\ndef black_and_white(image):\n return image.convert('1')\n\n\ndef _get_svg_tiles_path(image, x1, y1, x2, y2, path_dict=None):\n if not isinstance(path_dict, dict):\n path_dict = {}\n color = None\n svg_code = ''\n approved = True\n width = x2 - x1\n height = y2 - y1\n for x in range(x1, x2):\n for y in range(y1, y2):\n pixel = image.getpixel((x, y))\n if not color:\n color = pixel\n if color != pixel:\n approved = False\n if width > height:\n _get_svg_tiles_path(\n image, x1, y1, x1 + width / 2, y2, path_dict)\n _get_svg_tiles_path(\n image, x1 + width / 2, y1, x2, y2, path_dict)\n else:\n _get_svg_tiles_path(\n image, x1, y1, x2, y1 + height / 2, path_dict)\n _get_svg_tiles_path(\n image, x1, y1 + height / 2, x2, y2, path_dict)\n break\n if not approved:\n break\n if approved:\n if not color in path_dict:\n path_dict[color] = \"\"\n path_dict[color] += ' M%(x1)d %(y1)d H%(x2)d V%(y2)d H%(x1)d Z' % {\n 'x1': x1,\n 'x2': x2,\n 'y1': y1,\n 'y2': y2\n }\n return path_dict\n\n\ndef _get_color(color_tuple):\n \"\"\"\n Converts and returns the color in the following format:\n In canse of a rgb tuple:\n #%02x%02x%02x (i.e: #00FFCC)\n 
In case of a rgba tuple:\n rgba(color_tuple)\n \"\"\"\n if len(color_tuple) == 3:\n return '#%02x%02x%02x' % color_tuple\n return 'rgba%s' % str(color_tuple)\n\n\ndef svg_source(image):\n source = '''\\\n \\\n %(path_sources)s\n \n '''\n path_sources = ''\n width, height = image.size\n path_dict = _get_svg_tiles_path(image, 0, 0, width, height)\n for color, path in path_dict.iteritems():\n path = path.strip()\n path_sources += '' % {\n 'color': _get_color(color),\n 'path': path,\n }\n return source % {\n 'width': width,\n 'height': height,\n 'path_sources': path_sources,\n }\n","sub_path":"py/pyit.py","file_name":"pyit.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123449463","text":"from pptx import Presentation\nimport os\n\ndef openPPT(filename):\n\tos.system(\"\\\"C:\\\\Program Files\\\\Microsoft Office\\\\Office14\\\\POWERPNT.EXE\\\" \"+ filename)\n\n\n\n\nprint(\"holi\")\nprs = Presentation('lala.pptx')\n#f = open('test.pptx')\n\ntitle_slide_layout = prs.slide_layouts[0]\nslide = prs.slides.add_slide(title_slide_layout)\ntitle = slide.shapes.title\nsubtitle = slide.placeholders[1]\n\ntitle.text = \"FALAMOS\"\nsubtitle.text = \"python-pptx was here!\"\n\nprs.save(\"lala.pptx\")\nopenPPT(\"lala.pptx\")\n","sub_path":"Proyectos/otros/ejemploppt.py","file_name":"ejemploppt.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312690166","text":"\"\"\"\nAuthor: Shiyu Cheng (23329948)\nISTA 350 Hw7\nSL: Jacob Heller\nDate: 4/20/20\nSummary: Intro to web scrapping. Grabs the data you need from the web, put it into \nan html parser, and save the result into a file.\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests, zlib, gzip, os, json\n\n\ndef get_soup(url=None, fname=None, gzipped=False):\n \"\"\"\n This function has three parameters. The first is a string representing a URL and has\n a default argument of None. The second is a string named fname representing a filename\n also with default argument of None. The third is a Boolean named gzipped with a default\n value of False. True is passed to this parameter if the html to be parsed is gzipped.\n If the filename is not None, the file is opened and then passed the resulting file pointer\n to the BeautifulSoup constructor, and return the resulting object. If the url is None,\n a RuntimeError with a message is returned. If it is not None, a get request is sent to the\n server. If the response content is zipped, it is unzipped. Then the content is passed\n to the BeautifulSoup constructor and the resulting object is returned.\n :param url: string\n :param fname: string\n :param gzipped: boolean\n :return: BeautifulSoup\n \"\"\"\n if fname:\n return BeautifulSoup(open(fname))\n if not url:\n raise RuntimeError(\"Either url or filename must be specified.\")\n request = requests.get(url)\n if gzipped:\n return BeautifulSoup(zlib.decompress(request.content, 16 + zlib.MAX_WBITS))\n return BeautifulSoup(request.content)\n\n\ndef save_soup(fname, soup):\n \"\"\"\n this function takes two arguments, a filename and a soup object. 
It saves a textual\n representation of the soup object in the file.\n :param fname: string\n :param soup: soup\n :return:\n \"\"\"\n with open(fname, 'w') as file:\n file.write(repr(soup))\n file.close()\n\n\ndef scrape_and_save():\n \"\"\"\n this function scrapes the following addresses, soupifies\n the contents, and stores a textual representation of these\n objects in the files 'wrcc_pcpn.html', 'wrcc_mint.html',\n and 'wrcc_maxt.html'\n :return:\n \"\"\"\n save_soup('wrcc_pcpn.html',\n get_soup('https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+028815+por+por+pcpn+none+msum+5+01+F'))\n save_soup('wrcc_mint.html',\n get_soup('https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+028815+por+por+mint+none+mave+5+01+F'))\n save_soup('wrcc_maxt.html',\n get_soup('https://wrcc.dri.edu/WRCCWrappers.py?sodxtrmts+028815+por+por+maxt+none+mave+5+01+F'))\n\n\ndef is_num(my_str):\n try:\n float(my_str)\n return True\n except ValueError:\n return False\n\n\ndef load_lists(soup, flag):\n \"\"\"\n takes in soup obj and flag abd returns list of lists containing the useful data\n in the soup obj. The soup obj contains an html parse tree that describes a table of data\n You will extract the data from the parse tree and store it in the list of lists. In the\n process you will also transpose the data so that the columns in the table are rows in the lists\n of lists and vice versa. Suggest nested for loops.\n Outter loop you traverse the documents table rows and in the inner loop you traverse each rows\n table data fields. the first datum in each row should be a year, which you will need to convert\n to an int. Use if num to find ----- and replace with flag in its place\n :param soup: soup\n :param flag:\n :return: list\n \"\"\"\n lists = []\n for tr in soup.find_all('tr')[1::]:\n row = []\n for td in tr.find_all('td'):\n if td.get_text() == \"-----\":\n row.append(int(flag))\n elif is_num(td.get_text()):\n if float(td.get_text()) < 1894:\n row.append(float(td.get_text()))\n else:\n row.append(int(td.get_text()))\n if row:\n if row[0] > 1893:\n lists.append(row)\n result = []\n for i in range(len(lists[0])):\n col = []\n for lis in lists:\n col.append(lis[i])\n result.append(col)\n return result\n\n\ndef replace_na(data, row, col, flag, precision=5):\n \"\"\"\n 'na' is an abbreviation for not available. This is standard jargon for missing data. In our case,\n we have replaced all missing data with the flag -999as we loaded our list of lists. We want to clean\n our data by putting in reasonable values where data was missing. We are particularly interested in trends\n with time, so replacing missing data with averages of data from nearby years is a natural approach.\n The data for a given month through the years is represented by a row (because we transposed it from\n the website format). Therefore, when confronted with the flag in a position in a row, we will take the\n data from the 5 previous positions in that month's row and 5 following positions and use this to calculate\n an average. In clean_data, we will replace the flag with the average. In this function, we will calculate\n and return that average for clean_data to use. We must delete any occurrences of the flag in the 5\n following years, as that would really mess up the average. This function returns a replacement value for\n data[r][c ] with the average of the surrounding 10 years. Its first parameter is the list, the second\n and third are the row and column, respectively, the next is the flag, and the last is a precision with a\n default value of 5. 
If one of the surrounding years also contains the flag, leave that position out of\n the average. Round the replacement value to the precision specified by the last argument.\n :param data: list\n :param row:\n :param col:\n :param flag:\n :param precision: int\n :return:\n \"\"\"\n count, total, used = 0, 0, 0\n curr = col\n while count != 5:\n curr += 1\n if curr > len(data[row]) - 1:\n break\n count += 1\n if data[row][curr] != flag:\n total += data[row][curr]\n used += 1\n count = 0\n curr = col\n while count != 5:\n curr -= 1\n if curr < 0:\n break\n count += 1\n if data[row][curr] != flag:\n total += data[row][curr]\n used += 1\n replace = round(total / used, precision)\n data[row][col] = replace\n return replace\n\n\ndef clean_data(data, flag, precision=5):\n \"\"\"\n this function traverses the list of lists and every time it finds the flag, it calls replace_na to replace the flag.\n Its parameters are the list, the flag, and a precision with a default value of 5 to be passed on to replace_na.\n :param data: list\n :param flag:\n :param precision:\n :return: none\n \"\"\"\n for row in range(len(data)):\n for col in range(len(data[row])):\n if data[row][col] == flag:\n replace_na(data, row, col, flag, precision)\n\n\ndef recalculate_annual_data(data, value=False, precision=5):\n \"\"\"\n on the website, the last column is the total rainfall for the year or the average annual temperature.\n We have transposed this data, so this information is now in the last row, i.e. the last inner list.\n Because we have replace missing data with reasonable approximations, the data in our annual column no\n longer matches the value calculated from the monthly data. Therefore, we need to recalculate our annual\n data. This function has three parameters. The first is the list of lists (we are recalculating\n the last row); the second a Boolean with a default value of False. The Boolean argument is True if the\n annual data should be averages (temperature data); False if they should be totals (precipitation data).\n The third argument is a precision with a default value of 5. Round the recalculated annual data to\n this precision.In order to minimize round-off errors messing with the test, when recalculating\n averages, round the sum before dividing by N, then round again after div id in\n :param value:\n :param data: 2d list\n :param bool: boolean\n :param precision: int\n :return:\n \"\"\"\n result = []\n _list = data[1:-1]\n for each in range(len(_list[0])):\n current = 0\n total = 0\n while current != len(_list):\n total += _list[current][each]\n current += 1\n result.append(total)\n if value:\n for each in range(len(result)):\n result[each] = round(round(result[each], precision) / len(_list), precision)\n data[-1] = result\n return result\n\n\ndef clean_and_jsonify(fnames, flag, precision=5):\n \"\"\"\n this function takes three arguments. The first is a list of filenames to be cleaned and saved to files as json\n objects. The second is the flag. The third is a precision to be passed on to functions that clean_and_jasonify\n calls. It has a default value of 5. For each file in the first argument, get soup, transform it into a list of\n lists, clean the list, recalculate the annual data, and store it in a file as a json object (as described in\n class). Name your JSON files the same as your html files but with the extension .json. 
So your wrcc_pcpn.html\n    will result in a file called wrcc_pcpn.json.\n    :param fnames: list\n    :param flag:\n    :param precision:\n    :return: none\n    \"\"\"\n    for fname in fnames:\n        data = load_lists(get_soup(fname=fname), flag)\n        clean_data(data, flag, precision)\n        # Temperature tables get annual averages; the precipitation table gets\n        # annual totals. Each file's cleaned data is written out immediately so\n        # the next iteration cannot overwrite it.\n        recalculate_annual_data(data, \"pcpn\" not in fname, precision)\n        print(fname)\n        with open(fname[:-4] + \"json\", 'w') as fp:\n            json.dump(data, fp)\n\n\ndef main():\n    fnames = ['wrcc_pcpn.html', 'wrcc_mint.html', 'wrcc_maxt.html']\n    clean_and_jsonify(fnames, -999, 2)\n\n","sub_path":"assignmengs/assignment_7/hw7.py","file_name":"hw7.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"1487435","text":"import cv2\nimport numpy as np\nfrom skimage.transform import hough_line, hough_line_peaks\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\ndef intersection(line1, line2):\n    \"\"\"Finds the intersection of two lines given in Hesse normal form.\n\n    Returns closest integer pixel locations.\n    See https://stackoverflow.com/a/383527/5087436\n    \"\"\"\n    rho1, theta1 = line1\n    rho2, theta2 = line2\n    A = np.array([\n        [np.cos(theta1), np.sin(theta1)],\n        [np.cos(theta2), np.sin(theta2)]\n    ])\n    b = np.array([[rho1], [rho2]])\n    x0, y0 = np.linalg.solve(A, b)\n    x0, y0 = int(np.round(x0)), int(np.round(y0))\n    return [x0, y0]\n\n\ndef interseccion_lineas(lines):\n    \n    index = 0\n    number = lines.shape[0]\n    intersections = np.array(np.zeros([number*number,2]))\n    for i in range(0, number):\n        line1= lines[i]\n        for j in range(i+1,number):\n            line2= lines[j] \n            intersections[index]= intersection(line1, line2)\n            index+=1\n\n    return intersections, index\n\n\n# main program\nI = cv2.imread('rombo.png', cv2.IMREAD_GRAYSCALE)\nBW = cv2.Canny(I,50,150,apertureSize = 3)\n\nangulos = np.linspace(-np.pi/2, np.pi/2, 360)\nh, theta, d = hough_line(BW, theta=angulos)\n\n# Accumulation map\nfig = plt.figure()\nplt.subplot(111)\nplt.imshow(np.log(1 + h),\n           extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],\n           cmap=cm.hot,aspect=1/15)\n\nfig = plt.figure()\nplt.subplot(111)\nplt.imshow(BW, cmap=cm.gray)\neje_x = np.array((0, BW.shape[1]))\n\nvalores_maximos= hough_line_peaks(h, theta, d)\n# *hough_line_peaks(h, theta, d) unpacks the array\nmax_lines = np.size(valores_maximos,1)\nlines=np.array(np.zeros([max_lines,2]))\nindex=0\n\nfor accum, theta, rho in zip(*valores_maximos):\n    y0 = (rho - eje_x[0] * np.cos(theta)) / np.sin(theta)\n    y1 = (rho - eje_x[1] * np.cos(theta)) / np.sin(theta)\n    lines[index]= [rho, theta]\n    index+=1\n    plt.plot(eje_x, (y0, y1), '-r')\n\nintersections, num_ins = interseccion_lineas(lines)\n\nfor i in range(0,num_ins):\n    x, y = intersections[i]\n    plt.plot(x,y, 'yo')\n\n\nplt.xlim(eje_x)\nplt.ylim((BW.shape[0], 0))\nplt.title('Lineas de mapa de Hough')\n\nplt.show()","sub_path":"06-Procesamieto-Morfologico-Segmentacion/codigos_previos/71_puntos_interseccion.py","file_name":"71_puntos_interseccion.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"455580225","text":"from sklearn import metrics\n\ndef evaluateClusters(X, labels):\n    #Silhouette Coefficient: -1 incorrect clustering and +1 highly dense clustering\n    siScore = metrics.silhouette_score(X, labels, 
metric='precomputed')\n\n    #Calinski-Harabasz Index: High score --> dense and well separated clusters\n    chScore = metrics.calinski_harabasz_score(X, labels)\n\n    #Davies-Bouldin Index: 0 best value --> indicates partition\n    dbScore = metrics.davies_bouldin_score(X, labels)\n\n    return siScore, chScore, dbScore","sub_path":"evaluation/insintric.py","file_name":"insintric.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"4324667","text":"#!/usr/bin/python\n#\n# Failed PoC to replicate a MIME/content sniffing vulnerability with\n# Internet Explorer.\n#\nfrom flask import Flask, Response, request, json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef api_root():\n    return 'Welcome'\n\n@app.route('/hello')\ndef api_hello():\n    return 'Hello, anonymous'\n\n@app.route('/hello/<name>/')\ndef api_hello_name(name):\n    if name:\n        data = {\n            'hello' : 'world',\n            'user' : name\n        }\n        \n        js = json.dumps(data)\n        resp = Response(js, status=200, mimetype='application/json')\n        # uncomment the line below to enable a countermeasure for the issue\n        # resp.headers['X-Content-Type-Options'] = 'nosniff'\n        resp.headers['X-Wildfire-Labs'] = 'Blaze Information Security'\n\n        return resp\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","sub_path":"json-contentsniffing.py","file_name":"json-contentsniffing.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"343321235","text":"MatrizRespostas = []\nGabarito = [\"\"] * 10\nVetorResultado = [0] * 5 \nContAcertos = 0\nAluno = 1\nfor i in range(5):\n    MatrizRespostas.append([0]*10)\nprint(\"Digite o gabarito da prova\")\nprint()\nfor c in range(10):\n    Gabarito[c] = str(input(\"Resposta do gabarito? \"))\n    while Gabarito[c] != \"a\" and Gabarito[c] != \"b\" and Gabarito[c] != \"c\" and Gabarito[c] != \"d\":\n        print(\"Erro de digitação, digite novamente a resposta do gabarito a, b, c ou d\")\n        Gabarito[c] = str(input(\"Resposta do gabarito? \"))\nprint() \nfor a in range(5):\n    for b in range(10):\n        if b == 0:\n            print(\"Resposta do aluno\",Aluno)\n            Aluno = Aluno + 1\n            print()\n        MatrizRespostas[a][b] = str(input(\"Qual a reposta do aluno ? 
\"))\n print()\nprint()\nfor x in range(5):\n for z in range(10):\n if MatrizRespostas[x][z] == Gabarito[z]:\n ContAcertos = ContAcertos + 1\n VetorResultado[x] = ContAcertos\n ContAcertos = 0\nprint(\"Abaixo se encontra o vetor resultado contendo respectivamente o total de acertos de cada aluno\")\nprint()\nprint(VetorResultado) ","sub_path":"Lista 6/lista06ex09.py","file_name":"lista06ex09.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178249930","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Expressions to parse a proto.\n\nThese expressions return values with more information than standard node values.\nSpecifically, each node calculates additional tensors that are used as inputs\nfor its children.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\n\nimport abc\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nfrom struct2tensor.expression_impl import parse_message_level_ex\nfrom struct2tensor.ops import struct2tensor_ops\nimport tensorflow as tf\nfrom typing import FrozenSet, Mapping, Optional, Sequence, Set, Tuple, Union\n\n\nfrom google.protobuf.descriptor_pb2 import FileDescriptorSet\nfrom google.protobuf import descriptor\nfrom google.protobuf.descriptor_pool import DescriptorPool\n\n# To the best of my knowledge, ProtoFieldNames ARE strings.\n# Also includes extensions, encoded in a parentheses like (foo.bar.Baz).\nProtoFieldName = str # pylint: disable=g-ambiguous-str-annotation\nProtoFullName = str # pylint: disable=g-ambiguous-str-annotation\n\n# A string representing a step in a path.\nStrStep = str # pylint: disable=g-ambiguous-str-annotation\n\n\ndef is_proto_expression(expr):\n \"\"\"Returns true if an expression is a ProtoExpression.\"\"\"\n return isinstance(\n expr, (_ProtoRootExpression, _ProtoChildExpression, _ProtoLeafExpression))\n\n\ndef create_expression_from_file_descriptor_set(\n tensor_of_protos, proto_name,\n file_descriptor_set):\n \"\"\"Create an expression from a 1D tensor of serialized protos.\n\n Args:\n tensor_of_protos: 1D tensor of serialized protos.\n proto_name: fully qualified name (e.g. \"some.package.SomeProto\") of the\n proto in `tensor_of_protos`.\n file_descriptor_set: The FileDescriptorSet proto containing `proto_name`'s\n and all its dependencies' FileDescriptorProto. 
Note that if file1 imports\n file2, then file2's FileDescriptorProto must precede file1's in\n file_descriptor_set.file.\n\n Returns:\n An expression.\n \"\"\"\n\n pool = DescriptorPool()\n for f in file_descriptor_set.file:\n # This method raises if f's dependencies have not been added.\n pool.Add(f)\n\n # This method raises if proto not found.\n desc = pool.FindMessageTypeByName(proto_name)\n\n return create_expression_from_proto(tensor_of_protos, desc)\n\n\ndef create_expression_from_proto(\n tensor_of_protos,\n desc):\n \"\"\"Create an expression from a 1D tensor of serialized protos.\n\n Args:\n tensor_of_protos: 1D tensor of serialized protos.\n desc: a descriptor of protos in tensor of protos.\n\n Returns:\n An expression.\n \"\"\"\n return _ProtoRootExpression(desc, tensor_of_protos)\n\n\nclass _ProtoRootNodeTensor(prensor.RootNodeTensor):\n \"\"\"The value of the root node.\n\n This not only contains the normal size information, but also information\n needed by its children.\n\n In particular:\n 1. Any needed regular fields are included.\n 2. Any needed extended fields are included.\n 3. Any needed map fields are included.\n 4. if this is an Any proto, any needed casted fields are included.\n\n \"\"\"\n\n def __init__(self, size,\n fields):\n super(_ProtoRootNodeTensor, self).__init__(size)\n self.fields = fields\n\n\nclass _ProtoChildNodeTensor(prensor.ChildNodeTensor):\n \"\"\"The value of a child node.\n\n This not only contains the normal parent_index information, but also\n information needed by its children.\n\n In particular:\n 1. Any needed regular fields are included.\n 2. Any needed extended fields are included.\n 3. Any needed map fields are included.\n 4. if this is an Any proto, any needed casted fields are included.\n \"\"\"\n\n def __init__(self, parent_index, is_repeated,\n fields):\n super(_ProtoChildNodeTensor, self).__init__(parent_index, is_repeated)\n self.fields = fields\n\n\n_ParentProtoNodeTensor = Union[_ProtoRootNodeTensor, _ProtoChildNodeTensor]\n\n\nclass _AbstractProtoChildExpression(expression.Expression):\n \"\"\"A child or leaf proto expression.\"\"\"\n\n def __init__(self, parent, name_as_field,\n is_repeated, my_type):\n super(_AbstractProtoChildExpression, self).__init__(is_repeated, my_type)\n self._parent = parent\n self._name_as_field = name_as_field\n\n @property\n def name_as_field(self):\n return self._name_as_field\n\n def get_needed_fields(self, expr):\n return [self._name_as_field]\n\n def get_path(self):\n \"\"\"Returns the path to the root of the proto.\"\"\"\n return self._parent.get_path().get_child(self.name_as_field)\n\n def get_proto_source(self):\n \"\"\"Returns the proto root.\"\"\"\n return self._parent.get_proto_source()\n\n def get_source_expressions(self):\n # In order to parse this proto, you need to parse its parent.\n return [self._parent]\n\n def calculate(self, sources,\n destinations,\n options):\n [parent_value] = sources\n if isinstance(parent_value, _ProtoRootNodeTensor) or isinstance(\n parent_value, _ProtoChildNodeTensor):\n parsed_field = parent_value.fields.get(self.name_as_field)\n if parsed_field is None:\n raise ValueError(\"Cannot find {} in {}\".format(\n str(self), str(parent_value)))\n return self.calculate_from_parsed_field(parsed_field, destinations)\n raise ValueError(\"Not a _ParentProtoNodeTensor: \" + str(type(parent_value)))\n\n @abc.abstractmethod\n def calculate_from_parsed_field(self,\n parsed_field,\n destinations\n ):\n \"\"\"Calculate the NodeTensor given the parsed fields requested from a 
parent.\n\n Args:\n parsed_field: the parsed field from name_as_field.\n destinations: the destination of the expression.\n Returns:\n A node tensor for this node.\n \"\"\"\n raise NotImplementedError()\n\n def calculation_is_identity(self):\n return False\n\n\nclass _ProtoLeafExpression(_AbstractProtoChildExpression):\n \"\"\"Represents parsing a leaf field.\"\"\"\n\n def __init__(self, parent,\n desc, name_as_field):\n \"\"\"Initialize a proto leaf expression.\n\n Args:\n parent: the parent of the expression.\n desc: the field descriptor of the expression name_as_field.\n name_as_field: the name of the field.\n \"\"\"\n super(_ProtoLeafExpression, self).__init__(\n parent, name_as_field,\n desc.label == descriptor.FieldDescriptor.LABEL_REPEATED,\n struct2tensor_ops._get_dtype_from_cpp_type(desc.cpp_type)) # pylint: disable=protected-access\n # TODO(martinz): make _get_dtype_from_cpp_type public.\n self._field_descriptor = desc\n\n def calculate_from_parsed_field(self,\n parsed_field,\n destinations\n ):\n return prensor.LeafNodeTensor(parsed_field.index, parsed_field.value,\n self.is_repeated)\n\n def calculation_equal(self, expr):\n return (isinstance(expr, _ProtoLeafExpression) and\n self._field_descriptor == expr._field_descriptor) # pylint: disable=protected-access\n\n def _get_child_impl(self,\n field_name):\n return None\n\n def known_field_names(self):\n return frozenset()\n\n def __str__(self): # pylint: disable=g-ambiguous-str-annotation\n return \"_ProtoLeafExpression: {} from {}\".format(self.name_as_field,\n self._parent)\n\n\nclass _ProtoChildExpression(_AbstractProtoChildExpression):\n \"\"\"An expression representing a proto submessage.\n\n Supports:\n A standard submessage.\n An extension submessage.\n A protobuf.Any submessage.\n A proto map submessage.\n Also supports having fields of the above types.\n \"\"\"\n\n def __init__(self, parent,\n desc, is_repeated,\n name_as_field):\n \"\"\"Initialize a _ProtoChildExpression.\n\n This does not take a field descriptor so it can represent syntactic sugar\n fields such as Any and Maps.\n Args:\n parent: the parent.\n desc: the message descriptor of the submessage represented by this\n expression.\n is_repeated: whether the field is repeated.\n name_as_field: the name of the field.\n \"\"\"\n super(_ProtoChildExpression, self).__init__(parent, name_as_field,\n is_repeated, None)\n self._desc = desc\n\n def calculate_from_parsed_field(self,\n parsed_field,\n destinations\n ):\n needed_fields = _get_needed_fields(destinations)\n fields = parse_message_level_ex.parse_message_level_ex(\n parsed_field.value, self._desc, needed_fields)\n return _ProtoChildNodeTensor(parsed_field.index, self.is_repeated, fields)\n\n def calculation_equal(self, expr):\n return (isinstance(expr, _ProtoChildExpression) and\n self._desc == expr._desc and # pylint: disable=protected-access\n self.name_as_field == expr.name_as_field)\n\n def _get_child_impl(self,\n field_name):\n return _get_child(self, self._desc, field_name)\n\n def known_field_names(self):\n return _known_field_names_from_descriptor(self._desc)\n\n def __str__(self): # pylint: disable=g-ambiguous-str-annotation\n return \"_ProtoChildExpression: name_as_field: {} desc: {} from {}\".format(\n str(self.name_as_field), str(self._desc.full_name), self._parent)\n\n\nclass _ProtoRootExpression(expression.Expression):\n \"\"\"The expression representing the parse of the root of a proto.\n\n This class returns a _ProtoRootNodeTensor, that parses out fields for\n _ProtoChildExpression and 
_ProtoLeafExpression to consume.\n \"\"\"\n\n def __init__(self, desc, tensor_of_protos):\n \"\"\"Initialize a proto expression.\n\n Args:\n desc: the descriptor of the expression.\n tensor_of_protos: a 1-D tensor to get the protos from.\n \"\"\"\n super(_ProtoRootExpression, self).__init__(True, None)\n self._descriptor = desc\n self._tensor_of_protos = tensor_of_protos\n\n def get_path(self):\n \"\"\"Returns the path to the root of the proto.\"\"\"\n return path.Path([])\n\n def get_proto_source(self):\n \"\"\"Returns the tensor of protos and the original descriptor.\"\"\"\n return (self._tensor_of_protos, self._descriptor)\n\n def get_source_expressions(self):\n return []\n\n def calculate(self, sources,\n destinations,\n options):\n if sources:\n raise ValueError(\"_ProtoRootExpression has no sources\")\n size = tf.size(self._tensor_of_protos, out_type=tf.int64)\n needed_fields = _get_needed_fields(destinations)\n fields = parse_message_level_ex.parse_message_level_ex(\n self._tensor_of_protos, self._descriptor, needed_fields)\n return _ProtoRootNodeTensor(size, fields)\n\n def calculation_is_identity(self):\n return False\n\n def calculation_equal(self, expr):\n # TODO(martinz): In theory, we could check for the equality of the\n # tensor_of_protos and the descriptors.\n return self is expr\n\n def _get_child_impl(self,\n field_name):\n return _get_child(self, self._descriptor, field_name)\n\n def known_field_names(self):\n return _known_field_names_from_descriptor(self._descriptor)\n\n def __str__(self): # pylint: disable=g-ambiguous-str-annotation\n return \"_ProtoRootExpression: {}\".format(str(self._descriptor.full_name))\n\n\nProtoExpression = Union[_ProtoRootExpression, _ProtoChildExpression, # pylint: disable=invalid-name\n _ProtoLeafExpression]\n\n_ParentProtoExpression = Union[_ProtoChildExpression, _ProtoRootExpression]\n\n\ndef _known_field_names_from_descriptor(\n desc):\n return frozenset([field.name for field in desc.fields])\n\n\ndef _get_field_descriptor(\n desc,\n field_name):\n if path.is_extension(field_name):\n try:\n return desc.file.pool.FindExtensionByName(\n path.get_raw_extension_name(field_name))\n except KeyError:\n return None\n return desc.fields_by_name.get(field_name)\n\n\ndef _get_any_child(\n parent,\n desc, field_name\n):\n \"\"\"Gets the child of an any descriptor.\"\"\"\n if path.is_extension(field_name):\n full_name_child = parse_message_level_ex.get_full_name_from_any_step(\n field_name)\n if full_name_child is None:\n return None\n field_message = desc.file.pool.FindMessageTypeByName(full_name_child)\n return _ProtoChildExpression(parent, field_message, False, field_name)\n else:\n return _get_child_helper(parent, desc.fields_by_name.get(field_name),\n field_name)\n\n\ndef _is_map_field_desc(field_desc):\n return (field_desc.message_type and\n field_desc.message_type.GetOptions().map_entry)\n\n\ndef _get_map_child(\n parent,\n desc, field_name\n):\n \"\"\"Gets the child given a map field.\"\"\"\n [map_field_name, _] = path.parse_map_indexing_step(field_name)\n map_field_desc = desc.fields_by_name.get(map_field_name)\n if map_field_desc is None:\n return None\n if not _is_map_field_desc(map_field_desc):\n return None\n map_message_desc = map_field_desc.message_type\n if map_message_desc is None:\n # Note: I don't know if this is reachable. 
Theoretically, _is_map_field_desc\n # should have already returned false.\n return None\n value_field_desc = map_message_desc.fields_by_name.get(\"value\")\n if value_field_desc is None:\n # Note: I don't know if this is reachable. Theoretically, _is_map_field_desc\n # should have already returned false.\n return None\n # This relies on the fact that the value is an optional field.\n return _get_child_helper(parent, value_field_desc, field_name)\n\n\ndef _get_child_helper(\n parent,\n field_descriptor,\n field_name\n):\n \"\"\"Helper function for _get_child, _get_any_child, and _get_map_child.\n\n Note that the field_descriptor.field_name is not necessarily equal to\n field_name, especially if this is called from _get_map_child.\n\n Args:\n parent: the parent expression\n field_descriptor: the field descriptor of the submessage represented by the\n returned expression, if present. If None, this will just return None.\n field_name: the field name of the _AbstractProtoChildExpression returned.\n\n Returns:\n An _AbstractProtoChildExpression.\n \"\"\"\n if field_descriptor is None:\n return None\n field_message = field_descriptor.message_type\n if field_message is None:\n return _ProtoLeafExpression(parent, field_descriptor, field_name)\n return _ProtoChildExpression(\n parent, field_message,\n field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED,\n field_name)\n\n\ndef _get_child(parent,\n desc, field_name\n ):\n \"\"\"Get a child expression.\n\n This will get one of the following:\n A regular field.\n An extension.\n An Any filtered by value.\n A map field.\n\n Args:\n parent: The parent expression.\n desc: The descriptor of the parent.\n field_name: The name of the field.\n\n Returns:\n The child expression, either a submessage or a leaf.\n \"\"\"\n if isinstance(field_name, path.AnonymousId):\n return None\n if parse_message_level_ex.is_any_descriptor(desc):\n return _get_any_child(parent, desc, field_name)\n if path.is_map_indexing_step(field_name):\n return _get_map_child(parent, desc, field_name)\n # Works for extensions and regular fields, but not any or map.\n return _get_child_helper(parent, _get_field_descriptor(desc, field_name),\n field_name)\n\n\ndef _get_needed_fields(\n destinations):\n field_names = set() # type: Set[StrStep]\n for destination in destinations:\n if isinstance(destination, _AbstractProtoChildExpression):\n field_names.add(destination.name_as_field)\n return field_names\n","sub_path":"struct2tensor/expression_impl/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":16329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141000465","text":"#!/usr/bin/python\nimport json\nfrom sense_emu import SenseHat\n\nsense = SenseHat()\n\nfilename = \"led_display.json\";\n\n\nif filename:\n\twith open(filename, 'r') as f:\n\t\tledDisplayArray = json.load(f)\n\n\n#DEBUG: print results\nfor led in ledDisplayArray:\n\tsense.set_pixel(led[0], led[1], led[2], led[3], led[4])\n\t#print(\"(x:\"+str(led[0])+ \"y:\"+str(led[1])+r:\"+str(led[2])+\"g:\"+str(led[3])+\"b:\"+str(led[4]))\n","sub_path":"led_display.py","file_name":"led_display.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529557700","text":"import requests\nimport re\n\ndef get():\n word=input(\"(・ω・)ノ请输入需要翻译的英文单词:\\n\")\n url=\"http://dict.youdao.com/w/\"+word+\"/#keyfrom=dict2.index\"\n gotit=requests.get(url).content.decode('utf-8')\n 
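# gotit holds the raw HTML of the word's Youdao entry; find() below pulls\n    # the detailed-definition (详细释义) block out of it with regular expressions.\n    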
return gotit\ndef find():\n l1=re.findall(\"详细释义.+
\",get(),re.S)\n l2=re.findall(\" \\w+\",str(l1))\n print(\"翻译结果如下:\\n\")\n for i in l2:\n i=i.strip()\n print(i)\nwhile (1):\n find()\n","sub_path":"dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649302215","text":"import numpy as np\nimport sys\nimport os\nimport csv\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten, Softmax\nfrom keras.utils import np_utils\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import ModelCheckpoint\n\ndrive_path = \"/content/drive/My Drive/ML2019Spring/hw4/\"\ninDrive = False\n\n\nvalid_rate = 0.2\nepochs = 100\nbatch_size = 256\n\ndef preprocess(path):\n\tif inDrive:\n\t\tx_path = drive_path + \"data/x_train.npy\"\n\t\ty_path = drive_path + \"data/y_train.npy\"\n\telse:\n\t\tx_path = \"data/x_train.npy\"\n\t\ty_path = \"data/y_train.npy\"\n\n\tif (os.path.isfile(x_path) and os.path.isfile(y_path)):\n\t\tx_train = np.load(x_path)\n\t\ty_train = np.load(y_path)\n\t\tn_valid = (int)(x_train.shape[0]*valid_rate)\n\telse:\n\t\tx_train = []\n\t\ty_train = []\n\t\twith open(path, newline='') as csvfile:\n\t\t\tnext(csvfile)\n\t\t\trows = csv.reader(csvfile)\n\t\t\tfor row in rows:\n\t\t\t\tone_hot = np.zeros(7)\n\t\t\t\tone_hot[int(row[0])] = 1\n\t\t\t\ty_train.append(one_hot)\n\t\t\t\tx_train.append(np.array( row[1].split() ).astype(np.float))\n\n\t\t# x_train.shape = (28709, 48*48)\n\t\t# y_train.shape = (28709, 7)\n\t\tx_train = np.reshape(np.array(x_train),[-1,48,48,1])\n\t\ty_train = np.array(y_train)\n\n\t\t# 0-255 subject to 0-1\n\t\tx_train = x_train / 255.\n\t\tn_valid = (int)(x_train.shape[0]*valid_rate)\n\t\tif inDrive:\n\t\t\tnp.save(drive_path + 'data/x_train.npy', x_train)\n\t\t\tnp.save(drive_path + 'data/y_train.npy', y_train)\n\t\telse:\n\t\t\tnp.save('data/x_train.npy', x_train)\n\t\t\tnp.save('data/y_train.npy', y_train)\n\n\treturn x_train[:-n_valid], y_train[:-n_valid], x_train[-n_valid:], y_train[-n_valid:]\n\ndef train(epochs, batch_size, argumentation=False):\n\tglobal x_train\n\tglobal y_train\n\tglobal x_valid\n\tglobal y_valid\n\n\t#data argumentation\n\tif argumentation:\n\t\tdatagen = ImageDataGenerator(horizontal_flip=True, rotation_range=30, zoom_range=0.2,\n\t\t\t\tshear_range=0.2, fill_mode='nearest')\n\t\tdatagen.fit(x_train)\n\t#model\n\tmodel = Sequential()\n\n\tmodel.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', input_shape=(48, 48, 1)))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=2))\n\t# model.add(Dropout(0.25))\n\n\tmodel.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=2))\n\t# model.add(Dropout(0.25))\n\n\n\tmodel.add(Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=2))\n\t# 
model.add(Dropout(0.25))\n\n\n\tmodel.add(Flatten())\n\n\tmodel.add(Dense(512, activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Dropout(0.5))\n\n\tmodel.add(Dense(512, activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Dropout(0.5))\n\n\tmodel.add(Dense(7))\n\tmodel.add(Softmax(axis=-1))\n\n\tmodel.summary()\n\t#compiling\n\tadam = Adam(lr=0.0001)\n\tmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n\t\n\t#check point\n\tif inDrive:\n\t\tfilepath=drive_path + 'model_noaug.h5'\n\telse:\n\t\tfilepath='model_noaug.h5'\n\tcheckpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n\tcallbacks_list = [checkpoint]\n\t\n\t#fitting params\n\tsamples_per_epoch = len(y_train)*8\n\t#fitting\n\tif argumentation:\n\t\tmodel.fit_generator(datagen.flow(x_train, y_train, batch_size),\n\t\t\t\tsamples_per_epoch=samples_per_epoch, epochs=epochs,\n\t\t\t\tvalidation_data=(x_valid, y_valid),\n\t\t\t\tcallbacks=callbacks_list)\n\telse:\n\t\tmodel.fit(x_train, y_train,\n\t\t\t\tbatch_size=batch_size,\n\t\t\t\tepochs=epochs,\n\t\t\t\tvalidation_data=(x_valid, y_valid), \n\t\t\t\tcallbacks=callbacks_list)\n\nif __name__==\"__main__\":\n\tif len(sys.argv) > 1:\n\t\tif sys.argv[1].strip('-') == 'G' or sys.argv[1].strip('-') == 'g':\n\t\t\tinDrive = True\n\tif inDrive:\n\t\tpath = drive_path + \"data/train.csv\"\n\t\tprint (\"Train in google drive...\")\n\telse:\n\t\tprint (\"Train in local...\")\n\t\tpath = \"data/train.csv\"\n\n\tx_train, y_train, x_valid, y_valid = preprocess(path)\n\ttrain(epochs, batch_size, False)","sub_path":"hw3/hw3_keras.py","file_name":"hw3_keras.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73061558","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom tools.ui.pager import get_record_index\n\nfrom todolist.models import Item, ItemWork, Step\nfrom secu.models import User\nimport json\n\nclass _ItemWork:\n\n def add(request):\n if request.method == 'POST':\n item = Item.objects.get(pk=request.POST['item_id'])\n instance = ItemWork()\n instance.item = item\n instance.work = request.POST['work']\n instance.minutes = int(request.POST['minutes'])\n instance.entry_user = User.objects.get(user_name=request.session.get('user_name'))\n instance.save()\n return HttpResponse(50000) \n\n def delete(request):\n if request.method == 'POST':\n ItemWork.objects.get(pk=request.POST['id']).delete()\n return HttpResponse(50000) \n\n def edit(request):\n if request.method == 'GET':\n instance = ItemWork.objects.get(pk=request.GET['id'])\n return JsonResponse(instance.json, safe=False) \n elif request.method == 'POST':\n item = Item.objects.get(pk=request.POST['item_id'])\n instance = ItemWork.objects.get(pk=request.POST['id'],item=item) \n instance.work = request.POST['work']\n instance.minutes = int(request.POST['minutes'])\n instance.entry_user = User.objects.get(user_name=request.session.get('user_name'))\n instance.save() \n return HttpResponse(50000)\n\n def list(request):\n if request.method == 'GET':\n record = get_record_index(request)\n item = Item.objects.get(pk=request.GET['item_id'])\n\n data = [\n {\n \"id\": ins.id,\n \"work\": ins.work, \n \"minutes\": ins.minutes,\n \"user\": ins.entry_user.first_name+' '+ ins.entry_user.last_name if ins.entry_user else '',\n \"entry_date\": ins.entry_date \n } for ins in 
ItemWork.objects.filter(item=item)[record['start']:record['end']] \n ] \n\n context = {\n \"totalrecords\": ItemWork.objects.filter(item=item).count(),\n \"data\": data\n } \n return JsonResponse(context, safe=False)\n\n def instance(request):\n if request.method == 'GET':\n instance = Item.objects.get(pk=request.GET['id'])\n\n data = instance.json\n data['step'] = instance.step.id if instance.step else ''\n data['status'] = instance.status.id if instance.status else ''\n data['person_in_charge'] = instance.person_in_charge.id if instance.person_in_charge else ''\n data['total_time'] = instance.minutes\n data['used_total_time'] = _ItemWork.get_used_item_total_time(instance) \n return JsonResponse(data, safe=False) \n\n def get_used_item_total_time(item):\n total_time = 0\n item_works = ItemWork.objects.filter(item=item)\n for item_work in item_works:\n total_time = total_time + item_work.minutes\n return total_time\n ","sub_path":"todolist/views/_ItemWork.py","file_name":"_ItemWork.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110851491","text":"import numpy as np\nfrom scipy.signal import argrelextrema\nfrom display_patterns import *\n\n\n\ndef find_doubles(X, maximas):\n MARGIN_TOPS = PIP\n pairs = []\n for array in maximas:\n nb_maximas = array.shape[0]\n if nb_maximas > 1:\n for m1 in range(0, nb_maximas):\n for m2 in range(nb_maximas, 0, -1):\n if not m1 == m2:\n diff = abs(X[m1]-X[m2])\n if diff <= MARGIN_TOPS:\n pairs.append([m1, m2])\n return pairs\n\ndef find_trios(couples, maximas):\n trios = []\n\n for pair in couples:\n for maxima in maximas[0]:\n if pair[0] < maxima and maxima < pair[1]:\n trios.append([pair, maxima])\n return trios\n\ndef filter_trios(X, trios):\n MARGIN_TOP_BOT = 50*PIP\n new_trios = []\n is_max = True\n is_min = True\n for trio in trios:\n dip = abs(X[1][trio[0][0]]-X[2][trio[1]])\n for i in range(trio[0][0]+1, trio[0][1]):\n if X[1][i] >= X[1][trio[0][0]] or X[1][i] >= X[1][trio[0][1]]:\n is_max = False\n break\n if X[2][i] <= X[1][trio[1]] and not i==trio[1]:\n is_min = False\n break\n #and is_max == True and is_min == True\n if dip > MARGIN_TOP_BOT and is_max == True and is_min == True:\n new_trios.append(trio)\n return new_trios\n\ndef detect_double_top(X):\n global PIP\n PIP = (X[1].max())/10000\n maxOC = np.vstack([X[0], X[3]]).max(axis=0)\n minOC = np.vstack([X[0], X[3]]).min(axis=0)\n maxs = argrelextrema(maxOC, np.greater)\n mins = argrelextrema(minOC, np.less)\n\n if max:\n couples = find_doubles(X[1], maxs)\n if couples and mins:\n trios = find_trios(couples, mins)\n if trios:\n trios = filter_trios(X, trios)\n if trios:\n print(trios)\n display_double_top_bot(X, trios, maxs, mins, True)\n return trios\n return []\n","sub_path":"detect_double_top_bot.py","file_name":"detect_double_top_bot.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"203168922","text":"# A string is a palindrome if it has exactly the same sequence of characters when read left-to-right as it has when read right-to-left. For example, the following strings are palindromes:\n# \"kayak\",\n# \"codilitytilidoc\",\n# \"neveroddoreven\".\n# A string A is an anagram of a string B if A can be obtained from B by rearranging the characters. 
For example, in each of the following pairs one string is an anagram of the other:\n# \"mary\" and \"army\",\n# \"rocketboys\" and \"octobersky\",\n# \"codility\" and \"codility\".\n# Write a function:\n# def solution(S)\n# that, given a non-empty string S consisting of N characters, returns 1 if S is an anagram of some palindrome and returns 0 otherwise.\n# Assume that:\n# N is an integer within the range [1..100,000];\n# string S consists only of lower-case letters (a-z)\n# For example, given S = \"dooernedeevrvn\", the function should return 1, because \"dooernedeevrvn\" is an anagram of the palindrome \"neveroddoreven\". Given S = \"aabcba\", the function should return 0.\n# Complexity:\n# expected worst-case time complexity is O(N);\n# expected worst-case space complexity is O(1) (not counting the storage required for input arguments).\n#\n\ndef solution ( S ):\n flips = dict()\n num_flips = 0\n\n if len(S) == 1:\n return 1\n\n for c in S:\n if c in flips:\n if flips[c] == 0:\n flips[c] = 1\n else:\n flips[c] = 0\n else:\n flips.update({c: 1})\n\n for item in flips.values():\n if item == 1:\n num_flips += 1\n\n if len(S) % 2 == 0:\n # even\n print('even')\n if num_flips == 0:\n return 1\n else:\n return 0\n else:\n # odd\n print('odd')\n if num_flips == 1:\n return 1\n else:\n return 0\n\nif __name__ == \"__main__\":\n print('dooernedeevrvn:', solution('dooernedeevrvn'))\n print('neveroddoreven:', solution('neveroddoreven'))\n \n\n# go through S (string)\n# if len(S) == 1, return 1\n# for each letter in S, \n# set [letter] to 1 if it was 0, to 0 if it was 1\n# if N odd, only one [letter] must be set to 1\n# if N even, no [letter] must be set to 1\n","sub_path":"job_interviews/codility/interview.py","file_name":"interview.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"384078954","text":"#File:larger_lscroggs.py\r\n#Description: Checking the values of two digits to determine which\r\n#is larger\r\n#Author:Lawrence Scroggs\r\n#Date: 10/16/17\r\n#Compiler: Python 3.6.2\r\n#Couldn't figure out how to add float input\r\n#got float to work; couldn't block string\r\n#tried isdecimal and isfloat instead of isdigit. Did not work.\r\n#instructed user to enter whole numbers\r\nprint(\"Find out which number is greater\"\"\\n\\n\")\r\nnumber_one = input(\"Please enter your first whole number \")\r\nprint('')\r\nnumber_two = input(\"Please enter your second whole number \")\r\nprint('')\r\nif number_one.isdigit():\r\n one = int(number_one)\r\nelse:\r\n print(\"Invalid Input\")\r\n exit()\r\nif number_two.isdigit():\r\n two = int(number_two)\r\nelse:\r\n print(\"Invalid Input\")\r\n exit()\r\nif one > two:\r\n print('',one,\"is greater than\",two,'')\r\nelif one < two:\r\n print('',two,\"is greater than\",one,'')\r\nelif one == two:\r\n print(\"The numbers are equal\")\r\nelse:\r\n print(\"Invalid Input\")\r\n exit()\r\n","sub_path":"larger_lscroggs3.py","file_name":"larger_lscroggs3.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"121152332","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nfrom ax.service.utils.instantiation import (\n _get_parameter_type,\n constraint_from_str,\n outcome_constraint_from_str,\n)\nfrom ax.utils.common.testutils import TestCase\n\n\nclass TestInstantiationtUtils(TestCase):\n \"\"\"Testing the instantiation utilities functionality that is not tested in\n main `AxClient` testing suite (`TestServiceAPI`).\"\"\"\n\n def test_parameter_type_validation(self):\n with self.assertRaisesRegex(ValueError, \"No AE parameter type\"):\n _get_parameter_type(list)\n\n def test_constraint_from_str(self):\n with self.assertRaisesRegex(ValueError, \"Bound for sum constraint\"):\n constraint_from_str(\n \"x1 + x2 <= not_numerical_bound\", {\"x1\": None, \"x2\": None}\n )\n with self.assertRaisesRegex(ValueError, \"Outcome constraint bound\"):\n outcome_constraint_from_str(\"m1 <= not_numerical_bound\")\n","sub_path":"ax/service/tests/test_instantiation_utils.py","file_name":"test_instantiation_utils.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"116997261","text":"import math\n\ndef format_plot(a):\n \"\"\"Format a plot for presentation. Function takes one parameter, a matplotlib.axes object.\"\"\"\n\n goldenratio = 1 / 2 * (1 + math.sqrt(5)) # The next few lines are used for the size of plots\n fsx = 7 # Width (in inches) for the figures.\n fsy = fsx / goldenratio # Height (in inches) for the figures.\n\n a.tick_params(labelsize=12)\n a.xaxis.label.set_size(14)\n a.yaxis.label.set_size(14)\n a.title.set_size(16)\n a.legend(fontsize=12)\n a.get_figure().set_size_inches(fsx, fsy)\n a.grid(1)","sub_path":"RandomExamples.py","file_name":"RandomExamples.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"512792027","text":"# Copyright (C) 2021 Square, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under\n# the License.\nload(\"@bazel_skylib//lib:unittest.bzl\", \"analysistest\", \"asserts\")\nload(\":sqldelight.bzl\", \"SqlDelightInfo\", \"sqldelight_codegen\")\nload(\":tests/setup.bzl\", \"test_case\")\n\ndef _test_impl(ctx):\n env = analysistest.begin(ctx)\n target_under_test = analysistest.target_under_test(env)\n asserts.equals(env, \"//:auto_module_test_rule\", target_under_test[SqlDelightInfo].module_name)\n return analysistest.end(env)\n\n_test = analysistest.make(_test_impl)\n\ndef setup():\n return test_case(\n name = \"auto_module\",\n tested_rule = sqldelight_codegen,\n test_rule = _test,\n srcs = [\"foo/bar/baz/blah.sq\"],\n src_dir = \"/bar\",\n package_name = \"baz\",\n )\n","sub_path":"tests/auto_module.bzl","file_name":"auto_module.bzl","file_ext":"bzl","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"153259621","text":"import csv\n\nimport sys\n\n\n# http://adventofcode.com/2017/day/2\n\n# Part 1:\n\n# The spreadsheet consists of rows of apparently-random numbers. 
To make sure the recovery process is on the right\n# track, they need you to calculate the spreadsheet's checksum. For each row, determine the difference between the\n# largest value and the smallest value; the checksum is the sum of all of these differences.\n\n# For example, given the following spreadsheet:\n\n# 5 1 9 5\n# 7 5 3\n# 2 4 6 8\n\n# The first row's largest and smallest values are 9 and 1, and their difference is 8.\n# The second row's largest and smallest values are 7 and 3, and their difference is 4.\n# The third row's difference is 6.\n\n# In this example, the spreadsheet's checksum would be 8 + 4 + 6 = 18.\n\n# What is the checksum for the spreadsheet in your puzzle input?\n\n\n# Part 2:\n\n# It sounds like the goal is to find the only two numbers in each row where one evenly divides the other - that is,\n# where the result of the division operation is a whole number. They would like you to find those numbers on each line,\n# divide them, and add up each line's result.\n\n# For example, given the following spreadsheet:\n\n# 5 9 2 8\n# 9 4 7 3\n# 3 8 6 5\n\n# In the first row, the only two numbers that evenly divide are 8 and 2; the result of this division is 4.\n# In the second row, the two numbers are 9 and 3; the result is 3.\n# In the third row, the result is 2.\n\n# In this example, the sum of the results would be 4 + 3 + 2 = 9.\n\n# What is the sum of each row's result in your puzzle input?\n\n\ndef corruption_checksum(filename):\n checksum = 0\n\n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n\n for row in reader:\n max_val = 0\n min_val = sys.maxsize\n\n for val in row:\n val = int(val)\n if val >= max_val:\n max_val = val\n if val <= min_val:\n min_val = val\n\n checksum += abs(max_val - min_val)\n\n return checksum\n\n\ndef evenly_divisible_corruption_checksum(filename):\n checksum = 0\n\n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n\n for row in reader:\n for i in range(0, len(row)):\n for j in range(i + 1, len(row)):\n left = int(row[i])\n right = int(row[j])\n\n if left % right == 0:\n checksum += int(left / right)\n break\n elif right % left == 0:\n checksum += int(right / left)\n break\n return checksum\n\n\nif __name__ == '__main__':\n\n print(\"The corruption checksum is %d\" % corruption_checksum('day_02_input.csv'))\n print(\"The evently divisible corruption checksum is %d\" % evenly_divisible_corruption_checksum('day_02_input.csv'))\n","sub_path":"day_02/corruption_checksum.py","file_name":"corruption_checksum.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376551861","text":"from tkinter import *\nimport Controller.EventController as EventController\nimport Controller.UserController as UserController\nfrom Model.EventModel import EventModel as EventModel\nfrom Model.UserModel import UserModel as UserModel\nfrom Constants.Constants import Errors\nfrom Constants.Constants import UserFields\nfrom Constants.Constants import Tags\n\n\ncurrent_event: UserModel = None\ncurrent_user: UserModel = None\n\n\ndef read_event():\n eid = 0\n event_title = str(title_input.get())\n tags = stringToEnum(str(tags_input.get()))\n description = str(description_input.get())\n image = str(image_input.get())\n hosts = None\n attendees = []\n event_date = str(event_date_input.get())\n location = str(location_input.get())\n register_period = str(period_input.get())\n new_event = EventModel(eid, event_title, tags.name, description, image, 
hosts, attendees, event_date,\n location, register_period)\n return new_event\n\n\ndef read_user():\n uid = 0\n real_name = str(realname_input.get())\n nickname = str(nickname_input.get())\n gender = str(gender_input.get())\n email = str(email_input.get())\n location = str(user_location_input.get())\n tags = stringToEnum(str(user_tags_input.get()))\n description = user_description_input.get()\n host_events = []\n join_events = []\n new_event = UserModel(uid, real_name, nickname, gender, email, location, tags, description,\n host_events, join_events)\n return new_event\n\n\ndef post_event():\n global current_event\n global current_user\n current_event = read_event()\n if not current_user:\n add_output(\"You have to login first to post events. \\n\")\n else:\n result = EventController.add_event(current_user, current_event)\n if result == Errors.DUPLICATE.name:\n current_event.eid = None\n add_output(\"The same event already exists. \\n\")\n elif result == Errors.FAILURE.name:\n current_event.eid = None\n return_failure()\n add_output('Event #' + str(result) + ' has been posted. \\n')\n current_event.eid = result\n\n result = EventController.host_event(current_user, current_event)\n if result == Errors.DUPLICATE.name:\n current_event.eid = None\n add_output(\"You have already hosted this event. \\n\")\n elif result == Errors.FAILURE.name:\n current_event.eid = None\n return_failure()\n add_output(\"You are the host of event \" + str(current_event.eid) + \" now. \\n\")\n current_event.hosts = current_user.uid\n current_user.host_events.append(current_event.eid)\n print('User #' + current_event.hosts + ' posted event #' + str(current_event.eid) + '. \\n')\n EventController.print_event(current_event)\n return\n\n\ndef update_event():\n global current_event\n global current_user\n event_id = event_id_input.get()\n current_event = read_event()\n current_event.eid = event_id\n host_id = None\n temp_event = EventController.retrieve_event(event_id)\n if temp_event == Errors.MISSING.name:\n add_output('No such event. \\n')\n current_event = None\n return\n if type(temp_event) == type(current_event):\n host_id = str(EventController.retrieve_event(event_id).hosts)\n if current_user.uid != host_id:\n add_output('You have to be the owner to update event #' + event_id + ' . \\n')\n current_event = None\n return\n\n result = EventController.edit_event(current_event)\n\n if result == Errors.MISSING.name:\n add_output('No such event exists. \\n')\n current_event = None\n EventController.print_event(current_event)\n return\n elif result == Errors.SUCCESS.name:\n add_output('Event #' + event_id + ' changed. \\n')\n current_event = EventController.retrieve_event(event_id)\n EventController.print_event(current_event)\n else:\n add_output('Update failed, please try again. \\n')\n current_event = None\n EventController.print_event(current_event)\n return\n\n\ndef remove_user():\n global current_user\n global current_event\n if not current_user:\n add_output('You have to log in first. \\n')\n event_id = event_id_input.get()\n current_event = EventController.retrieve_event(event_id)\n if current_event == Errors.MISSING.name:\n add_output('No such event. \\n')\n current_event = None\n return\n elif current_event == Errors.FAILURE.name:\n return_failure()\n current_event = None\n return\n if current_user.uid != current_event.hosts:\n add_output('You have to be the host to remove attendees. 
\\n')\n current_event = None\n return\n\n user_id = user_id_input.get()\n result = EventController.remove_user(user_id, current_event)\n if result == Errors.MISSING.name:\n add_output('User did not attend. \\n')\n current_event = None\n return\n elif result == Errors.FAILURE.name:\n return_failure()\n current_event = None\n return\n add_output('User #' + result[0] + ' removed from event #' + result[1] + '. \\n')\n return\n\n\ndef register():\n global current_user\n current_user = read_user()\n result = UserController.add_user(current_user)\n if result == Errors.DUPLICATE.name:\n add_output(\"A user with the same credentials already exists! \\n\")\n current_user = None\n elif result == Errors.FAILURE.name:\n return_failure()\n current_user = None\n else:\n add_output(\"User registered JoinMe with email \" + current_user.email + \". \\n\")\n current_user = UserController.retrieve_user(UserFields.email.name, current_user.email)\n UserController.print_user(current_user)\n return\n\n\ndef update_profile():\n global current_user\n if not current_user:\n add_output(\"You have to login first! \\n\")\n return\n temp = UserController.retrieve_user(UserFields.email.name, current_user.email)\n if temp == Errors.MISSING.name:\n add_output(\"No user with such credentials exists. \\n\")\n return\n elif temp == Errors.FAILURE.name:\n return_failure()\n return\n user_id = current_user.uid\n current_user = read_user()\n current_user.uid = user_id\n\n result = UserController.edit_user(current_user)\n if result == Errors.MISSING.name:\n add_output(\"No user with such credentials exists. \\n\")\n current_user = temp\n elif result == Errors.FAILURE.name:\n return_failure()\n current_user = temp\n elif result == Errors.DUPLICATE.name:\n add_output(\"A user with the same credentials already exists! \\n\")\n current_user = temp\n else:\n add_output(\"User updated. Email now at: \" + current_user.email + \". \\n\")\n current_user = UserController.retrieve_user(UserFields.userid.name, result)\n UserController.print_user(current_user)\n return\n\n\ndef login():\n global current_user\n email = user_email_input.get()\n result = UserController.retrieve_user(UserFields.email.name, email)\n if result == Errors.MISSING.name:\n add_output(\"No user with such credential exists. \\n\")\n UserController.print_user(current_user)\n return\n elif result == Errors.FAILURE.name:\n add_output(\"Failed to login. Please try again. \\n\")\n UserController.print_user(current_user)\n return\n add_output(\"You logged in with email \" + email + \". \\n\")\n current_user = UserController.retrieve_user(UserFields.email.name, email)\n UserController.print_user(current_user)\n return\n\n\ndef log_out():\n global current_user\n current_user = None\n text.set(value=\"This is the first iteration demo for JoinMe. \\n\")\n\n\ndef group_email():\n global current_user\n global current_event\n if not current_user:\n add_output('You have to log in first. \\n')\n event_id = event_id_input.get()\n current_event = EventController.retrieve_event(event_id)\n if current_event == Errors.MISSING.name:\n add_output('No such event. \\n')\n current_event = None\n return\n elif current_event == Errors.FAILURE.name:\n return_failure()\n current_event = None\n return\n if current_user.uid != current_event.hosts:\n add_output('You have to be the host to send a group email. 
\\n')\n current_event = None\n return\n\n message = email_message_input.get()\n user_list = current_event.attendees\n EventController.print_event(current_event)\n for user_id in user_list:\n sent = False\n try:\n temp_user = UserController.retrieve_user(UserFields.userid.name, user_id)\n send_email(message, temp_user.email)\n sent = True\n finally:\n if not sent:\n add_output('Unable to send email to ' + temp_user.email + '. \\n')\n return\n\n\ndef contact_friend():\n global current_user\n if not current_user:\n add_output('You have to log in first. \\n')\n message = email_message_input.get()\n nickname = email_nickname_input.get()\n sent = False\n\n try:\n temp_user = UserController.retrieve_user(UserFields.nickname.name, nickname)\n if temp_user == Errors.MISSING.name:\n add_output('No user with nickname ' + nickname + '. Please check again. \\n')\n else:\n send_email(message, temp_user.email)\n sent = True\n finally:\n if not sent:\n add_output('Unable to send email to ' + temp_user.email + '. \\n')\n\n return\n\n\ndef add_output(line: str):\n global text\n text.set(text.get() + line)\n\n\ndef return_failure():\n add_output(\"Connection failed. Please try again. \\n\")\n\n\n#TODO: Replace with sending real email\ndef send_email(message: str, address: str):\n add_output('Message: ' + message + ' send to ' + address + '. \\n')\n\n\ndef join_event():\n global current_event\n global current_user\n event_id = event_id_input.get()\n current_event = EventController.retrieve_event(event_id)\n if current_event == Errors.MISSING.name:\n add_output('Event not found. \\n')\n current_event = None\n return\n elif current_event == Errors.FAILURE.name:\n add_output('Failed to join event. \\n')\n current_event = None\n return\n if not current_user:\n add_output('You have to login first to join events. \\n')\n current_event = None\n return\n\n result = EventController.join_event(current_user, current_event)\n if result == Errors.DUPLICATE.name:\n add_output('You have already joined the event. \\n')\n elif result == Errors.FAILURE.name:\n return_failure()\n current_event = None\n return\n else:\n add_output('You have joined event #' + current_event.eid + '. User ID: ' + current_user.uid + '. 
\\n')\n\n EventController.print_event(current_event)\n return\n\n\ndef stringToEnum(tags_input):\n check_set = set(['sports', 'social', 'outdoors', 'indoors', 'sightseeing',\n 'exhibitions', 'entertaining', 'charity', 'business'])\n if tags_input not in check_set:\n tags_input = 'anything'\n return Tags[tags_input]\n\n\nwindow = Tk()\nwindow.title('JoinMe')\nwindow.geometry('1000x660')\ntitle = Label(window, text='JoinMe',)\ntitle.config(font='Helvetica -20 bold', fg='black')\ntitle.place(x=375, y=20, anchor=\"center\")\n\n# ------------------------------------------- Event -------------------------------------------\n\nEvent_title = Label(window, text='Title:')\nEvent_title.place(x=90, y=30)\ntitle_input = Entry(window, relief='ridge', width=50)\ntitle_input.place(x=170, y=30)\n\nEvent_Description = Label(window, text='Description:')\nEvent_Description.place(x=90, y=60)\ndescription_input = Entry(window, relief='ridge', width=50)\ndescription_input.place(x=170, y=60)\n\nEvent_Tags = Label(window, text='Tags:')\nEvent_Tags.place(x=90, y=90)\n\ntags_input = Entry(window, relief='ridge', width=50)\ntags_input.place(x=170, y=90)\n\n\nEvent_image = Label(window, text='Image URL:')\nEvent_image.place(x=90, y=120)\nimage_input = Entry(window, relief='ridge', width=50)\nimage_input.place(x=170, y=120)\n\nEvent_event_date = Label(window, text='Event Date:')\nEvent_event_date.place(x=90, y=150)\nevent_date_input = Entry(window, relief='ridge', width=50)\nevent_date_input.place(x=170, y=150)\n\nEvent_location = Label(window, text='Location:')\nEvent_location.place(x=90, y=180)\nlocation_input = Entry(window, relief='ridge', width=50)\nlocation_input.place(x=170, y=180)\n\nEvent_period = Label(window, text='Time Period:')\nEvent_period.place(x=90, y=210)\nperiod_input = Entry(window, relief='ridge', width=50)\nperiod_input.place(x=170, y=210)\n\npost_button = Button(window, text=\"Post Event\", command=post_event)\npost_button.place(x=90, y=242.5)\n\nupdate_button = Button(window, text=\"Update Event\", command=update_event)\nupdate_button.place(x=170, y=242.5)\n\nEvent_id_join = Label(window, text='Event ID:')\nEvent_id_join.place(x=260, y=242.5)\n\nevent_id_input = Entry(window, relief='ridge', width=10)\nevent_id_input.place(x=320, y=240.5)\n\njoin_button = Button(window, text=\"Join Event\", command=join_event)\njoin_button.place(x=420, y= 243.5)\n\nEvent_user = Label(window, text='User ID:')\nEvent_user.place(x=180, y=270)\nuser_id_input = Entry(window, relief='ridge', width=10)\nuser_id_input.place(x=240, y=270)\n\nremove_button = Button(window, text=\"Remove User\", command=remove_user)\nremove_button.place(x=90, y=270)\n\nlabel_user = Label(window,\n text='------------------------------------------- User -------------------------------------------')\nlabel_user.place(x=55, y=295)\n\n# ------------------------------------------- User -------------------------------------------\n\nUser_Realname = Label(window, text='Real Name:')\nUser_Realname.place(x=90, y=315)\nrealname_input = Entry(window, relief='ridge', width=50)\nrealname_input.place(x=170, y=315)\n\nUser_Nickname = Label(window, text='Nickname:')\nUser_Nickname.place(x=90, y=345)\nnickname_input = Entry(window, relief='ridge', width=50)\nnickname_input.place(x=170, y=345)\n\nUser_Gender = Label(window, text='Gender:')\nUser_Gender.place(x=90, y=375)\ngender_input = Entry(window, relief='ridge', width=50)\ngender_input.place(x=170, y=375)\n\nUser_Location = Label(window, text='Location:')\nUser_Location.place(x=90, 
y=405)\nuser_location_input = Entry(window, relief='ridge', width=50)\nuser_location_input.place(x=170, y=405)\n\nUser_Email = Label(window, text='Email:')\nUser_Email.place(x=90, y=435)\nemail_input = Entry(window, relief='ridge', width=50)\nemail_input.place(x=170, y=435)\n\nUser_UserTags = Label(window, text='Tags:')\nUser_UserTags.place(x=90, y=465)\nuser_tags_input = Entry(window, relief='ridge', width=50)\nuser_tags_input.place(x=170, y=465)\n\nUser_Description = Label(window, text='Description:')\nUser_Description.place(x=90, y=495)\nuser_description_input = Entry(window, relief='ridge', width=50)\nuser_description_input.place(x=170, y=495)\n\nregister_button = Button(window, text=\"Register\", command=register)\nregister_button.place(x=90, y=525)\n\nupdateProfile_button = Button(window, text=\"Update Profile\", command=update_profile)\nupdateProfile_button.place(x=155, y=525)\n\nuser_email_input = Entry(window, relief='ridge', width=30)\nuser_email_input.place(x=200, y=555)\n\nsearch_button = Button(window, text=\"Login with Email\", command=login)\nsearch_button.place(x=90, y=557.5)\n\nlogout_button = Button(window, text=\"Logout\", command=log_out)\nlogout_button.place(x=490, y=557.5)\n\n# ------------------------------------------- Email -------------------------------------------\n\ncontact_friend_button = Button(window, text=\"Contact Friend\", command=contact_friend)\ncontact_friend_button.place(x=90, y=587.5)\n\nEmail_User_Nickname = Label(window, text='User Nickname:')\nEmail_User_Nickname.place(x=180, y=587.5)\nemail_nickname_input = Entry(window, relief='ridge', width=10)\nemail_nickname_input.place(x=290, y=585)\n\ngroup_email_button = Button(window, text=\"Group Email\", command=group_email)\ngroup_email_button.place(x=90, y=615)\n\nemail_message_input = Entry(window, relief='ridge', width=50)\nemail_message_input.place(x=170, y=610)\n\n# ------------------------------------------- Output -------------------------------------------\n\noutput = Label(window, text='Output')\noutput.place(x=800, y=20)\ntext = StringVar(output)\noutput_value = Label(window, textvariable=text)\ntext.set(value=\"This is the first iteration demo for JoinMe. 
\\n\")\noutput_value.pack()\noutput_value.place(x=700, y=40)\nwindow.mainloop()\n","sub_path":"Frontend_View.py","file_name":"Frontend_View.py","file_ext":"py","file_size_in_byte":16489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251623141","text":"import pygame, sys\nfrom pygame.locals import *\nfrom animacaov2 import *\nfrom menu import *\n\naltura = 400\nlargura = 900\n\n\nclass inimigo(pygame.sprite.Sprite):\n def __init__(self,posx,posy):\n pygame.sprite.Sprite.__init__(self)\n self.imagem1 = pygame.image.load(\"imagens/alien01.png\")\n self.imagem2 = pygame.image.load(\"imagens/alien02.png\")\n self.imagem3 = pygame.image.load(\"imagens/alien03.png\")\n self.imagem4 = pygame.image.load(\"imagens/nave2.png\")\n\n self.listaimagens = [self.imagem1 , self.imagem2, self.imagem3, self.imagem4]\n self.posimagem = 0\n self.imagemalien = self.listaimagens[self.posimagem]\n\n self.rect = self.imagemalien.get_rect()\n self.lista_disparo = []\n self.velocidade = 20\n self.rect.top = posy\n self.rect.left = posx\n self.configTempo = 1\n\n def comportamento(self,tempo):\n if self.configTempo < tempo:\n self.posimagem += 1\n self.configTempo += 1\n if self.posimagem > len(self.listaimagens)-1:\n self.posimagem = 0\n\n def colocar(self,superficie):\n self.imagemalien = self.listaimagens[self.posimagem]\n superficie.blit(self.imagemalien, self.rect)\n\nclass bala(pygame.sprite.Sprite):\n\n\n def __init__(self, posx, posy):\n pygame.sprite.Sprite.__init__(self)\n self.imagemBala = pygame.image.load(\"imagens/naveBala.png\")\n self.rect = self.imagemBala.get_rect()\n self.velocidadeBala = 20\n self.rect.top = posy\n self.rect.left = posx\n\n def trajetoria(self):\n self.rect.top -= self.velocidadeBala\n\n def colocar(self,superficie):\n superficie.blit(self.imagemBala, self.rect)\n\n\nclass nave_espacial(pygame.sprite.Sprite):\n\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imagemNave = pygame.image.load(\"imagens/nave.png\")\n self.rect = self.imagemNave.get_rect()\n self.rect.centerx = largura/2\n self.rect.centery = altura - 30\n\n self.lista_disparo = []\n self.vida = True\n self.velocidade = 25\n\n def movimentodireita(self):\n self.rect.right += self.velocidade\n self.__movimento()\n\n def movimentoesquerda(self):\n self.rect.left -= self.velocidade\n self.__movimento()\n\n def __movimento(self):\n if self.vida == True:\n if self.rect.left <= 0:\n self.rect.left = 0\n elif self.rect.right > 900:\n self.rect.right = 900\n\n def disparo(self, x, y):\n minhaBala = bala(x, y)\n self.lista_disparo.append(minhaBala)\n\n def colocar(self, superficie):\n superficie.blit(self.imagemNave, self.rect)\n\n\ndef jogo():\n pygame.init()\n\n tela = pygame.display.set_mode([largura,altura])\n pygame.display.set_caption(\"SPACE INVADERS\")\n\n jogador = nave_espacial()\n invasor = inimigo(100,50)\n\n imagemfundo = pygame.image.load(\"imagens/cenario.jpg\")\n jogando = True\n relogio = pygame.time.Clock()\n tiro = bala(largura / 2, altura - 20)\n audio = pygame.mixer.Sound(\"audios/intro4.ogg\")\n audio.play()\n audio.set_volume(1)\n\n while True:\n relogio.tick(200)\n tempo = int(pygame.time.get_ticks()/1000)\n tiro.trajetoria()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n \n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n jogador.movimentoesquerda()\n elif event.key == K_RIGHT:\n jogador.movimentodireita()\n elif event.key == K_SPACE:\n x,y = jogador.rect.center\n 
jogador.disparo(x,y)\n\n tela.blit(imagemfundo, (0, 0))\n jogador.colocar(tela)\n invasor.comportamento(tempo)\n invasor.colocar(tela)\n\n if len(jogador.lista_disparo) > 0:\n for x in jogador.lista_disparo:\n x.colocar(tela)\n x.trajetoria()\n if x.rect.top < -10:\n jogador.lista_disparo.remove(x)\n pygame.display.update()\n\n\nanimacao()\nmenu()\njogo()","sub_path":"Pygame/space_invaders.py","file_name":"space_invaders.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269848024","text":"# Exercicio 18\n# Peça para o usuário digitar os valores de a, b e c\n# \n# Calcule o delta 'delta = (b**2)-(4*a*c)'\n# \n# Se o delta der negativo, então deve aparecer a seguinte mensagem: 'Delta negativo! Equação não pode ser resolvida!'\n# \n# Se o delta der igual a zero, então deve aparecer a seguinte mensagem: 'Delta igual a zero!'\n# \n# Se o delta der positivo, então deve aparecer a seguinte mensagem: 'A equação pode ser resolvida!'\nwhile True:\n try:\n a = float(input('Informe o valor de a: '))\n b = float(input('Informe o valor de b: '))\n c = float(input('Informe o valor de c: '))\n delta = (b**2)-(4*a*c)\n if delta < 0:\n print('Delta negativo! Equação não pode ser resolvida!')\n elif delta == 0:\n print('Delta igual a zero!')\n else:\n print('A equação pode ser resolvida!')\n except ValueError:\n print('Oops! Valor inválido! Tente novamente: ')\n else: \n x = input('Digite 1 para continuar.')\n if not(x) or not(x == '1'):\n break","sub_path":"Exercicios/Aulas00/aula05/if_parte2/exercicio18.py","file_name":"exercicio18.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90960999","text":"\"\"\"BulletML implementation.\"\"\"\n\nfrom __future__ import division\n\nfrom math import atan2, sin, cos\n\n__all__ = [\"Action\", \"Bullet\"]\n\nclass Action(object):\n \"\"\"Running action implementation.\n\n To implement new actions, add a new element/class pair to\n parser.ActionDef.CONSTRUCTORS. It should support FromXML,\n __getstate__, and __setstate__, and 5-ary __call__:\n\n def __call__(self, owner, action, params, rank, created)\n\n Which will be called to execute it. 
This function should modify\n owner, action, and created in-place, and return true if action\n execution should stop for this bullet this frame.\n\n \"\"\"\n\n def __init__(self, parent, actions, params, rank, repeat=1):\n self.actions = actions\n self.parent = parent\n self.repeat = repeat\n self.wait_frames = 0\n self.speed = 0\n self.speed_frames = 0\n self.direction = 0\n self.direction_frames = 0\n self.aiming = False\n self.mx = 0\n self.my = 0\n self.accel_frames = 0\n self.previous_fire_direction = 0\n self.previous_fire_speed = 0\n self.params = params\n self.pc = -1\n self.finished = False\n if parent:\n self.copy_state(parent)\n\n def __repr__(self):\n return \"%s(pc=%r, actions=%r)\" % (\n type(self).__name__, self.pc, self.actions)\n\n def vanish(self):\n \"\"\"End this action and its parents.\"\"\"\n if self.parent:\n self.parent.vanish()\n self.pc = None\n self.finished = True\n\n def copy_state(self, other):\n \"\"\"Copy fire/movement state from other to self.\"\"\"\n self.direction_frames = other.direction_frames\n self.direction = other.direction\n self.aiming = other.aiming\n self.speed_frames = other.speed_frames\n self.speed = other.speed\n self.accel_frames = other.accel_frames\n self.mx = other.mx\n self.my = other.my\n self.previous_fire_direction = other.previous_fire_direction\n self.previous_fire_speed = other.previous_fire_speed\n\n def step(self, owner, created):\n \"\"\"Advance by one frame.\"\"\"\n\n if self.speed_frames > 0:\n self.speed_frames -= 1\n owner.speed += self.speed\n\n if self.direction_frames > 0:\n self.direction_frames -= 1\n # I'm still not sure what the aim check is supposed to do.\n if self.aiming and self.direction_frames <= 0:\n owner.direction += owner.aim\n else:\n owner.direction += self.direction\n\n if self.accel_frames > 0:\n self.accel_frames -= 1\n owner.mx += self.mx\n owner.my += self.my\n\n if self.pc is None:\n return\n\n if self.wait_frames > 0:\n self.wait_frames -= 1\n return\n\n s_params = self.params\n rank = owner.rank\n\n while True:\n self.pc += 1\n\n try:\n action = self.actions[self.pc]\n except IndexError:\n self.repeat -= 1\n if self.repeat <= 0:\n self.pc = None\n self.finished = True\n if self.parent is not None:\n self.parent.copy_state(self)\n owner.replace(self, self.parent)\n break\n else:\n self.pc = 0\n action = self.actions[self.pc]\n\n if action(owner, self, s_params, rank, created):\n break\n\nclass Bullet(object):\n \"\"\"Simple bullet implementation.\n\n Attributes:\n x, y - current X/Y position\n px, py - X/Y position prior to the last step\n mx, my - X/Y axis-oriented speed modifier (\"acceleration\")\n direction - direction of movement, in radians\n speed - speed of movement, in units per frame\n target - object with .x and .y fields for \"aim\" directions\n vanished - set to true by a action\n rank - game difficulty, 0 to 1, default 0.5\n tags - string tags set by the running actions\n appearance - string used to set bullet appearance\n radius - radius for collision\n finished - true if all actions are finished and the bullet vanished\n\n Contructor Arguments:\n x, y, direction, speed, target, rank, tags, appearance, radius\n - same as the above attributes\n actions - internal action list\n Action - custom Action constructor\n\n \"\"\"\n\n def __init__(self, x=0, y=0, direction=0, speed=0, target=None,\n actions=(), rank=0.5, tags=(), appearance=None,\n radius=0.5):\n self.x = self.px = x\n self.y = self.py = y\n self.radius = radius\n self.mx = 0\n self.my = 0\n self.direction = direction\n 
self.speed = speed\n self.vanished = False\n self.finished = False\n self.target = target\n self.rank = rank\n self.tags = set(tags)\n self.appearance = appearance\n self.actions = list(actions)\n\n @classmethod\n def FromDocument(cls, doc, x=0, y=0, direction=0, speed=0, target=None,\n params=(), rank=0.5, Action=Action):\n \"\"\"Construct a new Bullet from a loaded BulletML document.\"\"\"\n actions = [action(None, Action, params, rank)\n for action in doc.actions]\n return cls(x=x, y=y, direction=direction, speed=speed,\n target=target, actions=actions, rank=rank)\n\n def __repr__(self):\n return (\"%s(%r, %r, accel=%r, direction=%r, speed=%r, \"\n \"actions=%r, target=%r, appearance=%r, vanished=%r)\") % (\n type(self).__name__, self.x, self.y, (self.mx, self.my),\n self.direction, self.speed, self.actions, self.target,\n self.appearance, self.vanished)\n\n @property\n def aim(self):\n \"\"\"Angle to the target, in radians.\n\n If the target does not exist or cannot be found, return 0.\n \"\"\"\n try:\n target_x = self.target.x\n target_y = self.target.y\n except AttributeError:\n return 0\n else:\n return atan2(target_x - self.x, target_y - self.y)\n\n def vanish(self):\n \"\"\"Vanish this bullet and stop all actions.\"\"\"\n self.vanished = True\n for action in self.actions:\n action.vanish()\n self.actions = []\n\n def replace(self, old, new):\n \"\"\"Replace an active action with another.\n\n This is mostly used by actions internally to queue children.\n \"\"\"\n try:\n idx = self.actions.index(old)\n except ValueError:\n pass\n else:\n self.actions[idx] = new\n\n def step(self):\n \"\"\"Advance by one frame.\n\n This updates the position and velocity, and may also set the\n vanished flag.\n\n It returns any new bullets this bullet spawned during this step.\n \"\"\"\n created = []\n\n finished = self.vanished\n for action in self.actions:\n action.step(self, created)\n finished = finished and action.finished\n if finished:\n for action in self.actions:\n finished = finished and action.finished\n self.finished = finished\n\n speed = self.speed\n direction = self.direction\n self.px = self.x\n self.py = self.y\n self.x += self.mx + sin(direction) * speed\n self.y += -self.my + cos(direction) * speed\n\n return created\n","sub_path":"bulletml/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369157937","text":"#!/usr/bin/env python3\n\n\"\"\"\nCompares performance of estimators and pipelines.\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, StratifiedKFold\nfrom imblearn.pipeline import make_pipeline\nfrom imblearn.over_sampling import SMOTE\n\nIRS = [1., 3., 5., 10.]\nLR = LogisticRegression(solver='lbfgs', max_iter=5000)\nDT = DecisionTreeClassifier(random_state=0)\nSMOTE_LR = make_pipeline(SMOTE(random_state=1), LogisticRegression(solver='lbfgs', max_iter=5000))\nDT_PARAM_GRID = {'max_depth': [2, 3, 4, 5], 'criterion': ['gini', 'entropy']}\nSMOTE_LR_PARAM_GRID = {'smote__k_neighbors': [2, 3, 4], 'logisticregression__C': [1e3, 1e2, 1e1, 1e0, 1e-1]}\nCV = StratifiedKFold(n_splits=5, shuffle=True, random_state=2)\n\n\ndef generate_data(imbalance_ratio):\n \"\"\"Generate data of given IR.\"\"\"\n weights = imbalance_ratio / (imbalance_ratio + 1), 1 / 
(imbalance_ratio + 1)\n X, y = make_classification(n_samples=200, n_features=5, random_state=3, weights=weights)\n return X, y\n\n\ndef mean_cv_score(estimator, X, y):\n \"\"\"Return stratified 5-fold mean f-score.\"\"\"\n return cross_val_score(estimator, X, y, cv=CV, scoring='f1').mean()\n\n\ndef optimal_mean_cv_score(estimator, param_grid, X, y):\n \"\"\"Return highest stratified 5-fold mean f-score for a parameter grid.\"\"\"\n gscv = GridSearchCV(estimator, param_grid, cv=CV, scoring='f1', iid=False).fit(X, y)\n return gscv.best_score_\n\n\nif __name__ =='__main__':\n\n # Scores placeholder\n scores = []\n\n # Iterate through imbalance ratios\n for imbalance_ratio in IRS:\n\n # Generate data\n X, y = generate_data(imbalance_ratio)\n \n # Calculate scores\n ir_scores = [\n imbalance_ratio,\n mean_cv_score(LR, X, y), \n mean_cv_score(DT, X, y), \n mean_cv_score(SMOTE_LR, X, y), \n optimal_mean_cv_score(DT, DT_PARAM_GRID, X, y), \n optimal_mean_cv_score(SMOTE_LR, SMOTE_LR_PARAM_GRID, X, y)\n ]\n scores.append(ir_scores)\n\n # Create and print scores table\n scores = pd.DataFrame(scores, columns=['IR', 'LR', 'DT', 'SMOTE + LR', 'Optimal DT', 'Optimal SMOTE + LR'])\n print(scores)\n","sub_path":"machine-learning/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149050396","text":"# Builtins\nfrom datetime import timedelta, timezone\nimport datetime as dt\nfrom typing import Any, List, Tuple\nimport math\n\n# External libraries\nfrom finta import TA\nimport numpy as np\nimport pandas as pd\n\nfrom harvest.utils import *\nfrom harvest.plugin._base import Plugin\n\n\"\"\"\nMethods that perform an action should follow this naming convention:\n [action]_[target]_[returns]\n - action: The action taken by the method, such as 'get', 'buy'\n - target: The entity the method operates on, such as 'stock', 'option', 'account'\n - returns: What the method returns, such as 'list', 'price'\n\nWhen dealing with dates, unlike datetime objects in other classes,\ndates in Algo class are NOT localized to UTC timezone. This keeps the\ninterface easier for users, especially for beginners of Python.\n\"\"\"\n\n\nclass BaseAlgo:\n \"\"\"\n The BaseAlgo class is where the algorithm resides.\n It provides an interface to monitor stocks and place orders.\n Helper functions are also provided for common calculations such as RSI and SMA.\n \"\"\"\n\n def __init__(self):\n self.trader = None\n self.interval = None\n self.aggregations = None\n self.watchlist = []\n\n def config(self):\n self.interval = None\n self.aggregations = None\n self.watchlist = []\n\n def setup(self):\n pass\n\n def main(self):\n pass\n\n def add_plugin(self, plugin: Plugin):\n value = getattr(self, plugin.name, None)\n if value is None:\n setattr(self, plugin.name, plugin)\n else:\n debugger.error(\n f\"Plugin name is already in use! {plugin.name} points to {value}.\"\n )\n\n ############ Functions interfacing with broker through the trader #################\n\n def buy(\n self,\n symbol: str = None,\n quantity: int = None,\n in_force: str = \"gtc\",\n extended: bool = False,\n ):\n \"\"\"Buys the specified asset.\n\n When called, a limit buy order is placed with a limit\n price 5% higher than the current price.\n\n :param str? symbol: Symbol of the asset to buy. defaults to first symbol in watchlist\n :param float? quantity: Quantity of asset to buy. defaults to buys as many as possible\n :param str? 
in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n :param str? extended: Whether to trade in extended hours or not. defaults to False\n :returns: The following Python dictionary\n\n - type: str, 'STOCK' or 'CRYPTO'\n - id: str, ID of order\n - symbol: str, symbol of asset\n\n :raises Exception: There is an error in the order process.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if quantity is None:\n quantity = self.get_asset_max_quantity(symbol)\n\n debugger.debug(f\"Algo BUY: {symbol}, {quantity}\")\n return self.trader.buy(symbol, quantity, in_force, extended)\n\n def sell(\n self,\n symbol: str = None,\n quantity: int = None,\n in_force: str = \"gtc\",\n extended: bool = False,\n ):\n \"\"\"Sells the specified asset.\n\n When called, a limit sell order is placed with a limit\n price 5% lower than the current price.\n\n :param str? symbol: Symbol of the asset to sell. defaults to first symbol in watchlist\n :param float? quantity: Quantity of asset to sell defaults to sells all\n :param str? in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n :param str? extended: Whether to trade in extended hours or not. defaults to False\n :returns: A dictionary with the following keys:\n\n - type: str, 'STOCK' or 'CRYPTO'\n - id: str, ID of order\n - symbol: str, symbol of asset\n\n :raises Exception: There is an error in the order process.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if quantity is None:\n quantity = self.get_asset_quantity(symbol)\n\n debugger.debug(f\"Algo SELL: {symbol}, {quantity}\")\n return self.trader.sell(symbol, quantity, in_force, extended)\n\n # def await_buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n # \"\"\"Buys the specified asset, and hangs the code until the order is filled.\n\n # :param str? symbol: Symbol of the asset to buy. defaults to first symbol in watchlist\n # :param float? quantity: Quantity of asset to buy. defaults to buys as many as possible\n # :param str? in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n # :param str? extended: Whether to trade in extended hours or not. defaults to False\n # :returns: A dictionary with the following keys:\n\n # - type: 'STOCK' or 'CRYPTO'\n # - id: ID of order\n # - symbol: symbol of asset\n\n # :raises Exception: There is an error in the order process.\n # \"\"\"\n # if symbol == None:\n # symbol = self.watchlist[0]\n # if quantity == None:\n # quantity = self.get_asset_max_quantity(symbol)\n # return self.trader.await_buy(symbol, quantity, in_force, extended)\n\n # def await_sell(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n # \"\"\"Sells the specified asset, and hangs the code until the order is filled.\n\n # :param str? symbol: Symbol of the asset to sell. defaults to first symbol in watchlist\n # :param float? quantity: Quantity of asset to sell defaults to sells all\n # :param str? in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n # :param str? extended: Whether to trade in extended hours or not. 
defaults to False\n # :returns: A dictionary with the following keys:\n\n # - type: 'STOCK' or 'CRYPTO'\n # - id: ID of order\n # - symbol: symbol of asset\n\n # :raises Exception: There is an error in the order process.\n # \"\"\"\n # if symbol == None:\n # symbol = self.watchlist[0]\n # if quantity == None:\n # quantity = self.get_asset_quantity(symbol)\n # return self.trader.await_sell(symbol, quantity, in_force, extended)\n\n def buy_option(self, symbol: str, quantity: int = None, in_force: str = \"gtc\"):\n \"\"\"Buys the specified option.\n\n When called, a limit buy order is placed with a limit\n price 5% higher than the current price.\n\n :param str symbol: Symbol of the asset to buy, in {OCC} format.\n :param float? quantity: Quantity of asset to buy. defaults to buys as many as possible\n :param str? in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n :returns: A dictionary with the following keys:\n\n - type: 'OPTION'\n - id: ID of order\n - symbol: symbol of asset\n\n :raises Exception: There is an error in the order process.\n \"\"\"\n if quantity is None:\n quantity = self.get_asset_max_quantity(symbol)\n return self.trader.buy_option(symbol, quantity, in_force)\n\n def sell_option(\n self, symbol: str = None, quantity: int = None, in_force: str = \"gtc\"\n ):\n \"\"\"Sells the specified option.\n\n When called, a limit sell order is placed with a limit\n price 5% lower than the current price.\n\n If the option symbol is specified, it will sell that option. If it is not, then the\n method will select the first stock symbol in the watchlist, and sell all options\n related to that stock.\n\n :param str? symbol: Symbol of the asset to sell, in {OCC} format. defaults to sell all options for the first stock in watchlist\n :param float? quantity: Quantity of asset to sell. defaults to sells all\n :param str? in_force: Duration the order is in force. '{gtc}' or '{gtd}'. defaults to 'gtc'\n :returns: A dictionary with the following keys:\n\n - type: 'OPTION'\n - id: ID of order\n - symbol: symbol of asset\n\n :raises Exception: There is an error in the order process.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n symbols = [\n s[\"occ_symbol\"]\n for s in self.get_account_option_positions()\n if s[\"symbol\"] == symbol\n ]\n else:\n symbols = [symbol]\n for s in symbols:\n if quantity is None:\n quantity = self.get_asset_quantity(s)\n return self.trader.sell_option(s, quantity, in_force)\n\n def filter_option_chain(\n self,\n symbol=None,\n type=None,\n lower_exp=None,\n upper_exp=None,\n lower_strike=None,\n upper_strike=None,\n ):\n \"\"\"\n Automatically buys an option that satisfies the criteria specified.\n\n :param str? symbol: Symbol of stock. defaults to first symbol in watchlist\n :param str? 
type: 'call' or 'put'\n :param datetime lower_exp: Minimum expiration date of the option.\n :param datetime upper_exp: Maximum expiration date of the option.\n :param float lower_strike: The minimum strike price of the option\n :param float upper_strike: The maximum strike price of the option\n\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n lower_exp = _convert_input_to_datetime(lower_exp)\n upper_exp = _convert_input_to_datetime(upper_exp)\n\n exp_dates = self.get_option_chain_info(symbol)[\"exp_dates\"]\n if lower_exp is not None:\n exp_dates = list(filter(lambda x: x >= lower_exp, exp_dates))\n if upper_exp is not None:\n exp_dates = list(filter(lambda x: x <= upper_exp, exp_dates))\n exp_dates = sorted(exp_dates)\n\n exp_date = exp_dates[0]\n chain = self.get_option_chain(symbol, exp_date)\n if lower_strike is not None:\n chain = chain[chain[\"strike\"] >= lower_strike]\n if upper_strike is not None:\n chain = chain[chain[\"strike\"] <= upper_strike]\n\n if type is not None:\n chain = chain[chain[\"type\"] == type]\n\n chain = chain.sort_values(by=[\"strike\", \"exp_date\"])\n\n return chain\n\n # ------------------ Functions to trade options ----------------------\n\n def get_option_chain_info(self, symbol: str = None):\n \"\"\"Returns metadata about a stock's option chain\n\n :param str? symbol: symbol of stock. defaults to first symbol in watchlist\n :returns: A dict with the following keys:\n - exp_dates: List of expiration dates, in the fomrat \"YYYY-MM-DD\"\n - multiplier: Multiplier of the option, usually 100\n \"\"\"\n if symbol == None:\n symbol = self.watchlist[0]\n return self.trader.fetch_chain_info(symbol)\n\n def get_option_chain(self, symbol: str, date):\n \"\"\"Returns the option chain for the specified symbol and expiration date.\n\n :param str symbol: symbol of stock\n :param dt.datetime date: date of option expiration\n :returns: A dataframe with the follwing columns:\n\n - exp_date(datetime.datetime): The expiration date\n - strike(float): Strike price\n - type(str): 'call' or 'put'\n\n The index is the {OCC} symbol of the option.\n Note that the expiration date is not adjusted to the local time zone.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n date = _convert_input_to_datetime(date)\n return self.trader.fetch_chain_data(symbol, date)\n\n def get_option_market_data(self, symbol: str):\n \"\"\"Retrieves data of specified option.\n\n :param str? symbol: {OCC} symbol of option\n :returns: A dictionary:\n\n - price: price of option\n - ask: ask price\n - bid: bid price\n\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n return self.trader.fetch_option_market_data(symbol)\n\n # ------------------ Technical Indicators -------------------\n\n def _default_param(self, symbol, interval, ref, prices):\n if symbol is None:\n symbol = self.watchlist[0]\n if self.trader is None:\n if interval == None:\n interval = Interval.MIN_5\n if prices is None:\n raise Exception(f\"No prices found for symbol {symbol}\")\n else:\n if interval is None:\n interval = self.trader.interval[symbol][\"interval\"]\n if prices == None:\n prices = self.trader.storage.load(symbol, interval)[symbol][ref]\n\n return symbol, interval, ref, prices\n\n def rsi(\n self,\n symbol: str = None,\n period: int = 14,\n interval: Interval = None,\n ref: str = \"close\",\n prices=None,\n ) -> np.array:\n \"\"\"Calculate RSI\n\n :param str? symbol: Symbol to perform calculation on. defaults to first symbol in watchlist\n :param int? period: Period of RSI. 
defaults to 14\n :param str? interval: Interval to perform the calculation. defaults to interval of algorithm\n :param str? ref: 'close', 'open', 'high', or 'low'. defaults to 'close'\n :param list? prices: When specified, this function will use the values provided in the\n list to perform calculations and ignore other parameters. defaults to None\n :returns: A list in numpy format, containing RSI values\n \"\"\"\n symbol, interval, ref, prices = self._default_param(\n symbol, interval, ref, prices\n )\n\n if len(prices) < period:\n debugger.warning(\"Not enough data to calculate RSI, returning None\")\n return None\n\n ohlc = pd.DataFrame(\n {\n \"close\": np.array(prices),\n \"open\": np.zeros(len(prices)),\n \"high\": np.zeros(len(prices)),\n \"low\": np.zeros(len(prices)),\n }\n )\n return TA.RSI(ohlc, period=period).to_numpy()\n\n def sma(\n self,\n symbol: str = None,\n period: int = 14,\n interval: Interval = None,\n ref: str = \"close\",\n prices=None,\n ) -> np.array:\n \"\"\"Calculate SMA\n\n :param str? symbol: Symbol to perform calculation on. defaults to first symbol in watchlist\n :param int? period: Period of SMA. defaults to 14\n :param str? interval: Interval to perform the calculation. defaults to interval of algorithm\n :param str? ref: 'close', 'open', 'high', or 'low'. defaults to 'close'\n :param list? prices: When specified, this function will use the values provided in the\n list to perform calculations and ignore other parameters. defaults to None\n :returns: A list in numpy format, containing SMA values\n \"\"\"\n symbol, interval, ref, prices = self._default_param(\n symbol, interval, ref, prices\n )\n\n if len(prices) < period:\n debugger.warning(\"Not enough data to calculate SMA, returning None\")\n return None\n\n ohlc = pd.DataFrame(\n {\n \"close\": np.array(prices),\n \"open\": np.zeros(len(prices)),\n \"high\": np.zeros(len(prices)),\n \"low\": np.zeros(len(prices)),\n }\n )\n return TA.SMA(ohlc, period=period).to_numpy()\n\n def ema(\n self,\n symbol: str = None,\n period: int = 14,\n interval: Interval = None,\n ref: str = \"close\",\n prices=None,\n ) -> np.array:\n \"\"\"Calculate EMA\n\n :param str? symbol: Symbol to perform calculation on. defaults to first symbol in watchlist\n :param int? period: Period of EMA. defaults to 14\n :param str? interval: Interval to perform the calculation. defaults to interval of algorithm\n :param str? ref: 'close', 'open', 'high', or 'low'. defaults to 'close'\n :param list? prices: When specified, this function will use the values provided in the\n list to perform calculations and ignore other parameters. defaults to None\n :returns: A list in numpy format, containing EMA values\n \"\"\"\n symbol, interval, ref, prices = self._default_param(\n symbol, interval, ref, prices\n )\n\n if len(prices) < period:\n debugger.warning(\"Not enough data to calculate EMA, returning None\")\n return None\n\n ohlc = pd.DataFrame(\n {\n \"close\": np.array(prices),\n \"open\": np.zeros(len(prices)),\n \"high\": np.zeros(len(prices)),\n \"low\": np.zeros(len(prices)),\n }\n )\n return TA.EMA(ohlc, period=period).to_numpy()\n\n def bbands(\n self,\n symbol: str = None,\n period: int = 14,\n interval: Interval = None,\n ref: str = \"close\",\n dev: float = 1.0,\n prices=None,\n ) -> Tuple[np.array, np.array, np.array]:\n \"\"\"Calculate Bollinger Bands\n\n :param str? symbol: Symbol to perform calculation on. defaults to first symbol in watchlist\n :param int? period: Period of BBands. defaults to 14\n :param str? 
interval: Interval to perform the calculation. defaults to interval of algorithm\n :param str? ref: 'close', 'open', 'high', or 'low'. defaults to 'close'\n :param float? dev: Standard deviation of the bands. defaults to 1.0\n :param list? prices: When specified, this function will use the values provided in the\n list to perform calculations and ignore other parameters. defaults to None\n :returns: A tuple of numpy lists, each a list of BBand top, average, and bottom values\n \"\"\"\n symbol, interval, ref, prices = self._default_param(\n symbol, interval, ref, prices\n )\n\n if len(prices) < period:\n debugger.warning(\"Not enough data to calculate BBands, returning None\")\n return None, None, None\n\n ohlc = pd.DataFrame(\n {\n \"close\": np.array(prices),\n \"open\": np.zeros(len(prices)),\n \"high\": np.zeros(len(prices)),\n \"low\": np.zeros(len(prices)),\n }\n )\n\n t, m, b = TA.BBANDS(\n ohlc, period=period, std_multiplier=dev, MA=TA.SMA(ohlc, period)\n ).T.to_numpy()\n return t, m, b\n\n def crossover(self, prices_0, prices_1):\n \"\"\"Performs {crossover analysis} on two sets of price data\n\n :param list prices_0: First set of price data.\n :param list prices_1: Second set of price data\n :returns: 'True' if prices_0 most recently crossed over prices_1, 'False' otherwise\n\n :raises Exception: If either or both price list has less than 2 values\n \"\"\"\n if len(prices_0) < 2 or len(prices_1) < 2:\n raise Exception(\n \"There must be at least 2 datapoints to calculate crossover\"\n )\n return prices_0[-2] < prices_1[-2] and prices_0[-1] > prices_1[-1]\n\n ############### Getters for Trader properties #################\n\n def get_asset_quantity(self, symbol: str = None) -> float:\n \"\"\"Returns the quantity owned of a specified asset.\n\n :param str? symbol: Symbol of asset. defaults to first symbol in watchlist\n :returns: Quantity of asset as float. 0 if quantity is not owned.\n :raises:\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p[\"symbol\"] == symbol:\n return p[\"quantity\"]\n else:\n for p in self.trader.option_positions:\n if p[\"occ_symbol\"] == symbol:\n return p[\"quantity\"]\n return 0\n\n def get_asset_cost(self, symbol: str = None) -> float:\n \"\"\"Returns the average cost of a specified asset.\n\n :param str? symbol: Symbol of asset. defaults to first symbol in watchlist\n :returns: Average cost of asset. Returns None if asset is not being tracked.\n :raises Exception: If symbol is not currently owned.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p[\"symbol\"] == symbol:\n return p[\"avg_price\"]\n else:\n for p in self.trader.option_positions:\n if p[\"occ_symbol\"].replace(\" \", \"\") == symbol.replace(\" \", \"\"):\n return p[\"avg_price\"]\n\n raise Exception(f\"{symbol} is not currently owned\")\n\n def get_asset_price(self, symbol: str = None) -> float:\n \"\"\"Returns the current price of a specified asset.\n\n :param str? symbol: Symbol of asset. 
defaults to first symbol in watchlist\n :returns: Price of asset.\n :raises Exception: If symbol is not in the watchlist.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if len(symbol) <= 6:\n return self.trader.storage.load(symbol, self.interval)[symbol][\"close\"][-1]\n for p in self.trader.option_positions:\n if p[\"occ_symbol\"] == symbol:\n return p[\"current_price\"] * p[\"multiplier\"]\n return self.get_option_market_data(symbol)[\"price\"] * 100\n\n def get_asset_price_list(\n self, symbol: str = None, interval=None, ref: str = \"close\"\n ):\n \"\"\"Returns a list of recent prices for an asset.\n\n This function is not compatible with options.\n\n :param str? symbol: Symbol of stock or crypto asset. defaults to first symbol in watchlist\n :param str? interval: Interval of data. defaults to the interval of the algorithm\n :param str? ref: 'close', 'open', 'high', or 'low'. defaults to 'close'\n :returns: List of prices\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if interval is None:\n interval = self.interval\n if len(symbol) <= 6:\n return list(self.trader.storage.load(symbol, interval)[symbol][ref])\n debugger.warning(\"Price list not available for options\")\n return None\n\n def get_asset_candle(self, symbol: str = None, interval=None) -> pd.DataFrame:\n \"\"\"Returns the most recent candle as a pandas DataFrame\n\n This function is not compatible with options.\n\n :param str? symbol: Symbol of stock or crypto asset. defaults to first symbol in watchlist\n :returns: Price of asset as a dataframe with the following columns:\n\n - open\n - high\n - low\n - close\n - volume\n\n The index is a datetime object\n\n :raises Exception: If symbol is not in the watchlist.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if interval is None:\n interval = self.interval\n if len(symbol) <= 6:\n df = self.trader.storage.load(symbol, interval).iloc[[-1]][symbol]\n return pandas_timestamp_to_local(df, self.trader.timezone)\n debugger.warning(\"Candles not available for options\")\n return None\n\n def get_asset_candle_list(\n self, symbol: str = None, interval=None\n ) -> pd.DataFrame:\n \"\"\"Returns the candles of an asset as a pandas DataFrame\n\n This function is not compatible with options.\n\n :param str? symbol: Symbol of stock or crypto asset. defaults to first symbol in watchlist\n :returns: Prices of asset as a dataframe with the following columns:\n\n - open\n - high\n - low\n - close\n - volume\n\n The index is a datetime object\n\n :raises Exception: If symbol is not in the watchlist.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n if interval is None:\n interval = self.interval\n df = self.trader.storage.load(symbol, interval)[symbol]\n return pandas_timestamp_to_local(df, self.trader.timezone)\n\n def get_asset_returns(self, symbol=None) -> float:\n \"\"\"Returns the return of a specified asset.\n\n :param str? symbol: Symbol of stock, crypto, or option. Options should be in {OCC} format.\n defaults to first symbol in watchlist\n :returns: Return of asset, expressed as a decimal.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n cost = self.get_asset_cost(symbol)\n # For options, apply the multiplier\n if len(symbol) > 6:\n cost = cost * 100\n price = self.get_asset_price(symbol)\n return (price - cost) / cost\n\n def get_asset_max_quantity(self, symbol=None):\n \"\"\"Calculates the maximum quantity of an asset that can be bought given the current buying power.\n\n :param str? symbol: Symbol of stock, crypto, or option. Options should be in {OCC} format.\n defaults to first symbol in watchlist\n :returns: Quantity that can be bought.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n\n power = self.get_account_buying_power()\n price = self.get_asset_price(symbol)\n debugger.debug(f\"{symbol} price: {price}, buying power: {power}\")\n if is_crypto(symbol):\n price = mark_up(price)\n return math.floor(power / price * 10 ** 5) / 10 ** 5\n else:\n price = mark_up(price)\n return math.floor(power / price)\n\n def get_account_buying_power(self) -> float:\n \"\"\"Returns the current buying power of the user\n\n :returns: The current buying power as a float.\n \"\"\"\n return self.trader.account[\"buying_power\"]\n\n def get_account_equity(self) -> float:\n \"\"\"Returns the current equity.\n\n :returns: The current equity as a float.\n \"\"\"\n return self.trader.account[\"equity\"]\n\n def get_account_stock_positions(self) -> List:\n \"\"\"Returns the current stock positions.\n\n :returns: A list of dictionaries with the following keys:\n - symbol\n - quantity\n - avg_price\n \"\"\"\n return self.trader.stock_positions\n\n def get_account_crypto_positions(self) -> List:\n \"\"\"Returns the current crypto positions.\n\n :returns: A list of dictionaries with the following keys:\n - symbol\n - quantity\n - avg_price\n \"\"\"\n return self.trader.crypto_positions\n\n def get_account_option_positions(self) -> List:\n \"\"\"Returns the current option positions.\n\n :returns: A list of dictionaries with the following keys:\n - symbol\n - quantity\n - avg_price\n \"\"\"\n return self.trader.option_positions\n\n def get_watchlist(self) -> List:\n \"\"\"Returns the current watchlist.\"\"\"\n return self.watchlist\n\n def get_stock_watchlist(self) -> List:\n \"\"\"Returns the stock symbols in the current watchlist.\"\"\"\n return [s for s in self.watchlist if not is_crypto(s)]\n\n def get_crypto_watchlist(self) -> List:\n \"\"\"Returns the crypto symbols in the current watchlist.\"\"\"\n return [s for s in self.watchlist if is_crypto(s)]\n\n def get_time(self):\n \"\"\"Returns the current hour and minute.\n\n This returns the current time, which is different from the timestamp\n on a ticker. For example, if you are running an algorithm every 5 minutes,\n at 11:30am you will get a ticker for 11:25am. This function will return\n 11:30am.\n\n :returns: The current time as a datetime object\n \"\"\"\n return self.get_datetime().time()\n\n def get_date(self):\n \"\"\"Returns the current date.\n\n :returns: The current date as a datetime object\n \"\"\"\n return self.get_datetime().date()\n\n def get_datetime(self):\n \"\"\"Returns the current date and time.\n\n This returns the current time, which is different from the timestamp\n on a ticker. For example, if you are running an algorithm every 5 minutes,\n at 11:30am you will get a ticker for 11:25am. This function will return\n 11:30am.\n\n :returns: The current date and time as a datetime object\n \"\"\"\n return datetime_utc_to_local(\n self.trader.streamer.timestamp, self.trader.timezone\n )\n\n def get_option_position_quantity(self, symbol: str = None) -> int:\n \"\"\"Returns the number of types of options held for a stock.\n\n :param str symbol: Symbol of the stock to check\n :returns: The number of different option positions held for the specified symbol.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n pos = [p for p in self.trader.option_positions if p[\"symbol\"] == symbol]\n return len(pos)\n\n def is_day_trade(self, action, symbol=None) -> bool:\n \"\"\"\n Checks if performing a buy or sell will be considered day trading.\n A sell counts as a day trade when the same symbol was also bought earlier the same day.\n \"\"\"\n if symbol is None:\n symbol = self.watchlist[0]\n\n history = self.trader.logger.get_transactions()\n if len(history) < 1:\n return False\n recent = history.loc[self.get_date() :]\n recent = recent[recent[\"symbol\"] == symbol]\n if action == \"buy\":\n return False\n if action == \"sell\":\n recent = recent.loc[recent[\"action\"] == \"buy\"]\n return len(recent.index) > 0\n\n # Used for testing\n def add_symbol(self, symbol: str):\n \"\"\"Adds a symbol to the watchlist.\n\n :param str symbol: Symbol of stock or crypto asset.\n \"\"\"\n self.watchlist.append(symbol)\n","sub_path":"harvest/algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":30192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"582109758","text":"# -*- coding: utf-8 -*-\nfrom PySide import QtGui,QtCore\nfrom task_view_staff_UI import task_view_staff_UI\nfrom miraLibs.pipeLibs.pipeDb import sql_api, get_members\nfrom miraLibs.pyLibs import join_path\nimport datetime, os\nclass dateCalendar(QtGui.QCalendarWidget):\n def __init__(self, parent = None):\n QtGui.QCalendarWidget.__init__(self, parent)\n self.pen = QtGui.QPen(QtGui.QColor(255, 255, 255))\n self.brush = QtGui.QBrush(QtGui.QColor(255,140,0),QtCore.Qt.SolidPattern)\n self.date_list = []\n\n def paintCell(self, painter, rect, date):\n QtGui.QCalendarWidget.paintCell(self, painter, rect, date)\n\n # if date.day() % self.args== 0:\n # painter.setPen(self.pen)\n # painter.setBrush(self.brush)\n # painter.drawRect(rect.adjusted(0, 0, -1, -1))\n # painter.drawText(rect,QtCore.Qt.AlignCenter,unicode(date.day()))\n\n\n painter.setPen(self.pen)\n painter.setBrush(self.brush)\n\n for i in self.date_list:\n if i == date:\n painter.drawRect(rect.adjusted(0, 0, -1, -1))\n painter.drawText(rect,QtCore.Qt.AlignCenter,unicode(date.day()))\n\n def set_date_list(self,inList):\n self.date_list = inList\n\nclass task_view_staff(task_view_staff_UI):\n def __init__(self,project_name,parent=None):\n super(task_view_staff,self).__init__(parent)\n qss_path = join_path.join_path2(os.path.dirname(__file__), \"style.qss\")\n self.setStyleSheet(open(qss_path, 'r').read())\n self.project_name = project_name\n self.db_handle = sql_api.SqlApi(self.project_name)\n self.set_additional()\n self.set_department_list_widget()\n self.make_connections()\n\n def set_additional(self):\n self.calendarWidget = dateCalendar(self.splitter_3)\n self.calendarWidget.setSelectionMode(QtGui.QCalendarWidget.NoSelection)\n self.calendarWidget.setGridVisible(True)\n self.splitter_2.setStretchFactor(0,3)\n self.splitter_2.setStretchFactor(1,10)\n self.splitter_3.setStretchFactor(0,4)\n self.splitter_3.setStretchFactor(1,6)\n self.set_task_widget()\n self.translate_label()\n 
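# Configure both task tables: edit on double-click, alternate row colors, and select whole single rows.\n 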
self.finisheTtableWidget.setEditTriggers(QtGui.QAbstractItemView.DoubleClicked)\n self.finisheTtableWidget.setAlternatingRowColors(True)\n self.finisheTtableWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n self.finisheTtableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.progressTableWidget.setEditTriggers(QtGui.QAbstractItemView.DoubleClicked)\n self.progressTableWidget.setAlternatingRowColors(True)\n self.progressTableWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n self.progressTableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n\n\n def make_connections(self):\n self.departmentListWidget.itemClicked.connect(self.set_staff_list_widget)\n self.staffListWidget.itemClicked.connect(self.update_task_widget)\n self.finisheTtableWidget.itemClicked.connect(self.finished_update_calander)\n self.progressTableWidget.itemClicked.connect(self.process_update_calander)\n\n def get_memeber_dict(self):\n self.all_staff_dict = get_members.JRF_staff_DB('JRF_baseInfo', 'staff').getAll()\n resDict = {}\n for key in self.all_staff_dict.keys():\n department = self.all_staff_dict[key]['department']\n name = self.all_staff_dict[key][\"name\"]\n if not resDict.__contains__(department):\n resDict[department] = []\n resDict[department].append(key+'_'+name)\n return resDict\n\n def set_task_widget(self):\n self.taskListTableWidgeLableList = [u'taskId',u\"taskRank\",u\"taskDescription\",u\"taskStatus\",u\"taskStartDate\",u\"taskPredictDate\",u\"taskEndDate\"]\n self.taskListTableWidgeLableList_cn = [u'任务号',u\"任务等级\",u\"描述\",u\"状态\",u\"开始时间\",u\"预计完成时间\",u\"入库时间\"]\n listLength = len(self.taskListTableWidgeLableList)\n self.finisheTtableWidget.setColumnCount(listLength)\n self.progressTableWidget.setColumnCount(listLength)\n self.finisheTtableWidget.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\n self.progressTableWidget.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\n for i in range(listLength):\n item = QtGui.QTableWidgetItem()\n item.setText( self.taskListTableWidgeLableList[i])\n item_1 = QtGui.QTableWidgetItem()\n item_1.setText( self.taskListTableWidgeLableList[i])\n self.finisheTtableWidget.setHorizontalHeaderItem(i, item)\n self.progressTableWidget.setHorizontalHeaderItem(i, item_1)\n\n def translate_label(self):\n for i in range(len(self.taskListTableWidgeLableList_cn)):\n item = QtGui.QTableWidgetItem(self.taskListTableWidgeLableList_cn[i])\n self.finisheTtableWidget.setHorizontalHeaderItem(i,item)\n item_1 = QtGui.QTableWidgetItem(self.taskListTableWidgeLableList_cn[i])\n self.progressTableWidget.setHorizontalHeaderItem(i,item_1)\n\n def set_department_list_widget(self):\n self.__members_dict = self.get_memeber_dict()\n for key in self.__members_dict.keys():\n item = QtGui.QListWidgetItem()\n item.setText(key)\n self.departmentListWidget.addItem(item)\n\n def set_staff_list_widget(self):\n self.staffListWidget.clear()\n active_department = self.departmentListWidget.currentItem().text()\n staff_list = self.__members_dict[active_department]\n for i in staff_list:\n item = QtGui.QListWidgetItem()\n item.setText(i)\n self.staffListWidget.addItem(item)\n\n def update_task_widget(self):\n self.finisheTtableWidget.clear()\n self.progressTableWidget.clear()\n\n staff_id = self.staffListWidget.currentItem().text().split('_')[0]\n staff_name = self.staffListWidget.currentItem().text().split('_')[1]\n staff_domainname = self.all_staff_dict[staff_id]['domainname']\n if not staff_domainname:\n 
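# This staff member has no domain account on record; warn the user and bail out.\n 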
QtGui.QMessageBox.information(self,u'Error',u'%s does not have a domain account yet, please contact a TD'%staff_name)\n return\n task_dict = self.db_handle.getAllTaskByDomainname(staff_domainname)\n self.finished_task_dict = {}\n self.process_task_dict = {}\n for key in task_dict.keys():\n if task_dict[key]['taskStatus'] == 'release':\n self.finished_task_dict[key] = task_dict[key]\n else:\n self.process_task_dict[key] = task_dict[key]\n # set finished widget\n finished_line = len(self.finished_task_dict)\n self.finisheTtableWidget.setRowCount(finished_line)\n rowp = 0\n for key in sorted(self.finished_task_dict.keys()):\n item = QtGui.QTableWidgetItem()\n item.setText(unicode(key))\n self.finisheTtableWidget.setItem(rowp,0,item)\n tmpDict = self.finished_task_dict[key]\n for i in range(1,len(self.taskListTableWidgeLableList)):\n item = QtGui.QTableWidgetItem()\n item.setText(unicode(tmpDict[self.taskListTableWidgeLableList[i]]))\n self.finisheTtableWidget.setItem(rowp,i,item)\n rowp += 1\n # set process widget\n finished_line = len(self.process_task_dict)\n self.progressTableWidget.setRowCount(finished_line)\n rowp = 0\n for key in sorted(self.process_task_dict.keys()):\n item = QtGui.QTableWidgetItem()\n item.setText(unicode(key))\n self.progressTableWidget.setItem(rowp,0,item)\n tmpDict = self.process_task_dict[key]\n for i in range(1,len(self.taskListTableWidgeLableList)):\n item = QtGui.QTableWidgetItem()\n item.setText(unicode(tmpDict[self.taskListTableWidgeLableList[i]]))\n self.progressTableWidget.setItem(rowp,i,item)\n rowp += 1\n self.translate_label()\n\n def finished_update_calander(self):\n current_row = self.finisheTtableWidget.currentRow()\n start_date_str = self.finisheTtableWidget.item(current_row,4).text()\n end_date_str = self.finisheTtableWidget.item(current_row,5).text()\n self.update_calander(start_date_str,end_date_str)\n\n def process_update_calander(self):\n current_row = self.progressTableWidget.currentRow()\n start_date_str = self.progressTableWidget.item(current_row,4).text()\n end_date_str = self.progressTableWidget.item(current_row,5).text()\n self.update_calander(start_date_str,end_date_str)\n\n def update_calander(self,start_date,end_date):\n # get date list\n d1 = datetime.datetime.strptime(start_date[:10]+' 00:00:00',\"%Y-%m-%d %H:%M:%S\")\n d2 = datetime.datetime.strptime(end_date[:10]+' 00:00:00',\"%Y-%m-%d %H:%M:%S\")\n date_list = []\n diff_day = (d2-d1).days\n a_day = datetime.timedelta(days=1)\n i = 0\n while i <= diff_day:\n date_list.append(QtCore.QDate(d1.year,d1.month,d1.day))\n i += 1\n d1 += a_day\n # update calendarWidget\n self.calendarWidget.set_date_list(date_list)\n tmp_date = date_list[0]\n # QDate.month() is a method; flip to an adjacent month and back to force a repaint\n if tmp_date.month() == 1:\n self.calendarWidget.setCurrentPage(tmp_date.year(),tmp_date.month()+1)\n self.calendarWidget.setCurrentPage(tmp_date.year(),tmp_date.month())\n else:\n self.calendarWidget.setCurrentPage(tmp_date.year(),tmp_date.month()-1)\n self.calendarWidget.setCurrentPage(tmp_date.year(),tmp_date.month())\n\n def get_time_list(self,start_date_str,end_date_str):\n pass\n\n\nif __name__ == '__main__':\n\n import sys\n\n app = QtGui.QApplication(sys.argv)\n window = task_view_staff('sct')\n #window = get_main_ui()\n window.show()\n\n app.exec_()\n","sub_path":"miraScripts/pipeTools/task_view_staff/task_view_staff.py","file_name":"task_view_staff.py","file_ext":"py","file_size_in_byte":10025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"130304272","text":"from collections import defaultdict\nfrom typing import List\n\nclass 
TrieNode:\n\n def __init__(self):\n\n self.dict = defaultdict(TrieNode)\n self.is_word = False\n\n\nclass StreamChecker:\n\n def __init__(self, words: List[str]):\n '''\n Build a trie for each word in reversed order\n '''\n\n # for user query record, init as empty string\n self.prefix = ''\n\n # for root node of trie, init as empty Trie\n self.trie = TrieNode()\n\n for word in words:\n\n cur_node = self.trie\n\n # make word in reverse order\n word = word[::-1]\n\n for char in word:\n cur_node = cur_node.dict[char]\n\n # mark this trie path as a valid word\n cur_node.is_word = True\n\n\n\n def query(self, letter: str) -> bool:\n '''\n Search user input in trie with reversed order\n '''\n\n self.prefix += letter\n\n cur_node = self.trie\n for char in reversed(self.prefix):\n\n if char not in cur_node.dict:\n # current char not in Trie, impossible to match words\n break\n\n cur_node = cur_node.dict[char]\n\n if cur_node.is_word:\n # user input matches a word in Trie\n return True\n\n # No match\n return False\n","sub_path":"streamOfCharacters_1.py","file_name":"streamOfCharacters_1.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"490962949","text":"# _*_ coding: utf-8 _*_\n# ===============================================================================\n# DESCRIPTION: CLASS FOR MANAGING THE POSTGRES DATABASE ON SERVER DRJSCGERLX044 10.216.76.232\n# AUTHOR: P555375 - Clayton Sandes Monteiro\n# DATE: 2019-06-06\n# ===============================================================================\n\nimport psycopg2\nimport json\n\nclass PostgreSQL():\n\n def conexao(self, DBNAME):\n try:\n # Username and password kept in a file for security\n connection = json.loads(open('db_credencial.json').read())\n DBUSERNAME = connection['USER']\n DBPASSWORD = connection['PASS']\n HOST = '10.216.76.232'\n conn = psycopg2.connect(host=HOST, database=DBNAME, user=DBUSERNAME, password=DBPASSWORD)\n cursor = conn.cursor()\n return conn, cursor\n except Exception as e:\n return e\n\n\n def select(self, DBNAME, TABLES, COLUMNS, CLAUSE):\n try:\n conn, cursor = self.conexao(DBNAME)\n sql = 'SELECT {0}, messageid,procid,incident_number,alertid FROM {1} {2};'.format(COLUMNS,TABLES,CLAUSE)\n #sql = \"\"\"SELECT incident_number FROM abertura_incidentes where messageid = '143407' and abertura_flag = 1;\"\"\"\n cursor.execute(sql)\n\n if cursor is not None:\n result = cursor.fetchone()\n conn.close()\n return result\n except Exception as e:\n return e\n\n def select_all(self, DBNAME, TABLES, COLUMNS, CLAUSE):\n try:\n conn, cursor = self.conexao(DBNAME)\n sql = 'SELECT {0} FROM {1} {2};'.format(COLUMNS, TABLES, CLAUSE)\n # sql = \"\"\"SELECT incident_number FROM abertura_incidentes where messageid = '143407' and abertura_flag = 1;\"\"\"\n cursor.execute(sql)\n\n if cursor is not None:\n result = cursor.fetchall()\n conn.close()\n return result\n except Exception as e:\n return e\n\n\n def insert(self, DBNAME, TABLES, COLUMNS, VALUES):\n try:\n conn, cursor = self.conexao(DBNAME)\n sql = 'INSERT INTO {0} ({1}) VALUES ({2});'.format(TABLES, COLUMNS, VALUES)\n cursor.execute(sql)\n conn.commit()\n conn.close()\n return True\n except Exception as e:\n return e\n\n\n def update(self, DBNAME, TABLES, COLUMNS, CLAUSE):\n try:\n conn, cursor = self.conexao(DBNAME)\n sql = 'UPDATE {0} SET {1} {2};'.format(TABLES, COLUMNS, CLAUSE)\n cursor.execute(sql)\n conn.commit()\n conn.close()\n return True\n\n except Exception as 
e:\n return e\n","sub_path":"abertura_inc_control_center2/dao/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293769442","text":"import argparse\nimport pandas as pd\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\n Returns:\n args (obj)\"\"\"\n parser = argparse.ArgumentParser(description=\"Path to .feather data file.\")\n parser.add_argument(\"--path\", dest=\"path\", help=\"Path to .feather data file.\")\n args = parser.parse_args()\n return args\n\n\ndef main(path):\n df = pd.read_feather(path)\n df = df.pivot_table(index=\"date\", columns=\"cam_id\", values=\"count\") \\\n .resample(\"5min\").mean()\n df = df[df.index.weekday < 5]\n df = df.groupby(df.index.time).mean()\n df.index = map(lambda x: pd.to_datetime(\"01-01-2017 \" + str(x)), df.index)\n df = df.stack().reset_index()\n df.columns = [\"date\", \"cam\", \"count\"]\n df.set_index(\"cam\", inplace=True)\n df.to_csv(\"./weekdayavg.csv\")\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.path)\n \n","sub_path":"hadive/assorted/format-web-data.py","file_name":"format-web-data.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"361635357","text":"\"\"\"Check a VMware vCenter environment\n\n Gather all VMs from a VMware vCenter environment and check, wether\n they are all properly documented.\n\n Requires suds https://fedorahosted.org/suds\n\"\"\"\n\nfrom ScannerAdapter import ScannerAdapter\nimport logging\nfrom suds.client import Client\n\nclass VMCheck(ScannerAdapter):\n \"\"\" Check a VMware vCenter environment\n\n Valid options:\n\n * vcenter.host - vCenter-Server\n * vcenter.username - vCenter Username\n * vcenter.password - vCenter Password\n\n \"\"\"\n\n vms = None\n \"\"\" retrieved VM informations \"\"\"\n\n def initialize(self):\n \"\"\" Fetch all VMs and configuration data from vCenter\n :return:\n \"\"\"\n url = \"https://%s/sdk/vimService.wsdl\" % (self.options[\"vcenter.host\"])\n location = \"https://%s/sdk/vimService\" % (self.options[\"vcenter.host\"])\n\n client = Client(url, location = location)\n\n serviceInstance = client.factory.create('ns0:ManagedObjectReference')\n serviceInstance._type = \"ServiceInstance\"\n serviceInstance.value = \"ServiceInstance\"\n\n serviceContent = client.service.RetrieveServiceContent(\n serviceInstance\n )\n\n client.service.Login(\n serviceContent.sessionManager,\n self.options[\"vcenter.username\"],\n self.options[\"vcenter.password\"]\n )\n\n containerView = client.service.CreateContainerView(\n serviceContent.viewManager,\n serviceContent.rootFolder,\n [\"VirtualMachine\"],\n True\n )\n\n propertySpec = client.factory.create(\"ns0:PropertySpec\")\n propertySpec.all = True\n propertySpec.type = \"VirtualMachine\"\n\n objectSpec = client.factory.create(\"ns0:ObjectSpec\")\n objectSpec.obj = containerView\n objectSpec.skip = True\n\n traversalSpec = client.factory.create(\"ns0:TraversalSpec\")\n traversalSpec.path = \"view\"\n traversalSpec.skip = False\n traversalSpec.type = \"ContainerView\"\n\n objectSpec.selectSet.append(traversalSpec)\n\n propertyFilterSpec = client.factory.create(\"ns0:PropertyFilterSpec\")\n propertyFilterSpec.propSet.append(propertySpec)\n propertyFilterSpec.objectSet.append(objectSpec)\n\n containerContents = client.service.RetrievePropertiesEx(\n serviceContent.propertyCollector,\n 
[propertyFilterSpec]\n )\n\n self.vms = {}\n\n for object in containerContents.objects:\n name = \"\"\n guestInfo = \"\"\n\n for prop in object.propSet:\n if prop[\"name\"].lower() == \"name\":\n name = prop[\"val\"]\n elif prop[\"name\"].lower() == \"guest\":\n guestInfo = prop[\"val\"]\n\n self.vms[name] = {\n \"name\": name,\n \"guestInfo\": guestInfo\n }\n\n return True\n\n def scan(self):\n\n for name, vm in self.vms.iteritems():\n\n found = False\n\n for id, object in self.object_cache.iteritems():\n\n if object[\"name\"].lower() == name.lower():\n found = True\n\n if not found:\n\n hostName = \"unknown\"\n ipAddress = \"unknown\"\n\n if \"hostName\" in vm[\"guestInfo\"]:\n hostName = vm[\"guestInfo\"][\"hostName\"]\n\n if \"ipAddress\" in vm[\"guestInfo\"]:\n ipAddress = vm[\"guestInfo\"][\"ipAddress\"]\n\n logging.error(\n \"VM %(name)s not documented or not found. Guest-Info:\\n\"\n \"Hostname: %(hostName)s\\n\"\n \"IP-Adress: %(ipAddress)s\" % {\n \"name\": name,\n \"hostName\": hostName,\n \"ipAddress\": ipAddress\n }\n )","sub_path":"qualityscan/VMCheck.py","file_name":"VMCheck.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520793769","text":"\"\"\"\r\n============================================\r\nvidgear library code is placed under the MIT license\r\nCopyright (c) 2019 Abhishek Thakur\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in\r\nall copies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\nTHE SOFTWARE.\r\n===============================================\r\n\"\"\"\r\n\r\n#Video credit: http://www.liushuaicheng.org/CVPR2014/index.html\r\n\r\nimport pytest\r\nfrom vidgear.gears import VideoGear\r\n\r\n\r\n\r\ndef test_PiGear_import():\r\n\t\"\"\"\r\n\tTesting VideoGear Import - made to fail when PiGear class is imported\r\n\t\"\"\"\r\n\twith pytest.raises(ImportError):\r\n\t\tstream = VideoGear(enablePiCamera = True, logging = True).start()\r\n\t\tstream.stop()\r\n\r\n\r\n\r\ndef test_CamGear_import():\r\n\t\"\"\"\r\n\tTesting VideoGear Import - Passed if CamGear Class is Imported sucessfully\r\n\t\"\"\"\r\n\ttry:\r\n\t\tUrl = 'rtsp://184.72.239.149/vod/mp4:BigBuckBunny_175k.mov'\r\n\t\toptions = {'THREADED_QUEUE_MODE':False}\r\n\t\toutput_stream = VideoGear(source = Url, **options).start()\r\n\t\toutput_stream.stop()\r\n\texcept Exception as e:\r\n\t\tpytest.fail(str(e))\r\n\r\n\r\n\r\ndef test_video_stablization():\r\n\t\"\"\"\r\n\tTesting VideoGear Video Stablization Feature - Passed if ran sucessfully\r\n\t\"\"\"\r\n\ttry:\r\n\t\tUrl = 'http://www.liushuaicheng.org/CVPR2014/data/example4_train_input.avi'\r\n\t\toptions = {'SMOOTHING_RADIUS': 5, 'BORDER_SIZE': 0, 'BORDER_TYPE': 'replicate'}\r\n\t\tstab_stream = VideoGear(source = Url, stabilize = True, logging = True, **options).start()\r\n\t\twhile True:\r\n\t\t\tframe = stab_stream.read() #read stablized frames\r\n\t\t\tif frame is None:\r\n\t\t\t\tbreak\r\n\t\tstab_stream.stop()\r\n\texcept Exception as e:\r\n\t\tpytest.fail(str(e))\r\n","sub_path":"vidgear/tests/videocapture_tests/test_videogear.py","file_name":"test_videogear.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324757417","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\nimport time\n\nfrom datetime import datetime\nfrom threading import Thread\nfrom threading import Timer\nfrom websocket import create_connection\n\nfrom livecli.cache import Cache\nfrom livecli.exceptions import PluginError\nfrom livecli.plugin import Plugin\nfrom livecli.plugin import PluginOptions\nfrom livecli.plugin.api import http\nfrom livecli.plugin.api import useragents\nfrom livecli.plugin.api import validate\nfrom livecli.stream import RTMPStream\nfrom livecli.utils import filter_urlquery\n\n__livecli_docs__ = {\n 'domains': [\n 'live.fc2.com',\n ],\n 'geo_blocked': [],\n 'notes': '',\n 'live': True,\n 'vod': False,\n 'last_update': '2018-05-02',\n}\n\n\nclass FC2(Plugin):\n '''Livecli Plugin for live.fc2.com'''\n\n url_login = 'https://secure.id.fc2.com/?mode=login&switch_language=en'\n url_member_api = 'https://live.fc2.com/api/memberApi.php'\n url_server = 'https://live.fc2.com/api/getControlServer.php'\n\n _url_re = re.compile(r'''https?://live\\.fc2\\.com/(?P\\d+)/?$''')\n\n count = 0\n count_ping = 0\n\n _version_schema = validate.Schema({\n 'status': int,\n 'data': {\n 'channel_data': {\n 'channelid': validate.text,\n 'userid': validate.text,\n 'adult': int,\n 'login_only': int,\n 'version': validate.text,\n 'fee': int,\n },\n 'user_data': {\n 'is_login': int,\n 'userid': int,\n 'fc2id': int,\n 'name': validate.text,\n 'point': int,\n 'adult_access': int,\n 'recauth': int,\n }\n }\n })\n\n host_data = ''\n host_found 
= False\n\n expires_time = 3600 * 24\n\n options = PluginOptions({\n 'username': None,\n 'password': None,\n 'purge_credentials': None\n })\n\n def __init__(self, url):\n super(FC2, self).__init__(url)\n self._session_attributes = Cache(filename='plugin-cache.json', key_prefix='fc2:attributes')\n self._authed = (self._session_attributes.get('fcu')\n and self._session_attributes.get('fgcv')\n and self._session_attributes.get('FCSID')\n and self._session_attributes.get('login_status')\n and self._session_attributes.get('glgd_val')\n and self._session_attributes.get('PHPSESSID')\n and self._session_attributes.get('secure_check_fc2'))\n self._expires = self._session_attributes.get('expires', time.time() + self.expires_time)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url)\n\n def set_expires_time_cache(self):\n expires = time.time() + self.expires_time\n self._session_attributes.set('expires', expires, expires=self.expires_time)\n\n def _login(self, username, password):\n '''login and update cached cookies'''\n self.logger.debug('login ...')\n http.get(self.url)\n data = {\n 'pass': password,\n 'email': username,\n 'done': 'livechat',\n 'keep_login': 1\n }\n\n http.post(self.url_login, data=data, allow_redirects=True)\n for cookie in http.cookies:\n self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)\n\n if (self._session_attributes.get('fcu')\n and self._session_attributes.get('fgcv')\n and self._session_attributes.get('FCSID')\n and self._session_attributes.get('login_status')\n and self._session_attributes.get('glgd_val')\n and self._session_attributes.get('PHPSESSID')\n and self._session_attributes.get('secure_check_fc2')):\n\n self.logger.debug('New session data')\n self.set_expires_time_cache()\n return True\n else:\n self.logger.error('Failed to login, check your username/password')\n return False\n\n def _get_version(self, user_id):\n data = {\n 'user': 1,\n 'channel': 1,\n 'profile': 1,\n 'streamid': int(user_id)\n }\n res = http.post(self.url_member_api, data=data)\n res_data = http.json(res, schema=self._version_schema)\n channel_data = res_data['data']['channel_data']\n user_data = res_data['data']['user_data']\n\n if (channel_data['login_only'] != 0 and user_data['is_login'] != 1):\n raise PluginError('A login is required for this stream.')\n\n if channel_data['fee'] != 0:\n raise PluginError('Only streams without a fee are supported by Livecli.')\n\n version = channel_data['version']\n if user_data['is_login']:\n self.logger.info('Logged in as {0}'.format(user_data['name']))\n self.logger.debug('Found version: {0}'.format(version))\n return version\n\n def payload_msg(self, name):\n ''' Format the WebSocket message '''\n self.count_ping += 1\n payload = json.dumps(\n {\n 'name': str(name),\n 'arguments': {},\n 'id': int(self.count_ping)\n }\n )\n return payload\n\n def _get_ws_url(self, user_id, version):\n self.logger.debug('_get_ws_url ...')\n data = {\n 'channel_id': user_id,\n 'channel_version': version,\n 'client_type': 'pc',\n 'client_app': 'browser'\n }\n\n res = http.post(self.url_server, data=data)\n w_data = http.json(res)\n if w_data['status'] == 11:\n raise PluginError('The broadcaster is currently not available')\n\n new_dict = {\n 'control_token': w_data['control_token'],\n 'mode': 'pay',\n 'comment': '0',\n }\n ws_url = filter_urlquery(w_data['url'], new_dict=new_dict)\n self.logger.debug('WS URL: {0}'.format(ws_url))\n return ws_url\n\n def _get_ws_data(self, ws_url):\n self.logger.debug('_get_ws_data ...')\n 
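# Open the control WebSocket and ask the server for the media connection details.\n 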
ws = create_connection(ws_url)\n ws.send(self.payload_msg('get_media_server_information'))\n\n def ws_ping():\n ''' ping the WebSocket '''\n if ws.connected is True:\n t1 = Timer(30.0, ws_ping)\n t1.daemon = True\n t1.start()\n ws.send(self.payload_msg('heartbeat'))\n\n def ws_recv():\n ''' process incoming WebSocket messages '''\n while True:\n self.count += 1\n data = json.loads(ws.recv())\n time_utc = datetime.utcnow().strftime('%H:%M:%S UTC')\n if data['name'] not in ['comment', 'ng_commentq', 'user_count', 'ng_comment']:\n self.logger.debug('{0} - {1} - {2}'.format(time_utc, self.count, data['name']))\n\n if data['name'] == '_response_' and data['arguments'].get('host'):\n self.logger.debug('Found host data')\n self.host_data = data\n self.host_found = True\n elif data['name'] == 'media_connection':\n self.logger.debug('successfully opened stream')\n elif data['name'] == 'control_disconnection':\n break\n elif data['name'] == 'publish_stop':\n self.logger.debug('Stream ended')\n elif data['name'] == 'channel_information':\n if data['arguments'].get('fee') != 0:\n self.logger.error('Stream requires a fee of {0} now, this is not supported by Livecli.'.format(data['arguments'].get('fee')))\n break\n\n ws.close()\n\n # WebSocket background process\n ws_ping()\n t2 = Thread(target=ws_recv)\n t2.daemon = True\n t2.start()\n\n # wait for ws_recv to deliver the host data, giving up after 30 messages\n host_timeout = False\n while not self.host_found:\n if self.count >= 30:\n host_timeout = True\n break\n time.sleep(0.1)\n\n if host_timeout:\n return False\n return True\n\n def _get_rtmp(self, data):\n self.logger.debug('_get_rtmp ...')\n\n app = filter_urlquery(data['application'],\n new_dict={'media_token': data['media_token']})\n host = data['host']\n\n params = {\n 'app': app,\n 'flashVer': 'WIN 29,0,0,140',\n 'swfUrl': 'https://live.fc2.com/swf/liveVideo.swf',\n 'tcUrl': 'rtmp://{0}/{1}'.format(host, app),\n 'live': 'yes',\n 'pageUrl': self.url,\n 'playpath': data['play_rtmp_stream'],\n 'host': host,\n }\n yield 'live', RTMPStream(self.session, params)\n\n def _get_streams(self):\n http.headers.update({\n 'User-Agent': useragents.FIREFOX,\n 'Referer': self.url\n })\n\n login_username = self.get_option('username')\n login_password = self.get_option('password')\n\n if self.options.get('purge_credentials'):\n self._session_attributes.set('fcu', None, expires=0)\n self._session_attributes.set('fgcv', None, expires=0)\n self._session_attributes.set('FCSID', None, expires=0)\n self._session_attributes.set('login_status', None, expires=0)\n self._session_attributes.set('glgd_val', None, expires=0)\n self._session_attributes.set('PHPSESSID', None, expires=0)\n self._session_attributes.set('secure_check_fc2', None, expires=0)\n self._authed = False\n self.logger.info('All credentials were successfully removed.')\n\n if self._authed:\n if self._expires < time.time():\n self.logger.debug('get new cached cookies')\n # login after 24h\n self.set_expires_time_cache()\n self._authed = False\n else:\n self.logger.info('Attempting to authenticate using cached cookies')\n http.cookies.set('fcu', self._session_attributes.get('fcu'))\n http.cookies.set('fgcv', self._session_attributes.get('fgcv'))\n http.cookies.set('FCSID', self._session_attributes.get('FCSID'))\n http.cookies.set('login_status', self._session_attributes.get('login_status'))\n http.cookies.set('glgd_val', self._session_attributes.get('glgd_val'))\n http.cookies.set('PHPSESSID', self._session_attributes.get('PHPSESSID'))\n http.cookies.set('secure_check_fc2', 
self._session_attributes.get('secure_check_fc2'))\n\n if (not self._authed and login_username and login_password):\n self._login(login_username, login_password)\n\n match = self._url_re.match(self.url)\n if not match:\n return\n\n user_id = match.group('user_id')\n\n version = self._get_version(user_id)\n ws_url = self._get_ws_url(user_id, version)\n if self._get_ws_data(ws_url):\n return self._get_rtmp(self.host_data['arguments'])\n\n\n__plugin__ = FC2\n","sub_path":"src/livecli/plugins/fc2.py","file_name":"fc2.py","file_ext":"py","file_size_in_byte":11190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"447345893","text":"\nimport logging \nfrom flask import Flask,request\nfrom telegram.ext import Updater,CommandHandler,MessageHandler,Filters,Dispatcher\nfrom telegram import Bot,Update,ReplyKeyboardMarkup\nfrom utils import get_reply\nfrom utils import fetch_news,topics_keyboard\n\n\n\n#enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s- %(levelname)s-%(message)s',level=logging.INFO)\nlogger=logging.getLogger(__name__)\n\nTOKEN=\"1272801736:AAHN2rMaLsSPqXjv8hYaSWYbrjJUXoqVySo\"\n\napp=Flask(__name__) #creating a Flask app object\n\n@app.route('/')\ndef index():\n\treturn \"Hello!\"\n\n\n@app.route(f'/{TOKEN}',methods=['GET','POST'])\ndef webhook():\n\t\"\"\"webhook view which receives updates from telegram\"\"\"\n\n\t#create update object from json-format request data\n\tupdate=Update.de_json(request.get_json(),bot)\n\t#process update\n\tdp.process_update(update) #dispatcher responsible for handling updates\n\treturn \"ok\"\n\n\n\n\ndef start(bot,update):\n author=update.message.from_user.first_name\n reply=\"Hi! {}\".format(author)\n update.message.reply_text(reply)\n \n\ndef _help(bot,update):\n help_txt=\"What help do you want?\"\n update.message.reply_text(help_txt)\n\ndef news(bot,update):\n bot.send_message(chat_id=update.message.chat_id,text=\"Choose a category\",\n reply_markup=ReplyKeyboardMarkup(keyboard=topics_keyboard,one_time_keyboard=True))\n\ndef reply_text(bot,update):\n \n intent,reply=get_reply(update.message.text,update.message.chat_id)\n \n if intent==\"get_news\":\n update.message.reply_text(\"Okay! Here's the news\")\n articles=fetch_news(reply)\n for article in articles:\n update.message.reply_text(article['link'])\n\n else:\n update.message.reply_text(reply)\n\n\n# python-telegram-bot (pre-v12) calls error handlers with (bot, update, error)\ndef error(bot,update,error):\n logger.error(\"Update '%s' caused error '%s'\",update,error)\n\n\nbot=Bot(TOKEN)\ntry:\n bot.set_webhook(\"https://radiant-headland-89852.herokuapp.com/\"+TOKEN)\n\nexcept Exception as e:\n print(e)\n \ndp=Dispatcher(bot,None)\n\ndp.add_handler(CommandHandler('start',start))\ndp.add_handler(CommandHandler('help',_help))\ndp.add_handler(CommandHandler('news',news))\ndp.add_handler(MessageHandler(Filters.text,reply_text))\ndp.add_error_handler(error)\n\n\n\nif __name__ == '__main__':\n app.run(port=8443)\n","sub_path":"News Bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"574336618","text":"from socket import gethostname\n\nclass CurrentEnvironment(object):\n\n def __init__(s):\n s.hostname = gethostname()\n if s.hostname == 'cortex.ml.cmu.edu':\n s.base_data_dir = '/share/volume0/newmeg/'\n s.cassandra_ip = '127.0.0.1'\n s.cassandra_port = 9042\n s.cassandra_dev_keyspace = 'development'\n elif s.hostname == 'eggss-MacBook-Pro.local':\n s.base_data_dir = 
'/Users/dhowarth/work/db/data'\n s.cassandra_ip = '127.0.0.1'\n s.cassandra_port = 9042\n s.cassandra_dev_keyspace = 'development'\n else:\n raise ValueError('Unknown machine. You must modify CurrentEnvironment in currenv.py')\n","sub_path":"currenv.py","file_name":"currenv.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"363071908","text":"import subprocess\r\nimport sys\r\n\r\nimport os\r\nimport importlib\r\nimport contextlib\r\n\r\ndef install(package):\r\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\r\n\r\nrequired = []\r\nfailed = []\r\n\r\ntry:\r\n file = open(\"requirements.txt\", \"r\")\r\n file_lines = file.readlines()\r\n required = [line.strip().lower() for line in file_lines]\r\n file.close()\r\nexcept FileNotFoundError:\r\n print(\"[ERROR] requirements.txt file not found\")\r\n\r\nif len(required) > 0:\r\n print(\"[INPUT] You are about to install\", len(required), \"package(s), would you like to proceed (y/n):\", end=\" \")\r\n ans = input()\r\n if ans.lower() == \"y\":\r\n for package in required:\r\n try:\r\n print(\"[LOG] LOOKING FOR:\", package)\r\n with contextlib.redirect_stdout(None):\r\n __import__(package)\r\n print(\"[LOG]\", package, \"is already installed, skipping...\")\r\n except ImportError:\r\n print(\"[LOG]\", package, \"not installed\")\r\n try:\r\n print(\"[LOG] Trying to install\", package, \"via pip\")\r\n try:\r\n import pip\r\n except:\r\n print(\"[EXCEPTION] pip is not installed\")\r\n print(\"[LOG] Trying to install pip\")\r\n import get_pip # expects a local get_pip.py (https://bootstrap.pypa.io/get-pip.py)\r\n get_pip.main()\r\n print(\"[LOG] pip has been installed\")\r\n print(\"[LOG] installing\", package)\r\n install(package)\r\n with contextlib.redirect_stdout(None):\r\n __import__(package)\r\n print(\"[LOG]\", package, \"has been installed\")\r\n except Exception as e:\r\n print(\"[ERROR] could not install\", package, \"-\", e)\r\n failed.append(package)\r\n else:\r\n print(\"[STOP] Operation terminated by user\")\r\nelse:\r\n print(\"[LOG] No packages to install\")\r\nif len(failed) > 0:\r\n print(\"[FAILED]\", len(failed), \"package(s) were not installed:\", end=\" \")\r\n for x, package in enumerate(failed):\r\n if x != len(failed) - 1:\r\n print(package, end=\", \")\r\n else:\r\n print(package)\r\n\r\n","sub_path":"install_requirements.py","file_name":"install_requirements.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"415744881","text":"#encoding: utf-8\n\nfrom flask import Flask,render_template\nfrom models import Movie,session\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n # 1. 
Fetch all the movies\n movies = session.query(Movie).all()\n return render_template('index.html',movies=movies)\n\n@app.route('/detail/<movie_id>/')\ndef detail(movie_id):\n movie = session.query(Movie).get(movie_id)\n return render_template('detail.html',movie=movie)\n\n\nif __name__ == '__main__':\n app.run(debug=True,port=7000)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"174925065","text":"from numpy import *\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ndef readdata(filename):\n numfeat = len(open(filename).readline().split('\\t'))-1\n datamat = []\n labelmat = []\n fr = open(filename)\n for line in fr.readlines():\n linearr = []\n curline = line.strip().split('\\t')\n for i in range(numfeat):\n linearr.append(float(curline[i]))\n datamat.append(linearr)\n labelmat.append(float(curline[-1]))\n return datamat, labelmat\ndef stocgraddscent1(xArr,yArr,numIter=200):\n S=[]\n xmat = array(xArr)\n m, n = shape(xmat)\n alpha = 0.01\n weights = ones(n)\n for j in range(numIter):\n for i in range(m):\n yHat = sum(xmat[i]*weights)\n deltws = xmat[i]*(yArr[i]-yHat)\n weights += alpha*deltws\n s = sum(weights)\n S.append(s)\n ax.plot(range(numIter), S,'purple')\ndef rssError(yArr,yHatArr):\n a=sum((yArr-yHatArr)**2)\n return a\ndef graddscent(xArr,yArr):\n S=[]\n xmat = mat(xArr)\n ymat = mat(yArr).T\n m, n = shape(xmat)\n alpha = 0.01\n maxcycles = 200\n weights = ones((n, 1))\n for k in range(maxcycles):\n yHat = xmat*weights\n deltws = xmat.T*(ymat-yHat)\n weights += alpha*deltws/m\n yHat1 = xmat * weights\n s = sum(weights)\n S.append(s)\n ax.plot(range(maxcycles),S,'red')\ndef stocgraddscent2(xArr,yArr,numIter=200):\n S=[]\n xmat = array(xArr)\n m, n = shape(xmat)\n weights = ones(n)\n for j in range(numIter):\n for i in range(m):\n alpha = 4/(1+i+j)+0.001\n yHat = sum(xmat[i]*weights)\n deltws = xmat[i]*(yArr[i]-yHat)\n weights += alpha*deltws\n s = sum(weights)\n S.append(s)\n ax.plot(range(numIter), S, 'black')\n\nabx, aby=readdata('ex2.txt')\nstocgraddscent1(abx[0:3000],aby[0:3000],200)\ngraddscent(abx[0:3000],aby[0:3000])\nstocgraddscent2(abx[0:3000],aby[0:3000],200)\nplt.show()","sub_path":"python/program1/高/draw2_.py","file_name":"draw2_.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"20080845","text":"from typing import Any, Dict, List, Optional # noqa\n\nimport sublime\nimport sublime_plugin\n\nfrom .scripts import deep_dict\nfrom .scripts import utilities\n\n\n# Bit ugly that we take this approach. 
We feel the need to do so because I\n# can't see how to iterate over a ST settings object.\nCURRENT_SETTINGS = {\n \"buffer/on\",\n \"builder/auto/extra_opts_for_ConTeXt\",\n \"builder/auto/on\",\n \"builder/auto/open_PDF_after_build\",\n \"builder/auto/output/show\",\n \"builder/auto/output/show_ConTeXt_path\",\n \"builder/auto/output/show_errors\",\n \"builder/auto/output/show_errors_inline\",\n \"builder/auto/output/show_full_command\",\n \"builder/auto/return_focus_after_open_PDF\",\n \"builder/normal/open_PDF_after_build\",\n \"builder/normal/opts_for_ConTeXt\",\n \"builder/normal/output/show\",\n \"builder/normal/output/show_ConTeXt_path\",\n \"builder/normal/output/show_errors\",\n \"builder/normal/output/show_errors_inline\",\n \"builder/normal/output/show_full_command\",\n \"builder/normal/return_focus_after_open_PDF\",\n \"citations/command_regex\",\n \"citations/format\",\n \"citations/on\",\n \"file_links/on\",\n \"option_completions/on\",\n \"path\",\n \"PDF/viewer\",\n \"pop_ups/hang_indentation\",\n \"pop_ups/line_break\",\n \"pop_ups/match_indentation\",\n \"pop_ups/methods/on_hover\",\n \"pop_ups/methods/on_modified\",\n \"pop_ups/show_copy_pop_up\",\n \"pop_ups/show_source_files\",\n \"pop_ups/try_generate_on_demand\",\n \"references/command_regex\",\n \"references/on\",\n \"script/timeout\",\n}\n\n\ndef simplify(obj) -> str:\n if isinstance(obj, str):\n return obj\n elif isinstance(obj, dict):\n return \"[\" + \"] [\".join(sorted(obj)) + \"]\"\n elif isinstance(obj, list):\n return \"[\" + \"] [\".join(tup[0] for tup in sorted(obj) if tup) + \"]\"\n return str(obj)\n\n\n# Would be nice to do the `quick_panel` in a better way, with a row for each\n# sub-option. However, I'm not sure is that's possible at the moment: it seems\n# that each entry is expected to have the same number of rows, which limits our\n# options.\nclass SimpleContextSettingsControllerCommand(\n utilities.BaseSettings, sublime_plugin.WindowCommand,\n):\n def reload_settings(self) -> None:\n super().reload_settings()\n self.context_paths = \\\n utilities.get_setting_location(self, \"ConTeXt_paths\", default={})\n\n def update_settings(self) -> None:\n self.current_settings = {} # type: Dict[str, Any]\n for k in CURRENT_SETTINGS:\n deep_dict.set_safe(\n self.current_settings, k.split(\"/\"), self.get_setting(k),\n )\n\n def run(self) -> None:\n self.reload_settings()\n self.update_settings()\n self.encode_settings()\n self.last_scheme = None # type: Optional[str]\n self.location = [] # type: List[str]\n self.history = {} # type: Dict[int, int]\n self.run_panel()\n\n def run_panel(self) -> None:\n self.window.show_quick_panel(\n self.flatten_current_level(),\n self.run_handle,\n selected_index=self.get_history(),\n )\n\n def run_handle(self, index: int) -> None:\n if index < 0:\n return\n\n self.set_history(index)\n here = self.current_level()\n key = self.flatten_current_level()[index][0]\n\n if key == \"..\":\n self.location.pop()\n self.run_panel()\n\n elif key == \"setting_groups\":\n self.location.append(key)\n self.run_panel_scheme()\n\n else:\n value = here[key]\n self.location.append(key)\n\n # It's nice to be able to quickly toggle booleans, but we also want\n # the option to change a boolean into, say, a string. 
So we do this\n # compromise.\n if isinstance(value, bool):\n self.window.show_input_panel(\n \"new value:\",\n str(not value),\n self.on_done,\n self.on_change,\n self.on_cancel,\n )\n\n elif isinstance(value, (int, float, str)) or value is None:\n self.window.show_input_panel(\n \"new value:\",\n str(value),\n self.on_done,\n self.on_change,\n self.on_cancel,\n )\n\n elif isinstance(value, utilities.Choice):\n self.run_panel_choice()\n\n else:\n self.run_panel()\n\n def run_panel_scheme(self) -> None:\n self.window.show_quick_panel(\n self.flatten_current_level(),\n self.run_handle_scheme,\n selected_index=self.get_history(),\n )\n\n def run_handle_scheme(self, index: int) -> None:\n if index < 0:\n return\n\n self.set_history(index)\n here = self.current_level()\n key = self.flatten_current_level()[index][0]\n\n if key == \"..\":\n self.location.pop()\n self.run_panel()\n else:\n value = here[key]\n self.last_scheme = key\n self.decode_settings()\n for location, val in deep_dict.iter_(value):\n deep_dict.set_safe(self.encoded_settings, location, val)\n self.save(decode=False)\n self.run_panel_scheme()\n\n def run_panel_choice(self) -> None:\n self.window.show_quick_panel(\n self.flatten_current_level(),\n self.run_handle_choice,\n selected_index=self.get_history(),\n )\n\n def run_handle_choice(self, index: int) -> None:\n if index < 0:\n return\n\n self.set_history(index)\n here = self.current_level()\n key = self.flatten_current_level()[index][0]\n\n if key == \"..\":\n self.location.pop()\n self.run_panel()\n else:\n here.set(key)\n deep_dict.set_safe(self.encoded_settings, self.location, here)\n self.save()\n self.run_panel_choice()\n\n def on_done(self, text: str) -> None:\n deep_dict.set_safe(\n self.encoded_settings, self.location, utilities.guess_type(text),\n )\n self.location.pop()\n self.save()\n self.run_panel()\n\n def on_change(self, text: str) -> None:\n pass\n\n def on_cancel(self) -> None:\n self.location.pop()\n self.run_panel()\n\n def current_level(self) -> Any:\n return deep_dict.get_safe(self.encoded_settings, self.location)\n\n def flatten_current_level(self) -> List[List[str]]:\n current_level = self.current_level()\n if self.location and self.location[-1] == \"setting_groups\":\n main = [\n [k, \"[✓]\" if k == self.last_scheme else \"[ ]\"]\n for k in sorted(current_level)\n ]\n elif isinstance(current_level, utilities.Choice):\n main = current_level.to_list(string=True)\n else:\n main = [\n [k, simplify(current_level[k])] for k in sorted(current_level)\n ]\n if self.location:\n return [[\"..\", \"in /{}/\".format(\"/\".join(self.location))]] + main\n return main\n\n def get_history(self) -> int:\n return self.history.get(len(self.location), 0)\n\n def set_history(self, index: int) -> None:\n self.history[len(self.location)] = index\n\n def save(self, decode: bool = True) -> None:\n if decode:\n self.decode_settings()\n self.write_settings()\n for k, v in self.to_write.items():\n self.sublime_settings.set(\"current.{}\".format(k), v)\n sublime.save_settings(\"simple_ConTeXt.sublime-settings\")\n self.reload_settings()\n self.encode_settings()\n\n def encode_settings(self) -> None:\n \"\"\"\n Load the settings on file into memory, and perform some simple\n transformations to them.\n \"\"\"\n\n self.encoded_settings = self.current_settings\n self.encoded_settings[\"path\"] = utilities.Choice(\n self.context_paths, choice=self.current_settings.get(\"path\"),\n )\n viewer = utilities.Choice(\n utilities.get_setting_location(self, \"PDF_viewers\", default={}),\n 
choice=self.get_setting(\"PDF/viewer\"),\n )\n self.encoded_settings.setdefault(\"PDF\", {})[\"viewer\"] = viewer\n self.encoded_settings[\"setting_groups\"] = \\\n self.sublime_settings.get(\"setting_groups\", {})\n\n def decode_settings(self) -> None:\n \"\"\"\n Write the settings in memory onto the appropriate file, undoing the\n transformations as appropriate.\n \"\"\"\n\n self.current_settings[\"path\"] = self.encoded_settings[\"path\"].get()\n self.current_settings.get(\"PDF\", {})[\"viewer\"] = \\\n self.encoded_settings[\"PDF\"][\"viewer\"].get()\n del self.current_settings[\"setting_groups\"]\n\n def write_settings(self) -> None:\n self.to_write = {} # type: Dict[str, Any]\n for k, v in deep_dict.iter_(self.current_settings):\n for opt in {\"extra_opts_for_ConTeXt\", \"opts_for_ConTeXt\"}:\n if opt in k:\n i = k.index(opt) + 1\n deep_dict.set_safe(\n self.to_write, [\"/\".join(k[:i])] + k[i:], v,\n )\n break\n else:\n self.to_write[\"/\".join(k)] = v\n\n\nclass SimpleContextEditSettingsCommand(sublime_plugin.WindowCommand):\n def run(self, *args, **kwargs) -> None:\n base_file = \\\n \"${packages}/simple_ConTeXt/simple_ConTeXt.sublime-settings\"\n dict_ = {\"base_file\": base_file, \"default\": \"{\\n\\t$0\\n}\\n\"}\n sublime.run_command(\"edit_settings\", dict_)\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493718","text":"# Написать функцию, которая рассчитывает сумму всех цифр некоторого трехзначного числа,\n# введенного пользователем в консоли, без использования операторов цикла.\n# a) cо строками,\n# б) без использования строк.\n\nnumber = input('Enter 3 digit number, please\\n')\n\ndef string_var ():\n fnum = number.find('')\n sum1 = number[fnum]\n sum2 = number[fnum+1]\n sum3 = number[fnum+2]\n sum = int(sum1) + int(sum2) + int(sum3)\n return sum\ndef integer_var():\n a = int(number) % 10\n b = int(number) // 10\n c = int(b) % 10\n d = int(b) // 10\n return d + c + a\n\nprint('The sum of entered number are:', '\\n1) String variant -', string_var(), '\\n2) Integer variant -', integer_var())","sub_path":"HW7/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567569712","text":"from ..charting import bar\nimport os\nimport glob\n\nfrom .. import fin_time as t\n\ndef file_get_symbol_and_tf(fn):\n entries = fn.split('\\\\')[-1].split('.')\n return entries[0], entries[1]\n\ndef grab_data_files(directory_path):\n ''' Return list of files in data directory'''\n return glob.glob(os.path.join(directory_path, '*.csv'))\n\ndef load_data(filename):\n '''Load data from file to a list of type bars'''\n fd = open(filename)\n\n bars = []\n\n # Get our backtesting data\n for line in fd :\n entry = line.split(',')\n\n '''\"Time\",\"Open\", \"High\", \"Low\", \"Close\"'''\n try:\n cur_bar = bar.Bar(float(entry[1]), float(entry[2]), float(entry[3]), float(entry[4]), t.mql_to_std(entry[0]))\n bars.append(cur_bar)\n except ValueError :\n pass\n\n fd.close()\n \n return bars","sub_path":"data/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"595803524","text":"\"\"\"\nDialite is a pure Python package to show dialogs. It is lightweight,\ncross-platform, and has no dependencies. 
It provides a handful of\nfunctions, each a verb, that can be used to inform(), warn() or fail()\nthe user, or to ask_ok(), ask_retry() or ask_yesno().\n\nDialite works on Windows, OS X and Linux, and falls back to a terminal\ninterface otherwise (or if dialogs are unavailable, e.g. with an SSH\nconnection).\n\nOn Windows, it uses Windows Script Host (cscript.exe). On OS X it uses\nosascript to show a dialog from the frontmost application. On Linux it\nuses Zenity.\n\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport sys\n\nimport logging\nlogger = logging.getLogger(__name__)\ndel logging\n\n# We import all modules; no dynamic loading. That will only complicate things,\n# e.g. for tools like cx_Freeze.\nfrom ._base import BaseApp, TerminalApp, StubApp\nfrom ._windows import WindowsApp\nfrom ._linux import LinuxApp\nfrom ._osx import OSXApp\n\n\nif sys.version_info > (3, ):\n    string_types = str,  # noqa\nelse:  # pragma: no cover\n    string_types = basestring,  # noqa\n\n\ndef _select_app():\n    # Select preferred app\n    if sys.platform.startswith('win'):\n        app = WindowsApp()\n    elif sys.platform.startswith('linux'):\n        app = LinuxApp()\n    elif sys.platform.startswith('darwin'):\n        app = OSXApp()\n    else:\n        app = TerminalApp()\n    \n    # Fall back to tty, or to stub that fails on anything other than info/warn\n    if not app.works():\n        app = TerminalApp()\n    if not app.works():\n        app = StubApp()\n    return app\n\n_the_app = _select_app()\nassert isinstance(_the_app, BaseApp)\n\n\ndef is_supported():\n    \"\"\" Get whether Dialite is supported for the current platform.\n    \"\"\"\n    return not isinstance(_the_app, StubApp)\n\n\ndef fail(title='Error', message=''):\n    \"\"\" Show a message to let the user know that something failed.\n    \n    Parameters:\n        title (str): the text to show as the window title.\n        message (str): the message to show in the body of the dialog.\n    \"\"\"\n    if not isinstance(title, string_types):\n        raise TypeError('fail() title must be a string.')\n    if not isinstance(message, string_types):\n        raise TypeError('fail() message must be a string.')\n    _the_app.fail(title, message)\n\n\ndef warn(title='Warning', message=''):\n    \"\"\" Warn the user about something.\n    \n    Parameters:\n        title (str): the text to show as the window title.\n        message (str): the message to show in the body of the dialog.\n    \"\"\"\n    if not isinstance(title, string_types):\n        raise TypeError('warn() title must be a string.')\n    if not isinstance(message, string_types):\n        raise TypeError('warn() message must be a string.')\n    _the_app.warn(title, message)\n\n\ndef inform(title='Info', message=''):\n    \"\"\" Inform the user about something.\n    \n    Parameters:\n        title (str): the text to show as the window title.\n        message (str): the message to show in the body of the dialog.\n    \"\"\"\n    if not isinstance(title, string_types):\n        raise TypeError('inform() title must be a string.')\n    if not isinstance(message, string_types):\n        raise TypeError('inform() message must be a string.')\n    _the_app.inform(title, message)\n\n\ndef ask_ok(title='Confirm', message=''):\n    \"\"\" Ask the user to confirm something via an ok-cancel question.\n    \n    Parameters:\n        title (str): the text to show as the window title.\n        message (str): the message to show in the body of the dialog.\n    \n    Returns:\n        result (bool): Whether the user selected \"OK\".\n    \"\"\"\n    if not isinstance(title, string_types):\n        raise TypeError('ask_ok() title must be a string.')\n    if not isinstance(message, string_types):\n        raise TypeError('ask_ok() message must be a 
string.')\n return _the_app.ask_ok(title, message)\n\n\ndef ask_retry(title='Retry', message=''):\n \"\"\" Ask the user whether to retry something via a retry-cancel question.\n \n Parameters:\n title (str): the text to show as the window title.\n message (str): the message to show in the body of the dialog.\n \n Returns:\n result (bool): Whether the user selected \"Retry\".\n \"\"\"\n if not isinstance(title, string_types):\n raise TypeError('ask_retry() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_retry() message must be a string.')\n return _the_app.ask_retry(title, message)\n\n\ndef ask_yesno(title='Question', message=''):\n \"\"\" Ask the user a yes-no question.\n \n Parameters:\n title (str): the text to show as the window title.\n message (str): the message to show in the body of the dialog.\n \n Returns:\n result (bool): Whether the user selected \"Yes\".\n \"\"\"\n if not isinstance(title, string_types):\n raise TypeError('ask_yesno() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_yesno() message must be a string.')\n return _the_app.ask_yesno(title, message)\n","sub_path":"flexx/dialite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"435967817","text":"import model_function\r\n\r\n\r\nrandom.seed(1)\r\ndm_rat_invivo_mean_prop=batchFS(\"./mean_TG_\",\"./dm_invivo_label.csv\",1,125)\r\ndm_invivo_mean_res=cptRes(dm_rat_invivo_mean_prop,label)\r\ntg_invivo_mean_res=pd.DataFrame(dm_invivo_mean_res)\r\ndm_invivo_mean_res.to_csv(\"./dm_invivo_mean_res.csv\")\r\n\r\ndata=pd.read_csv(open(\"./mean_TG_19.csv\"))\r\ndata=data.values\r\ndata=data[:,1:len(data[1,:])]\r\nlabel=pd.read_csv(open(\"./dm_invivo_label.csv\"))\r\nlabel=label.values\r\nlabel=label[:,1]\r\n\r\n\r\np_svc=loocvSVC(data,label)\r\np_lr=loocvLR(data,label)\r\np_gbdt=loocvGBDT(data,label)\r\np_knn=loocvKnn(data,label)\r\np_RF=loocvRF(data,label)\r\np_lgb=loocvLGB(data,label)\r\n\r\nlr_fpr,lr_tpr,lr_threshold= roc_curve(label,p_lr)\r\nlr_roc_auc = auc(lr_fpr,lr_tpr)\r\nlr_roc_auc\r\n\r\nlgb_fpr,lgb_tpr,lgb_threshold = roc_curve(label,p_lgb)\r\nlgb_roc_auc = auc(lgb_fpr,lgb_tpr)\r\nlgb_roc_auc\r\ngbdt_fpr,gbdt_tpr,gbdt_threshold = roc_curve(label,p_gbdt)\r\ngbdt_roc_auc = auc(gbdt_fpr,gbdt_tpr)\r\ngbdt_roc_auc\r\n\r\nknn_fpr,knn_tpr,knn_threshold= roc_curve(label,p_knn)\r\nknn_roc_auc = auc(knn_fpr,knn_tpr)\r\nknn_roc_auc\r\n\r\nsvc_fpr,svc_tpr,svc_threshold= roc_curve(label,p_svc)\r\nsvc_roc_auc = auc(svc_fpr,svc_tpr)\r\nsvc_roc_auc\r\n\r\nRF_fpr,RF_tpr,RF_threshold= roc_curve(label,p_RF)\r\nRF_roc_auc = auc(RF_fpr,RF_tpr)\r\nRF_roc_auc","sub_path":"model_dm_invivo.py","file_name":"model_dm_invivo.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228526088","text":"def improbabilityCalculator(coordinates, remove):\r\n res=\"\"\r\n res = tempFun(coordinates,remove,res)\r\n return res\r\n pass\r\n\r\n\r\ndef tempFun(s,rem,res):\r\n length=len(s)\r\n if rem == 0:\r\n res+=s\r\n return res\r\n if rem>length:\r\n res+=\"\"\r\n return \"0\"\r\n if length == 0:\r\n return res\r\n else:\r\n minIndex=0\r\n for i in range(1,rem+1):\r\n if s[i]\\d+)/$', \n\t\tlogin_required(StartView.as_view(permanent=False)), name='start'),\n\turl(r'^questionnaire/(?P\\d+)/$', QuestionnaireView.as_view(), 
\n\t\tname='questionnaire'),\n\turl(r'^questionnaire/(?P\\d+)/(?P\\d+)$', \n\t\tQuestionnaireView.as_view(), name='questionset'),\n#questionnaireblank/poster_id/?role=creator or questionnaireblank/poster_id/?role=consumer\n\turl(r'^questionnaireblank/(?P<poster_id>\\d+)/', QuestionnaireBlankView.as_view(), \n\t\tname='questionnaireblank'),\n#answer/poster_id/?role=creator or answer/poster_id/?role=consumer\n\turl(r'^answer/(?P<poster_id>\\d+)/$', AnswerDetailView.as_view(), \n\t\tname='answer'),\n]","sub_path":"survey/urls/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566439297","text":"#coding=utf-8\n__author__ = 'lzp'\n\nimport os\nimport sys\nimport requests\nimport urllib.parse\nimport hashlib\n\nimport tempfile\n\n\n\n\n\n# http://www.qqxieyi.com/function/ JS simulation\n# WebQQ's MD5 encryption works like this; the formula is:\n# md5(md5(hexchar2bin(md5(p)) + uin) + verify.toUpperCase());\n# verify is the verification code, uin is that \\x0 string, p is the password\n# http://www.cnblogs.com/uu102/archive/2012/09/16/2687959.html\n\n\n\n# The most critical function here is hexchar2bin. First, let's list the basic MD5 functions; they are just the standard MD5 algorithm, pasted directly below, converting plain text into an MD5 digest.\n\n# md5_str: the text to be encrypted\n# return: one round of MD5 encryption\ndef MD5_Encrypt(md5_str):\n    m = hashlib.md5()\n    m.update(md5_str.encode(\"utf-8\"))\n    # m.update(md5_str.encode(\"gbk\"))\n    return str(m.hexdigest()).upper()\n\n\ndef MD5_bin_Encrypt(md5_str):\n    m = hashlib.md5()\n    m.update(md5_str)\n    # m.update(md5_str.encode(\"gbk\"))\n    return str(m.hexdigest()).upper()\n\n\n\n\"\"\" js\nfunction hexchar2bin(str)\n{\n\tvar arr=[];\n\tfor(var i=0;i 12) or (len(findFirstName(name,regularNames)) == 3 and len(name) > 9):\r\n            continue\r\n        if (regularPhoneNumber(phoneNumber) == -1):\r\n            continue\r\n        \r\n        foutputToudi.write(name+','+outphoneNumber + '\\n')\r\n    \r\n    foutputToudi.write('宗凯,13520588785'+'\\n')\r\n    foutputToudi.write('一博,13911552167')    \r\n    \r\n    fName.close()\r\n    fToudi.close()\r\n    foutputToudi.close()   \r\n    \r\n    return \r\n\r\nsourceName = 'E:\\\\Zhiwei\\\\name.txt'\r\nsourceToudi = 'E:\\\\Zhiwei\\\\toudi.txt'\r\nsourceBailing = 'E:\\\\Zhiwei\\\\bailing.txt'\r\n\r\noutputToudi = 'E:\\\\Zhiwei\\\\out\\\\TOUDI.txt'\r\noutputBailing = 'E:\\\\Zhiwei\\\\out\\\\BAILING.txt'\r\n\r\nfiling(sourceToudi,outputToudi,sourceName)\r\nfiling(sourceBailing,outputBailing,sourceName)\r\n\r\nos.startfile('http://10.10.32.229:9080/asumtso/login.d')\r\n\r\n","sub_path":"Zhiweilaiqiaomen/Zhiweilaiqiaomen.py","file_name":"Zhiweilaiqiaomen.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245366911","text":"# atom group counters\nfrom mdgo.shell_functions import get_cation_anion_shells\nimport numpy as np\n\n\ndef get_counts(atom_group):\n    unique_ids = np.unique(atom_group.resids, return_index=True)\n    names, counts = np.unique(atom_group.resnames[unique_ids[1]], return_counts=True)\n    return {i: j for i, j in zip(names, counts)}\n\n\ndef get_pair_type(u, central_species, cation_group, anion_group, radius=4):\n    cation_name = cation_group.names[0]\n    anion_name = anion_group.names[0]\n    first, second, third, fourth = (get_counts(shell) for shell in\n                                    get_cation_anion_shells(u, central_species, cation_group,\n                                                            anion_group, radius))\n    if len(first) == 0:\n        return \"SSIP\"\n    if (len(first) == 1) & (len(second) == 0):\n        return \"CIP\"\n    else:\n        return \"AGG\"\n\n\ndef count_dicts(dict_list):\n    unique_dicts = []\n    dict_counts = []\n    for dic in dict_list:\n        new = True\n        for i, unique_dict in 
enumerate(unique_dicts):\n            if dic == unique_dict:\n                dict_counts[i] += 1\n                new = False\n                break\n        if new:\n            unique_dicts.append(dic)\n            dict_counts.append(1)\n    return zip(dict_counts, unique_dicts)\n\n# def get_detailed_cation_speciation(self, timestep):\n#     \"\"\"\n#     This function should be moved to a utility folder and wrapped; right now\n#     it's not very useful. Currently broken\n#\n#     Returns:\n#\n#     \"\"\"\n#     self.u_wrapped.trajectory[timestep]\n#     solvation_shell_speciation = [get_counts(get_radial_shell(self.u, cation, 3))\n#                                   for cation in self.cations]\n#     counts_by_speciation = count_dicts(solvation_shell_speciation)\n#     print('before: ', len(solvation_shell_speciation))\n#     print('after: ', len(counts_by_speciation))\n#     print('after: ', sum(counts_by_speciation.keys()))\n#     return counts_by_speciation\n","sub_path":"mdgo/counting_utils.py","file_name":"counting_utils.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7350112","text":"def gcd(k,m):\r\n    n=[k,m]\r\n    n.sort()\r\n    \r\n    if n[1]%n[0]==1:\r\n        return 1\r\n    elif n[1]%n[0]==0:\r\n        return n[0]\r\n    else:\r\n        return gcd(n[0],n[1]%n[0])\r\ndata=list(map(int, input().split()))\r\n\r\n\r\nprint(gcd(data[0],data[1]))\r\n","sub_path":"Week 2/Greatest_common_divisor_Assigmen_Week_2_3.py","file_name":"Greatest_common_divisor_Assigmen_Week_2_3.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477290960","text":"from django.conf import settings\r\nfrom django.db import models\r\nfrom products.models import Product\r\n\r\n\r\nclass ProductQuestion(models.Model):\r\n    product = models.ForeignKey(Product, related_name='questions', on_delete=models.CASCADE)\r\n    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"product_questions\", on_delete=models.CASCADE)\r\n    text = models.TextField()\r\n    created_at = models.DateTimeField(auto_now_add=True, db_index=True)\r\n\r\n    class Meta:\r\n        verbose_name = '상품 관련 질문'\r\n        verbose_name_plural = '상품 관련 질문'\r\n\r\n    def __str__(self):\r\n        return '{}] {}'.format(self.id, self.product.name)\r\n\r\n\r\nclass ProductAnswer(models.Model):\r\n    question = models.ForeignKey(ProductQuestion, related_name='answers', on_delete=models.CASCADE)\r\n    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='product_answers', on_delete=models.CASCADE)\r\n    text = models.TextField()\r\n    created_at = models.DateTimeField(auto_now_add=True, db_index=True)\r\n\r\n    class Meta:\r\n        verbose_name = '상품 관련 답변'\r\n        verbose_name_plural = '상품 관련 답변'\r\n\r\n    def __str__(self):\r\n        return '{}] {}| Q_id:{}'.format(self.id, self.question.product.name, self.question.id)\r\n","sub_path":"siiot/products/reply/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146858164","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure(figsize=(8, 3))\nax1 = fig.add_subplot(151, projection = '3d')\nax2 = fig.add_subplot(152, projection = '3d')\nax3 = fig.add_subplot(153, projection = '3d')\nax4 = fig.add_subplot(154, projection = '3d')\nax5 = fig.add_subplot(155, projection = '3d')\n_x = np.arange(4)\n_y = np.arange(5)\n_xx, _yy = np.meshgrid(_x, _y)\nx, y = _xx.ravel(), _yy.ravel()\ntop = x + y\nbottom = np.zeros_like(top)\nwidth = depth = 1\nax1.bar3d(x, y, 
bottom, width, depth, top, shade = True )\nax2.bar3d(x, y, bottom, width, depth, top, shade = False )\nax3.bar3d(x, y, bottom, width, depth, top, shade = False,color='green',zsort='min')\nax4.bar3d(x, y, bottom, width, depth, top, shade = False,color='yellow',zsort='max')\nax5.bar3d(x, y, bottom, width, depth, top, shade = True,alpha=0.5)\nplt.show()","sub_path":"lab_11/zadanie4.py","file_name":"zadanie4.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135080745","text":"#!/usr/bin/python3\nfrom flask import Flask, abort, render_template\nfrom models import storage\n\"\"\" starts a flask web application \"\"\"\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.route('/cities_by_states')\ndef cities_by_states():\n    \"\"\" display HTML page \"\"\"\n    s = storage.all(\"State\")\n    return render_template('8-cities_by_states.html', storage=s)\n\n\n@app.teardown_appcontext\ndef teardown(error):\n    \"\"\" close after request \"\"\"\n    storage.close()\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0')\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303622474","text":"#!/usr/bin/python2.6\n\n\n##################################################################\n# Script Information\n# This script can be used for iADC (2GSps) board testing\n# Connect the input I+ with a frequency such as 13MHz and the clock\n# at 200MHz\n# 20120517@CASPER.Berkeley\n##################################################################\nimport numpy, corr, time, struct, sys, logging, pylab, matplotlib, scipy\n\n#connect roach\nfpga = corr.katcp_wrapper.FpgaClient('192.168.40.67')\ntime.sleep(1)\n\n#pay attention: the snap module is controlled by the inner software register\n#snap64_ctrl, which can be found at /proc/pid/hw/ioreg/\n#the number written to the snap ctrl register is from 0 to 7; do not use the outer trigger\n#to control the snap module\n\nfpga.write_int('reshaper_snap64_ctrl', 0)\ntime.sleep(1)\nfpga.write_int('reshaper_snap64_ctrl', 7)\ntime.sleep(1)\n\n\nd_0 = struct.unpack('>16384b', fpga.read('reshaper_snap64_bram_msb', 8192*2))\n\nfd_0 = []\n\n#put the parallel data into a serial array\nfor i in range(2048):\n    \n    fd_0.append(d_0[i*4+0]/128.0)\n    fd_0.append(d_0[i*4+1]/128.0)\n    fd_0.append(d_0[i*4+2]/128.0)\n    fd_0.append(d_0[i*4+3]/128.0)\n\n\n#plot it\npylab.plot(fd_0, label='data')\npylab.show()\n\n","sub_path":"mdls/mode13/newmode13_roach2_config/read_snap_reshaperout.py","file_name":"read_snap_reshaperout.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"449948951","text":"cnt = int(raw_input())\nnums = [int(t) for t in raw_input().split(' ')]\ns, m = 0, 10**9\nfor n in nums:\n\ts += n\n\tif(n % 2 == 1 and n < m):\n\t\tm = n\nif(s % 2 == 0):\n\tprint(s)\nelse:\n\tprint(s - m)\n","sub_path":"online_judge/codeforces/621, Codeforces Round #341/A. Wet Shark and Odd and Even.py","file_name":"A. 
Wet Shark and Odd and Even.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571596372","text":"text = 'osjgo(kjdngdnhg)'\n\nstack = [] \npchar = {\"(\": \")\", \"{\": \"}\", \"[\": \"]\"}\nfor parenthese in text:\n if parenthese in pchar:\n stack.append(parenthese)\n elif len(stack) == 0 or pchar[stack.pop()] != parenthese:\n print(False)\n break\nprint(len(stack) == 0)","sub_path":"skobki.py","file_name":"skobki.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292531967","text":"import numpy as np\n\ndef transpose(x, source='NHWC', target='NCHW'):\n return x.transpose([source.index(d) for d in target])\n\ndef pad(x, border=4):\n return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')\n\nclass RandomPadandCrop(object):\n \"\"\"Crop randomly the image.\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size,default_pad=4,channel_first=True):\n self.default_pad=default_pad\n self.channel_first=channel_first\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, x):\n\n if self.channel_first:\n x = pad(x, self.default_pad)\n h, w = x.shape[1:]#3*40*40\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n x = x[:, top: top + new_h, left: left + new_w]\n else:\n border=self.default_pad\n x=np.pad(x, [(border, border), (border, border),(0, 0),], mode='reflect')\n h, w = x.shape[0:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n x = x[top: top + new_h, left: left + new_w,:]\n return x\n\nclass RandomFlip(object):\n \"\"\"Flip randomly the image.\n \"\"\"\n\n def __init__(self, channel_first=True):\n self.channel_first=channel_first\n def __call__(self, x):\n if np.random.rand() < 0.5:\n if self.channel_first:\n x = x[:, :, ::-1]\n else:\n x=x[:,::-1,:]\n #horizontal flip\n return x.copy()\nfrom scipy import misc, ndimage\nimport collections\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray)\nfrom PIL import Image\nimport PIL\nclass Resize(object):\n \"\"\"\n Rescale the given numpy image to a specified size.\n \"\"\"\n\n def __init__(self, size, interpolation=\"bilinear\"):\n assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, pic):\n\n # check type of [pic]\n if not _is_numpy_image(pic):\n raise TypeError('img should be numpy array. 
Got {}'.format(type(pic)))\n\n        if isinstance(self.size, int):\n            # if size is specified with one dimension only, get the second one keeping the\n            # aspect-ratio\n\n            # get the size of the original image\n            w, h = pic.shape[:2]\n            if (w <= h and w == self.size) or (h <= w and h == self.size):\n                return pic\n\n            # calculate the output size keeping the aspect-ratio\n            if w < h:\n                ow = self.size\n                oh = int(self.size * h / w)\n            else:\n                oh = self.size\n                ow = int(self.size * w / h)\n\n            # create the output array\n            img_out = np.zeros((ow, oh, pic.shape[2]))\n\n            if len(pic.shape) == 3:\n                # if 3D image, scale each channel individually\n                for i in range(pic.shape[2]):\n                    img_out[:, :, i] = np.array(Image.fromarray(pic[:, :, i]).resize((ow, oh), resample=PIL.Image.BILINEAR))\n                    #img_out[:, :, i] = misc.imresize(pic[:, :, i], (ow, oh), interp=self.interpolation, mode='F')\n                return img_out\n            else:\n                # if 2D image, scale image\n                #return misc.imresize(pic, (ow, oh), interp=self.interpolation, mode='F')\n                return np.array(Image.fromarray(pic).resize((ow, oh), resample=PIL.Image.BILINEAR))\n        else:\n            # if size is specified with 2 dimensions apply the scale directly\n            # create the output array\n\n            if len(pic.shape) == 3:\n                img_out = np.zeros((self.size[0], self.size[1], pic.shape[2]))\n\n                # if 3D image, scale each channel individually\n                for i in range(pic.shape[2]):\n                    #img_out[:, :, i] = misc.imresize(pic[:, :, i], self.size, interp=self.interpolation, mode='F')\n                    img_out[:, :, i] = np.array(\n                        Image.fromarray(pic[:, :, i]).resize(self.size, resample=PIL.Image.BILINEAR))\n                return img_out\n            else:\n                # if 2D image, scale image\n                #return misc.imresize(pic, self.size, interp=self.interpolation, mode='F')\n                return np.array(Image.fromarray(pic).resize(self.size, resample=PIL.Image.BILINEAR))\nimport numbers\nclass CenterCrop(object):\n    \"\"\"Crops the given PIL Image at the center.\n    Args:\n        size (sequence or int): Desired output size of the crop. If size is an\n            int instead of sequence like (h, w), a square crop (size, size) is\n            made.\n    \"\"\"\n\n    def __init__(self, size):\n        if isinstance(size, numbers.Number):\n            self.size = (int(size), int(size))\n        else:\n            self.size = size\n\n    @staticmethod\n    def get_params(pic, output_size):\n        \"\"\"Get parameters for ``crop`` for center crop.\n        Args:\n            pic (np array): Image to be cropped.\n            output_size (tuple): Expected output size of the crop.\n        Returns:\n            tuple: params (i, j, h, w) to be passed to the crop for center crop.\n        \"\"\"\n\n        w, h, c = pic.shape\n        th, tw = output_size\n\n        i = int(round((h - th) / 2.))\n        j = int(round((w - tw) / 2.))\n\n        return i, j, th, tw\n\n    def __call__(self, pic):\n        \"\"\"\n        Args:\n            pic (np array): Image to be cropped.\n        Returns:\n            np array: Cropped image.\n        \"\"\"\n\n        # check type of [pic]\n        if not _is_numpy_image(pic):\n            raise TypeError('img should be numpy array. 
Got {}'.format(type(pic)))\n\n # if image has only 2 channels make them 3\n if len(pic.shape) != 3:\n pic = pic.reshape(pic.shape[0], pic.shape[1], -1)\n\n # get crop params: starting pixels and size of the crop\n i, j, h, w = self.get_params(pic, self.size)\n\n return pic[i:i + h, j:j + w, :]\nimport math\n\ndef _get_inverse_affine_matrix(center, angle, translate, scale, shear):\n # Helper method to compute inverse matrix for affine transformation\n\n # As it is explained in PIL.Image.rotate\n # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1\n # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]\n # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]\n # RSS is rotation with scale and shear matrix\n # RSS(a, scale, shear) = [ cos(a)*scale -sin(a + shear)*scale 0]\n # [ sin(a)*scale cos(a + shear)*scale 0]\n # [ 0 0 1]\n # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1\n\n angle = math.radians(angle)\n shear = math.radians(shear)\n scale = 1.0 / scale\n\n # Inverted rotation matrix with scale and shear\n d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)\n matrix = [\n math.cos(angle + shear), math.sin(angle + shear), 0,\n -math.sin(angle), math.cos(angle), 0\n ]\n matrix = [scale / d * m for m in matrix]\n\n # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1\n matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])\n matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])\n\n # Apply center translation: C * RSS^-1 * C^-1 * T^-1\n matrix[2] += center[0]\n matrix[5] += center[1]\n return matrix\n\ndef create_cutout_mask(img_height, img_width, num_channels, size):\n \"\"\"Creates a zero mask used for cutout of shape `img_height` x `img_width`.\n Args:\n img_height: Height of image cutout mask will be applied to.\n img_width: Width of image cutout mask will be applied to.\n num_channels: Number of channels in the image.\n size: Size of the zeros mask.\n Returns:\n A mask of shape `img_height` x `img_width` with all ones except for a\n square of zeros of shape `size` x `size`. This mask is meant to be\n elementwise multiplied with the original image. 
Additionally returns\n    the `upper_coord` and `lower_coord` which specify where the cutout mask\n    will be applied.\n    \"\"\"\n    assert img_height == img_width\n\n    # Sample center where cutout mask will be applied\n    height_loc = np.random.randint(low=0, high=img_height)\n    width_loc = np.random.randint(low=0, high=img_width)\n\n    # Determine upper right and lower left corners of patch\n    upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))\n    lower_coord = (min(img_height, height_loc + size // 2),\n                   min(img_width, width_loc + size // 2))\n    mask_height = lower_coord[0] - upper_coord[0]\n    mask_width = lower_coord[1] - upper_coord[1]\n    assert mask_height > 0\n    assert mask_width > 0\n\n    mask = np.ones((img_height, img_width, num_channels))\n    zeros = np.zeros((mask_height, mask_width, num_channels))\n    mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = (\n        zeros)\n    return mask, upper_coord, lower_coord\n\ndef cutout_numpy(img, size=16):\n    \"\"\"Apply cutout with mask of shape `size` x `size` to `img`.\n    The cutout operation is from the paper https://arxiv.org/abs/1708.04552.\n    This operation applies a `size`x`size` mask of zeros to a random location\n    within `img`.\n    Args:\n    img: Numpy image that cutout will be applied to.\n    size: Height/width of the cutout mask that will be applied.\n    Returns:\n    A numpy tensor that is the result of applying the cutout mask to `img`.\n    \"\"\"\n    img_height, img_width, num_channels = (img.shape[0], img.shape[1],\n                                           img.shape[2])\n    assert len(img.shape) == 3\n    mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)\n    return img * mask\n","sub_path":"clothing/ops/Transform_ops.py","file_name":"Transform_ops.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343927130","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom pytest import raises\nfrom idpn.ident import TitleID\n\n\ndef test_tid_random():\n    tid = TitleID()\n    assert tid.is_valid()\n\n\ndef test_tid_min():\n\n    tid = TitleID(ident=TitleID.INT_MIN)\n    assert tid.is_valid()\n    tid = TitleID(code=TitleID.CODE_MIN)\n    assert tid.is_valid()\n\n    with raises(ValueError):\n        TitleID(ident=TitleID.INT_MIN - 1)\n    with raises(ValueError):\n        TitleID(code='H')\n\n\ndef test_tid_max():\n\n    tid = TitleID(ident=TitleID.INT_MAX)\n    assert tid.is_valid()\n    tid = TitleID(code=TitleID.CODE_MAX)\n    assert tid.is_valid()\n\n    with raises(ValueError):\n        TitleID(ident=TitleID.INT_MAX + 1)\n    with raises(ValueError):\n        TitleID(code='S6KDOSI')\n\n\ndef test_tid_normalize_text():\n    title = ' This is, ön - crazy € Title!'\n    n = TitleID.normalize_text(title)\n    assert n == 'this is on crazy € title'\n\n\ndef test_tid_normalize_author():\n    author = 'Joy, Dr Browne'\n    nauthor = TitleID.normalize_author(author)\n    assert nauthor == '[j.browne]'\n","sub_path":"tests/test_idpn_ident_title.py","file_name":"test_idpn_ident_title.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451444236","text":"#!/usr/bin/python3\nimport pandas as pd\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n    dataset = pd.read_csv(\"Dataset-clustering.txt\", sep=\"\\t\")\n    dataset.drop([\"Win_2015\", \"Win_2017\"], axis=1, inplace=True)\n    data_coord = []\n    for index, row in dataset.iterrows():\n        data_coord.append((dataset[\"Rank_2015\"][index], dataset[\"Rank_2017\"][index]))\n    
\n    centroids = [(1,1), (25,25)]\n    kmeans(centroids, data_coord, \"scatter-2-1\")\n    \n    return\n\ndef kmeans(centroids, data_coord, name):\n    red = [] \n    blue = [] \n    for coord in data_coord:\n        distance1 = sum([abs(a - b) for a, b in zip(coord, centroids[0])])\n        distance2 = sum([abs(a - b) for a, b in zip(coord, centroids[1])])\n        if distance1 <= distance2:\n            red.append(coord)\n        else:\n            blue.append(coord)\n    centroids_old = centroids\n\n    try:\n        center1 = tuple(np.mean(red, axis=0))\n    except:\n        center1 = centroids_old[0]\n    try:\n        center2 = tuple(np.mean(blue, axis=0))\n    except:\n        center2 = centroids_old[1]\n    centroids_new = [center1, center2]\n\n    check = False\n    while check == False:\n        red_new = [] \n        blue_new = [] \n        for coord in data_coord:\n            distance1 = sum([abs(a - b) for a, b in zip(coord, centroids_new[0])])\n            distance2 = sum([abs(a - b) for a, b in zip(coord, centroids_new[1])])\n            if distance1 <= distance2:\n                red_new.append(coord)\n            else:\n                blue_new.append(coord)\n        centroids_old = centroids_new\n        centroids_new = [tuple(np.mean(red_new, axis=0)), tuple(np.mean(blue_new, axis=0))]\n\n        if red == red_new and blue == blue_new:\n            check = True\n        red = red_new\n        blue = blue_new\n\n    red_x = []\n    red_y = []\n    for data in red:\n        x, y = data\n        red_x.append(x)\n        red_y.append(y)\n    blue_x = []\n    blue_y = []\n    for data in blue:\n        x, y = data\n        blue_x.append(x)\n        blue_y.append(y)    \n\n    plt.scatter(red_x, red_y, c=\"red\")\n    plt.scatter(blue_x, blue_y, c=\"blue\")\n    plt.scatter(centroids_new[0][0], centroids_new[0][1], c=\"red\", marker=\"*\")\n    plt.scatter(centroids_new[1][0], centroids_new[1][1], c=\"blue\", marker=\"*\")\n    plt.title('K-Means Plot')\n    plt.savefig(name)\n    return\n\nif __name__ == \"__main__\":\n    main()","sub_path":"kmadden5-hw3-programming/kmadden5-hw3-2.py","file_name":"kmadden5-hw3-2.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630222297","text":"from django.core.management.base import BaseCommand\nfrom typing import List, Dict\nfrom sixdegrees.tba import headers\nimport requests\nfrom degrees.models import Team, Event, Connection, Chain, Stage\nfrom itertools import product\n\n# years = list(range(2010, 2018))\nyears = list(range(2009, 2018))\n\ntba_url = 'http://localhost:8080/'\n\n\ndef get_events(year: int) -> List[Dict[str, str]]:\n    url = f'{tba_url}/events/{year}/simple'\n    return list(filter(lambda e: e['event_type'] in [0, 1, 2, 3, 4, 5, 6],\n                       requests.get(url, headers=headers).json()))\n\n\ndef get_alliances(key: str) -> Dict[int, List[int]]:\n    url = f'{tba_url}/event/{key}/alliances'\n    alliances = {}\n\n    data = requests.get(url, headers=headers).json()\n    if data is None:\n        return alliances\n\n    for i, alliance in enumerate(data, start=1):\n        allies = []\n        for team in alliance['picks']:\n            allies.append(int(team[3:]))\n\n        alliances.update({\n            i: allies\n        })\n\n    return alliances\n\n\nclass Command(BaseCommand):\n    def handle(self, *args, **options):\n        Team.objects.all().delete()\n        Event.objects.all().delete()\n        Connection.objects.all().delete()\n        Chain.objects.all().delete()\n        Stage.objects.all().delete()\n\n        connections = []\n        chains = []\n        stages = []\n\n        teams_created = set()\n\n        chain_id = 1\n        connection_id = 1\n\n        for year in years:\n            events = get_events(year)\n            for event in events:\n                print(event['key'], end='')\n                ev_obj = Event.objects.create(key=event['key'], name=event['name'], year=event['year'])\n                alliances = get_alliances(event['key'])\n                if len(alliances.items()) == 0:\n                    print(' 
(bad)')\n else:\n print('')\n\n for i, teams in alliances.items():\n for team in teams:\n if team in teams_created:\n continue\n\n Team.objects.create(id=team, num=team)\n teams_created.add(team)\n\n done = []\n for t1 in teams:\n for t2 in teams:\n if t1 == t2 or (t1, t2) in done or (t2, t1) in done:\n continue\n\n c1 = Connection(team_a_id=t1, team_b_id=t2, event=ev_obj, id=connection_id)\n c2 = Connection(team_a_id=t2, team_b_id=t1, event=ev_obj, id=connection_id + 1)\n done.append((t1, t2))\n done.append((t2, t1))\n\n connections.extend([c1, c2])\n\n ch1 = Chain(team_a_id=t1, team_b_id=t2, exists=True, id=chain_id)\n ch2 = Chain(team_a_id=t2, team_b_id=t1, exists=True, id=chain_id + 1)\n ch3 = Chain(team_a_id=t1, team_b_id=t2, exists=True, id=chain_id + 2,\n restricted_to=event['year'])\n ch4 = Chain(team_a_id=t2, team_b_id=t1, exists=True, id=chain_id + 3,\n restricted_to=event['year'])\n\n new_stages = [\n Stage(number=0, connection_id=connection_id, chain_id=chain_id),\n Stage(number=0, connection_id=connection_id, chain_id=chain_id + 2),\n Stage(number=0, connection_id=connection_id + 1, chain_id=chain_id + 1),\n Stage(number=0, connection_id=connection_id + 1, chain_id=chain_id + 3)\n ]\n\n chains.extend([ch1, ch2, ch3, ch4])\n stages.extend(new_stages)\n connection_id += 2\n chain_id += 4\n\n Connection.objects.bulk_create(connections)\n Chain.objects.bulk_create(chains)\n connections = []\n chains = []\n Stage.objects.bulk_create(stages)\n stages = []\n","sub_path":"degrees/management/commands/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"139377792","text":"#python code to open web browser and video\nimport time\nimport webbrowser\ntotal_breaks = 3\nbreak_count = 0\nprint(\"This code starts at \"+time.ctime()) #ctime is used to retrieve current time\nwhile (break_count < total_breaks):\n\ttime.sleep(60)\t\t\t\t\t\t\t\t\n\twebbrowser.open(\"https://youtu.be/_C7UgR_sIW0\")\n\tbreak_count =break_count + 1","sub_path":"breaktime.py","file_name":"breaktime.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519006956","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom datetime import timedelta\nfrom functools import partial\n\nimport psycopg2\nimport pytz\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.tools import float_is_zero\nfrom odoo.exceptions import UserError\nfrom odoo.http import request\nimport odoo.addons.decimal_precision as dp\n\n_logger = logging.getLogger(__name__)\n\n\nclass ReportSaleDetails(models.AbstractModel):\n _inherit = 'report.point_of_sale.report_saledetails'\n\n @api.model\n def get_sale_details(self, date_start=False, date_stop=False, configs=False):\n \"\"\" Serialise the orders of the day information\n\n params: date_start, date_stop string representing the datetime of order\n \"\"\"\n print(\"GET_SALE_DETAILS\")\n if date_start and date_stop:\n return super(ReportSaleDetails, self).get_sale_details(date_start, date_stop, configs)\n else:\n return self.get_bandez_details()\n\n def get_bandez_details(self, state=['paid']):\n configs = self.env['pos.config'].search([])\n config_ids = []\n for config in configs:\n config_ids.append((4, config.id))\n\n orders = self.env['pos.order'].search([\n ('state', 'in', state),\n ('config_id', 'in', configs.ids)])\n\n user_currency = self.env.user.company_id.currency_id\n\n 
total = 0.0\n        products_sold = {}\n        taxes = {}\n        order_ids = []\n        for order in orders:\n            order_ids.append((4, order.id))\n\n            if user_currency != order.pricelist_id.currency_id:\n                total += order.pricelist_id.currency_id.compute(order.amount_total, user_currency)\n            else:\n                total += order.amount_total\n            currency = order.session_id.currency_id\n\n            for line in order.lines:\n                key = (line.product_id, line.price_unit, line.discount)\n                products_sold.setdefault(key, 0.0)\n                products_sold[key] += line.qty\n\n                if line.tax_ids_after_fiscal_position:\n                    line_taxes = line.tax_ids_after_fiscal_position.compute_all(line.price_unit * (1 - (line.discount or 0.0) / 100.0), currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)\n                    for tax in line_taxes['taxes']:\n                        taxes.setdefault(tax['id'], {'name': tax['name'], 'total': 0.0})\n                        taxes[tax['id']]['total'] += tax['amount']\n\n        st_line_ids = self.env[\"account.bank.statement.line\"].search([('pos_statement_id', 'in', orders.ids)]).ids\n        if st_line_ids:\n            self.env.cr.execute(\"\"\"\n                SELECT aj.name, sum(amount) total\n                FROM account_bank_statement_line AS absl,\n                     account_bank_statement AS abs,\n                     account_journal AS aj \n                WHERE absl.statement_id = abs.id\n                    AND abs.journal_id = aj.id \n                    AND absl.id IN %s \n                GROUP BY aj.name\n                \"\"\", (tuple(st_line_ids),))\n            payments = self.env.cr.dictfetchall()\n        else:\n            payments = []\n\n        return {\n            'total_paid': user_currency.round(total),\n            'payments': payments,\n            'company_name': self.env.user.company_id.name,\n            'taxes': taxes.values(),\n            'products': sorted([{\n                'product_id': product.id,\n                'product_name': product.name,\n                'code': product.default_code,\n                'quantity': qty,\n                'price_unit': price_unit,\n                'discount': discount,\n                'uom': product.uom_id.name\n            } for (product, price_unit, discount), qty in products_sold.items()], key=lambda l: l['product_name'])\n        }\n","sub_path":"point_of_sale_plus/models/pos_order.py","file_name":"pos_order.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410849354","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\snsg\\mate.py\n# Compiled at: 2018-03-27 22:48:03\n# Size of source mod 2**32: 4224 bytes\n\"\"\"\n@author: 张伟\n@time: 2018/3/7 9:28\n\"\"\"\nimport os, re\n\nclass Mate(object):\n    data = set()\n    text = str()\n    stop = set()\n    date_rg = '(\\\\d{4}年\\\\d{1,2}月\\\\d{1,2}日)|(\\\\d{4}\\\\S\\\\d{1,2}\\\\S\\\\d{1,2})|([A-Za-z0-9]*)'\n\n    def __init__(self, list_file_extend=None, list_file_stop=None):\n        \"\"\"\n        Initialize.\n        :param list_file_extend: extension dictionary; can be a list or a file path\n        :param list_file_stop: stop-word dictionary; can be a list or a file path\n        \"\"\"\n        self.p = os.path.sep\n        if type(list_file_extend) is list:\n            self.load_list(list_file_extend)\n        else:\n            if type(list_file_extend) is str:\n                self.load_file(list_file_extend)\n        if type(list_file_stop) is list:\n            self.stop.update(list_file_stop)\n        elif type(list_file_stop) is str:\n            with open(file=list_file_stop, encoding='utf-8-sig') as (f):\n                self.stop.update([fs[:-1] for fs in f.readlines()])\n            f.close()\n        flags = list(range(47, 56)) + list(range(64, 90)) + list(range(96, 123))\n        self.d = dict(map(lambda x: (x, True), flags))\n        path = os.path.dirname(os.path.realpath(__file__))\n        self.load_file(path + self.p + 'dict')\n\n    def load_list(self, ls):\n        \"\"\"\n        Load a list-based dictionary. Note: if the list entries contain newline characters, they are stripped automatically.\n        :param ls: a one-dimensional list [key, key, .....]\n        
:return: None\n        \"\"\"\n        if '\\n' in ls[0]:\n            self.data.update([f[:-1] for f in ls])\n        else:\n            self.data.update(ls)\n\n    def load_file(self, file):\n        \"\"\"\n        Load a dictionary from a file.\n        :param file: path of the dictionary file\n        :return: None\n        \"\"\"\n        with open(file=file, encoding='utf-8-sig') as (f):\n            self.load_list(f.readlines())\n        f.close()\n\n    def __mate_num(self, obj):\n        r = re.match(self.date_rg, obj, re.M | re.I)\n        if r:\n            start, end = r.span()\n            w = obj[start:end]\n            return w\n        else:\n            return ''\n\n    def mate(self, input_string):\n        \"\"\"\n        Segment the text.\n        :param input_string: the input text string\n        :return: the segmented string\n        \"\"\"\n        self.text = input_string + ' '\n        out_string = str()\n        lens = len(self.text) + 1\n        j = 0\n        flag = 0\n        while j < lens:\n            for k in range(j + 2, lens):\n                word = self.text[j:k]\n                deviation = 0\n                if self.d.get(ord(self.text[j])):\n                    word = self._Mate__mate_num(self.text[j:j + 10])\n                    if not len(word):\n                        break\n                    if self.text[flag:j] not in self.stop:\n                        out_string += self.text[flag:j] + self.p\n                    out_string += word + self.p\n                    j = j + len(word)\n                    flag = j\n                    break\n                else:\n                    while word in self.data:\n                        deviation += 1\n                        word = self.text[j:k + deviation]\n\n                    if deviation != 0:\n                        if len(self.text[flag:j]):\n                            if self.text[flag:j] not in self.stop:\n                                out_string += self.text[flag:j] + self.p\n                        out_string += word[:-1] + self.p\n                        j = k + deviation - 1\n                        flag = j\n                        j -= 1\n                        break\n\n            j += 1\n\n        if self.text[flag:-1] not in self.stop:\n            out_string += self.text[flag:-1]\n        return out_string\n\n    def to_list(self, split_words):\n        return split_words.split(self.p)[:-1]","sub_path":"pycfiles/snsg-1.8.16-py3.6/mate.cpython-36.py","file_name":"mate.cpython-36.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432037398","text":"\"\"\" Capstone WiFi Project - Main entry point\n\"\"\"\n\nfrom MacLookup import MacLookup\nimport sys\nif sys.platform == \"darwin\":\n    from Platform.MacOS import MacOS\nelif sys.platform == \"win32\":\n    from Platform.Win32 import Win32\n\n\nclass Main:\n\n    def __init__(self):\n        if sys.platform == \"darwin\":\n            self.isMac = True\n            self.isWin = False\n            self.isLin = False\n        elif sys.platform == \"win32\":\n            self.isMac = False\n            self.isWin = True\n            self.isLin = False\n\n        self.mac_lookup_obj = MacLookup()\n\n        wireshark_records = self.read_wireshark_oui_records_from_json(\"wsharkoui.json\")\n        # todo: remove hard coding for json file names\n\n        if len(wireshark_records) < 1:\n            wireshark_records = self.read_wireshark_oui_records_from_web()\n            print (\"Loaded \" + str(wireshark_records) + \"+ records from Wireshark from Web\")\n        else:\n            print(\"Loaded \" + str(wireshark_records) + \"+ records from Wireshark data\")\n            self.write_oui_records_to_json(self.mac_lookup_obj.return_lookup_item_list_json(\n                self.mac_lookup_obj.lookup_item_list), \"wsharkoui.json\")\n\n        nmap_records = self.read_wireshark_oui_records_from_json(\"nmapoui.json\")\n        if len(nmap_records) < 1:\n            nmap_records = self.read_wireshark_oui_records_from_web()\n            print(\"Loaded \" + str(nmap_records) + \"+ records from NMAP data from Web\")\n            self.write_oui_records_to_json(self.mac_lookup_obj.return_lookup_item_list_json(\n                self.mac_lookup_obj.lookup_item_list_nmap), \"nmapoui.json\")\n        else:\n            print(\"Loaded \" + str(nmap_records) + \"+ records from NMAP data\")\n\n    def read_wireshark_oui_records_from_json(self, _wireshark_filename):\n        if self.isMac:\n            platform_obj = MacOS()\n        elif self.isWin:\n            platform_obj = Win32()\n        else:\n            return -1\n        return 
platform_obj.read_from_file(_wireshark_filename,\n platform_obj.get_app_data_dir())\n\n def read_wireshark_oui_records_from_web(self):\n return self.mac_lookup_obj.retrieve_oui_table_wireshark()\n\n def read_nmap_oui_records_from_json(self, _nmap_filename):\n if self.isMac:\n platform_obj = MacOS()\n elif self.isWin:\n platform_obj = Win32()\n else:\n return -1\n return platform_obj.read_from_file(_nmap_filename,\n platform_obj.get_app_data_dir())\n\n def read_nmap_oui_records_from_web(self):\n return self.mac_lookup_obj.retrieve_oui_table_nmap()\n\n def write_oui_records_to_json(self, record_list_string, _filename):\n if self.isMac:\n platform_obj = MacOS()\n elif self.isWin:\n platform_obj = Win32()\n else:\n return -1\n platform_obj.write_to_file(_filename,\n platform_obj.get_app_data_dir(),\n record_list_string)\n return 0\n\n \"\"\" Pre connect Scan Function\"\"\"\n def pre_connect_scan(self):\n if self.isMac:\n platform_obj = MacOS()\n elif self.isWin:\n platform_obj = Win32()\n wifi_scan_results = platform_obj.scan_wifi()\n self.print_pre_connect_scan_results(wifi_scan_results)\n\n \"\"\" Print function for pre connect scan\"\"\"\n def print_pre_connect_scan_results(self, results):\n if self.isWin:\n for result in results:\n print(\"SSID:\" + str(result.ssid))\n for bssid in result.bssid:\n if result.get_ssid() == bssid.get_ssid():\n print(\"\\tbssid:\" + str(bssid.get_bssid()) + \"\\tsignal:\" + str(bssid.get_signal()) + \"\\tchannel:\" + str(bssid.get_channel()))\n lookup_data = self.mac_lookup_obj.mac_lookup(bssid.get_bssid())\n if type(lookup_data[0]) != int:\n print(\"Short Name match (WireShark): \" + str(lookup_data[0].get_short_name()))\n print(\"Long Name match (WireShark): \" + str(lookup_data[0].get_long_name()))\n if type(lookup_data[1]) != int:\n print(\"Short Name match (NMAP): \" + lookup_data[1].get_short_name())\n # else:\n # print(\"Unable to find MAC OUI / BSSID\")\n # print(lookup_data)\n elif self.isMac:\n for result in results:\n print(\"SSID: \" + str(result.ssid()))\n print(\"\\tbssid: \" + result.bssid())\n print(\"\\trssi: \" + str(result.rssi()))\n print(\"\\tchannel: \" + str(result.channel()))\n lookup_data = self.mac_lookup_obj.mac_lookup(result.bssid())\n if type(lookup_data[0]) != int:\n print(\"Short Name match (WireShark): \" + str(lookup_data[0].get_short_name()))\n print(\"Long Name match (WireShark): \" + str(lookup_data[0].get_long_name()))\n if type(lookup_data[1]) != int:\n print(\"Short Name match (NMAP): \" + lookup_data[1].get_short_name())\n else:\n print(\"Unable to lookup MAC OUI / BSSID\")\n print(lookup_data)\n\n def main(self):\n pass\n self.pre_connect_scan()\n\n\nif __name__ == '__main__':\n _Main = Main()\n _Main.main()\n","sub_path":"Capstone/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56919279","text":"from django.shortcuts import render, HttpResponse\n\nfrom app2.models import *\nfrom app2 import common\nfrom app2 import htmlgen\n\n\ndef Filter(before_func, after_func):\n def outer(main_func):\n def wrapper(request, *args, **kwargs):\n\n before_result = before_func(request, *args, **kwargs)\n if (before_result != None):\n return before_result\n\n main_result = main_func(request, *args, **kwargs)\n if main_result != None:\n return main_result\n\n after_result = after_func(request, *args, **kwargs)\n if after_result != None:\n return after_result\n\n return wrapper\n\n return outer\n\n\ndef 
after_index(request, *args, **kwargs):\n print(\"after\")\n\n\ndef before_index(request, *args, **kwargs):\n print(\"before\")\n\n\n# Create your views here.\n@Filter(before_index, after_index)\ndef index(request, *args, **kwargs):\n page = common.string_int(kwargs['page'], 1)\n\n cookies = request.COOKIES\n per_item = common.string_int(cookies['page_num'], 5)\n datas = Host.objects.all()\n count = datas.count()\n\n start, end, allpage = htmlgen.culpage(page, count, per_item)\n\n data = datas[start:end]\n\n htmlstr = htmlgen.gen_html(page, allpage, '/app2/index', 5)\n\n ret = {'data': data, 'count': count, 'htmllist': htmlstr}\n return render(request, 'app2/index2.html', ret)\n","sub_path":"app2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594020000","text":"#\n# Imports which are standard for all test cases.\n#\nimport sys\nsys.path.insert(1, \"./\")\nfrom gaiatest import GaiaTestCase\nfrom OWDTestToolkit import *\n\n#\n# Imports particular to this test case.\n#\nfrom tests.mock_data.contacts import MockContacts\n\nclass test_19194(GaiaTestCase):\n _Description = \"[SMS] Try send a sms to a contact while airplane is enabled (from sms app - use contact option).\"\n \n _TestMsg = \"Test.\"\n \n def setUp(self):\n #\n # Set up child objects...\n #\n GaiaTestCase.setUp(self)\n self.UTILS = UTILS(self)\n self.messages = Messages(self)\n self.contacts = Contacts(self)\n \n \n #\n # Prepare the contact we're going to insert.\n #\n self.contact_1 = MockContacts().Contact_1\n\n #\n # Establish which phone number to use.\n #\n self.contact_1[\"tel\"][\"value\"] = self.UTILS.get_os_variable(\"GLOBAL_TARGET_SMS_NUM\")\n self.UTILS.logComment(\"Using target telephone number \" + self.contact_1[\"tel\"][\"value\"])\n \n #\n # Add this contact (quick'n'dirty method - we're just testing sms, no adding a contact).\n #\n self.data_layer.insert_contact(self.contact_1)\n \n #\n # Put the phone into airplane mode.\n #\n self.data_layer.set_setting('ril.radio.disabled', True)\n\n \n def tearDown(self):\n self.UTILS.reportResults()\n \n def test_run(self):\n \n #\n # Launch contacts app.\n #\n self.contacts.launch()\n \n #\n # View the details of our contact.\n #\n self.contacts.viewContact(self.contact_1['name'])\n \n #\n # Tap the sms button in the view details screen to go to the sms page.\n #\n smsBTN = self.UTILS.getElement(DOM.Contacts.sms_button, \"Send SMS button\")\n smsBTN.tap()\n \n #\n # Switch to the 'Messages' app frame (or marionette will still be watching the\n # 'Contacts' app!).\n #\n time.sleep(2)\n self.marionette.switch_to_frame()\n self.UTILS.switchToFrame(*DOM.Messages.frame_locator)\n \n #\n # TEST: this automatically opens the 'send SMS' screen, so\n # check the correct name is in the 'to' of this sms.\n #\n self.messages.checkIsInToField(self.contact_1['name'])\n \n #\n # Create SMS.\n #\n self.messages.enterSMSMsg(self._TestMsg)\n \n #\n # Click send.\n #\n self.messages.sendSMS()\n\n\n #\n # Check that popup appears.\n #\n self.messages.checkAirplaneModeWarning()\n \n #\n # Check that this last message is marked as failed.\n #\n x = self.messages.lastMessageInThisThread()\n self.UTILS.TEST( \"error\" in x.get_attribute(\"class\"),\n \"The last message in this thread is marked as 'error'.\")\n \n 
\n","sub_path":"tests/test_19194.py","file_name":"test_19194.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186305051","text":"from tkinter import *\nfrom backend import Database\n\n\ndatabase = Database(\"books.db\")\n\nwindow= Tk()\nwindow.wm_title(\"BookStore\")\n\n\nl1 = Label(window, text =\"Title\")\nl1.grid(row=0, column=0)\n\n\nl2 = Label(window, text =\"Author\")\nl2.grid(row=0, column=2)\n\n\nl3 = Label(window, text =\"Year\")\nl3.grid(row=1, column=0)\n\n\nl4 = Label(window, text =\"ISBN\")\nl4.grid(row=1, column=2)\n\n#title entry field\ntitle_text = StringVar()\ne1 = Entry(window, textvariable=title_text)\ne1.grid(row=0, column=1)\n\n#year entry field\nyear_text = StringVar()\ny1 = Entry(window, textvariable=year_text)\ny1.grid(row=1, column=1)\n\n#author entry field\nauthor_text = StringVar()\na1 = Entry(window, textvariable=author_text)\na1.grid(row=0, column=3)\n\n\n\n#isbn entry field\nisbn_text = StringVar()\nisbn1 = Entry(window, textvariable=isbn_text)\nisbn1.grid(row=1, column=3)\n\n\n#list box\nlist1 =Listbox(window, height=6, width=35)\nlist1.grid(row=2, column=0, rowspan=6, columnspan=2)\n\n\n\n\n\n\ndef view_command():\n list1.delete(0,END)\n for row in database.view():\n list1.insert(END, row)\n\ndef search_command():\n list1.delete(0,END)\n for row in database.search(title_text.get(), author_text.get(), year_text.get(), isbn_text.get()):\n list1.insert(END,row)\n\ndef add_command():\n database.insert(title_text.get(), author_text.get(), year_text.get(), isbn_text.get())\n list1.delete(0,END)\n list1.insert(END,\n (title_text.get(), author_text.get(), year_text.get(), isbn_text.get()))\n\ndef get_selected_row(event):\n try:\n global selected_tuple\n index= list1.curselection()[0]\n selected_tuple = list1.get(index)\n #return (selected_tuple)\n e1.delete(0,END)\n e1.insert(END, selected_tuple[1])\n y1.delete(0, END)\n y1.insert(END, selected_tuple[2])\n a1.delete(0, END)\n a1.insert(END, selected_tuple[3])\n isbn1.delete(0, END)\n isbn1.insert(END, selected_tuple[4])\n except IndexError:\n pass\n\ndef delete_command():\n database.delete(selected_tuple[0])\n\ndef update_command():\n database.update(selected_tuple[0],title_text.get(), author_text.get(), year_text.get(), isbn_text.get())\n\n\n\n\n\n#buttons\nb1=Button(window, text=\"View all\", width=12, command=view_command)\nb1.grid(row=2, column=3)\n\nb1=Button(window, text=\"Search Entry\", width=12, command=search_command)\nb1.grid(row=3, column=3)\n\nb1=Button(window, text=\"Add Entry\", width=12, command=add_command)\nb1.grid(row=4, column=3)\n\nb1=Button(window, text=\"Update Selected\", width=12,command = update_command)\nb1.grid(row=5, column=3)\n\nb1=Button(window, text=\"Delete Selected\", width=12, command=delete_command)\nb1.grid(row=6, column=3)\n\nb1=Button(window, text=\"Close\", width=12,command=window.destroy)\nb1.grid(row=7, column=3)\n\n\n#scrollbar\nsb1=Scrollbar(window)\nsb1.grid(row=2, column=2, rowspan=6)\n\n#adding list and scrollbar\nlist1.configure(yscrollcommand=sb1.set)\nsb1.configure(command=list1.yview)\nlist1.bind('<>', get_selected_row)\n\nwindow.mainloop()","sub_path":"script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462169372","text":"from six.moves import configparser\nimport os\nimport collections\nimport tensorflow as tf\n\n\n# a very simple 
parser\ndef parse(string):\n if type(string) is not str:\n raise ValueError\n if string in ['true', 'True']:\n return True\n elif string in ['false', 'False']:\n return False\n elif string in ['float64', 'float32', 'float16', 'int64', 'int32', 'int16']:\n return getattr(tf, string)\n elif any([string.count(s) for s in '.eE']):\n try:\n return float(string)\n except:\n return string\n else:\n try:\n return int(string)\n except:\n return string\n\n\n# make the dictionary into a nested series of named tuples. This is what allows\n# accessing by attribute: settings.numerics.jitter\ndef namedtuplify(mapping): # thank you https://gist.github.com/hangtwenty/5960435\n if isinstance(mapping, collections.Mapping):\n for key, value in list(mapping.items()):\n mapping[key] = namedtuplify(value)\n try:\n mapping.pop('__name__')\n except:\n pass\n return collections.namedtuple('settings', dict(**mapping))(**mapping)\n return parse(mapping)\n\n\ndef read_config_file(path=None):\n c = configparser.ConfigParser()\n\n if path is None: # pragma: no cover\n # first look in the current directory,\n # then in the user's home directory,\n # then in the same directory as this file.\n locations = map(os.path.abspath, [os.curdir,\n os.path.expanduser('~'),\n os.path.dirname(os.path.realpath(__file__))])\n for loc in locations:\n # try both with and without preceeding 'dot' for hidden files (prefer non-hidden)\n if c.read(os.path.join(loc, 'gpflowrc')):\n break\n if c.read(os.path.join(loc, '.gpflowrc')):\n break\n else:\n assert (c.read(path))\n return c\n\n\nc = read_config_file()\nsettings = namedtuplify(c._sections)\n","sub_path":"GPflow/_settings.py","file_name":"_settings.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122212678","text":"# -*-coding:utf-8 -*-\n\nimport codecs\n\n# 10100110\n\ndef read_file(infile):\n\tn = 0\n\tdata = []\n\twith codecs.open(infile, 'r', 'utf-8') as fi:\n\t\tfdata = fi.readlines()\n\tfor line in fdata:\n\t\tif n == 0:\n\t\t\tdata.append(-1)\n\t\telse:\n\t\t\tdata.append(int(line.strip()))\n\t\tn+=1\n\treturn data\n\ndef mwis(arr):\n\tdp_cache = []\n\n\t# initialize the first two of the dp_cache\n\tdp_cache.append(0)\n\tdp_cache.append(arr[1])\n\n\t# compute the dp_cache\n\tn = 2\n\twhile n < len(arr):\n\t\t# print(\"n: \", n, \"dp_cache: \", dp_cache)\n\t\tdp_cache.append(max(dp_cache[n-1], (dp_cache[n-2]+arr[n]))) \n\t\tn += 1\n\n\t# reconstruct to get the set:\n\tres_mwis = []\n\tn = len(arr)-1\n\twhile n>1:\n\t\tif (dp_cache[n-2]+arr[n])>dp_cache[n-1]:\n\t\t\tres_mwis.append(n)\n\t\t\tn -= 2\n\t\telse:\n\t\t\tn -= 1\n\tif n == 1:\n\t\tres_mwis.append(n)\n\n\t# test if vertices 1, 2, 3, 4, 17, 117, 517, and 997 are in the is:\n\tres_cache = [0]*8\n\tTEST_V = [1, 2, 3, 4, 17, 117, 517, 997]\n\tn = 0\n\twhile n < 8:\n\t\tif TEST_V[n] in res_mwis:\n\t\t\tres_cache[n] = 1\n\t\tn += 1\n\treturn ''.join([str(i) for i in res_cache])\n\nif __name__ == '__main__':\n\tfrom sys import argv\n\tinfile = argv[1]\n\tdata = read_file(infile)\n\tres = mwis(data)\n\tprint(\"the mwis is: \", res)\n\n\n","sub_path":"course3_wk3/dp_mwis.py","file_name":"dp_mwis.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308545575","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 1 23:48:57 2020\n\n@author: pepe\n\"\"\"\n\n\n#%% linear search\n\ndef 
linear_search(element_looking_for, lst):\n    \n    for element in lst:\n        \n        if element == element_looking_for:\n            \n            return True\n        \n    return False\n\nprint(linear_search(3, [1,2,3,4,5]))\n\nprint(linear_search(7, [1,2,3,4,5]))\n\nprint(linear_search(7, [1,2,3,4,5,7]))\n\n#%% binary search\n\ndef binary(elem, lst):\n\n    left = 0\n    right = len(lst) - 1\n    \n    middle = (left + right) // 2\n    \n    operations = 0\n    \n    while left <= right:\n        \n        operations = operations + 1\n        \n        middle = (left + right) // 2\n        \n        if lst[middle] < elem:\n            \n            left = middle + 1\n            \n        elif lst[middle] > elem:\n            \n            right = middle - 1\n            \n        else:\n            print(operations)\n            return True\n        \n    print(operations)\n    return False\n\nprint(binary(3, [1,2,4]))\n\nprint(binary(10000000, range(0, 10000001)))\n\n\n\n    \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"session10.py","file_name":"session10.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641328366","text":"from collections import OrderedDict\n\n\ndef import_pattern_list():\n\n    freq_pattern_list = []\n\n    for idx in range(5):\n\n        with open(f'patterns/pattern-{str(idx)}.txt', 'r') as f:\n            raw_text = f.readlines()\n\n        freq_dict = OrderedDict(\n            (':'.join(pattern), int(support))\n            for support, *pattern in (line.split() for line in raw_text)\n        )\n\n        freq_pattern_list.append(freq_dict)\n\n    return freq_pattern_list\n\n\n# this function will create max/closed patterns from pattern-i.txt\ndef generate_max_or_closed_pattern_output(*, is_closed_mining=False):\n\n    topic_pattern = import_pattern_list()\n\n    all_pattern = list()\n\n    for topic_index, freq_dict in enumerate(topic_pattern):\n\n        freq_list = list(freq_dict.keys())\n\n        base_set = list(\n            frozenset(pattern.split(':')) for pattern in freq_list\n        )\n\n        result_list = []\n\n        for pattern in freq_list:\n\n            is_max_or_closed_pattern = True\n\n            for test_set in base_set:\n\n                cur_pattern_set = set(pattern.split(':'))\n\n                if cur_pattern_set < test_set:  # is proper subset\n\n                    if is_closed_mining:\n\n                        if freq_dict.get(pattern) \\\n                                == freq_dict.get(':'.join(sorted(test_set, key=int))):\n                            is_max_or_closed_pattern = False\n                            break\n                    else:\n                        is_max_or_closed_pattern = False\n                        break\n\n            if is_max_or_closed_pattern:\n                result_list.append(pattern)\n\n        result_list.sort(\n            reverse=True,\n            key=lambda t: freq_dict.get(t)\n        )\n\n        all_pattern.append(result_list)\n\n        output = '\\n'.join(\n            ' '.join(\n                [\n                    str(freq_dict.get(pattern)),\n                    pattern.replace(':', ' ')\n                ]\n            )\n            for pattern in result_list\n        )\n\n        if is_closed_mining:\n            with open(f'closed/closed-{str(topic_index)}.txt', 'w') as f:\n                f.write(output)\n        else:\n            with open(f'max/max-{str(topic_index)}.txt', 'w') as f:\n                f.write(output)\n\n    return all_pattern\n","sub_path":"mp3-frequent-pattern-mining/core/step5.py","file_name":"step5.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340304670","text":"from playlists.playlists.application.repository.repository_playlist import PlaylistRepository\nfrom playlists.playlists.domain.exceptions import DataBaseException, EmptyFieldsException\n\nclass DeletePlaylist:\n    def __init__(self, repository: PlaylistRepository):\n        self.repository = repository\n\n    def execute(self, idPlaylist: int):\n        if not idPlaylist:\n            raise EmptyFieldsException(\"Empty Fields\")\n\n        try:\n            if self.repository.delete(idPlaylist):\n                return True\n        except DataBaseException as ex:\n            raise 
DataBaseException(\"Database connection error\")\n ","sub_path":"playlists/playlists/application/use_cases/delete_playlist.py","file_name":"delete_playlist.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466389854","text":"#MESSAGE CLASS\n\n#Function init: initialize variables\nclass message(object):\n def __init__(self, ID, src, des, genT, size):\n self.src = int(src)\n self.des = int(des)\n self.ID = int(ID)\n self.size = int(size)\n self.genT = int(genT)\n self.last_sent = int(genT)\n self.parent = -1\n self.band_usage = [0, 0, 0, 0]\n\n\n\n def set(self, lastSent, parent):\n self.last_sent = lastSent\n self.parent = parent\n\n def band_used(self, s):\n self.band_usage[s] += 1\n\n","sub_path":"HotPotato/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281974360","text":"#\n# V-Ray For Blender\n#\n# http://chaosgroup.com\n#\n# Author: Andrei Izrantcev\n# E-Mail: andrei.izrantcev@chaosgroup.com\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# All Rights Reserved. 
V-Ray(R) is a registered trademark of Chaos Software.\n#\n\nimport bpy\n\nimport TexCommonParams\n\n\nTYPE = 'TEXTURE'\nID = 'TexNoise'\nNAME = 'Noise (Maya)'\nDESC = \"\"\n\nPluginParams = list(TexCommonParams.PluginParams)\n\nPluginParams.extend([\n {\n 'attr' : 'use_3d_mapping',\n 'desc' : \"\",\n 'type' : 'BOOL',\n 'default' : False,\n },\n\n {\n 'attr' : 'persistence',\n 'desc' : \"amplitude(i+1) = amplitude(i) / persistence\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'amplitude_ratio',\n 'desc' : \"amplitude(i+1) = amplitude(i) * amplitude_ratio\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'frequency_ratio',\n 'desc' : \"frequency(i+1) = frequency(i) * frequency_ratio\",\n 'type' : 'FLOAT',\n 'default' : 2,\n },\n {\n 'attr' : 'frequency1',\n 'name' : \"Frequency\",\n 'desc' : \"The starting frequency\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'amplitude1',\n 'name' : \"Amplitude\",\n 'desc' : \"The starting amplitude\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'octaves',\n 'desc' : \"\",\n 'type' : 'INT',\n 'default' : 3,\n },\n {\n 'attr' : 'noiseType',\n 'desc' : \"Noise type\",\n 'type' : 'ENUM',\n 'items' : (\n ('0', \"Noise\", \"\"),\n ('1', \"Perlin Noise\", \"\"),\n ('2', \"Inflected Perlin Noise\", \"\"),\n ('3', \"Marble (With Perlin)\", \"\")\n ),\n 'default' : '0',\n },\n {\n 'attr' : 'frequency_mult',\n 'desc' : \"\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'amplitude_mult',\n 'desc' : \"\",\n 'type' : 'FLOAT',\n 'default' : 1,\n },\n {\n 'attr' : 'inflection',\n 'desc' : \"Inflection\",\n 'type' : 'BOOL',\n 'default' : False,\n },\n # {\n # 'attr' : 'color1',\n # 'desc' : \"\",\n # 'type' : 'COLOR',\n # 'default' : (0, 0, 0),\n # },\n # {\n # 'attr' : 'color2',\n # 'desc' : \"\",\n # 'type' : 'COLOR',\n # 'default' : (1, 1, 1),\n # },\n {\n 'attr' : 'color1_tex',\n 'desc' : \"\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n {\n 'attr' : 'color2_tex',\n 'desc' : \"\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n # {\n # 'attr' : 'color1_tex_mult',\n # 'desc' : \"\",\n # 'type' : 'FLOAT',\n # 'default' : 1,\n # },\n # {\n # 'attr' : 'color2_tex_mult',\n # 'desc' : \"\",\n # 'type' : 'FLOAT',\n # 'default' : 1,\n # },\n {\n 'attr' : 'clamp',\n 'desc' : \"\",\n 'type' : 'BOOL',\n 'default' : True,\n },\n {\n 'attr' : 'dimensions',\n 'desc' : \"Two or Three dimensional noise\",\n 'type' : 'ENUM',\n 'items' : (\n ('2', \"2D\", \"\"),\n ('3', \"3D\", \"\"),\n ),\n 'default' : '2',\n },\n {\n 'attr' : 'time',\n 'desc' : \"The time of the noise, this will act as a third or fourth dimension to the noise generating function\",\n 'type' : 'FLOAT',\n 'default' : 0,\n },\n {\n 'attr' : 'threshold',\n 'desc' : \"Value added to the noise function, noise function values above 1.0 are clamped\",\n 'type' : 'FLOAT_TEXTURE',\n 'default' : 0,\n },\n {\n 'attr' : 'scale',\n 'desc' : \"Scale for the noise UVW coordinates\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n {\n 'attr' : 'origin',\n 'desc' : \"Translation for the noise UVW coordinates\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n {\n 'attr' : 'implode',\n 'desc' : \"Amount of implode performed on the UVW coordinates\",\n 'type' : 'FLOAT_TEXTURE',\n 'default' : 0,\n },\n {\n 'attr' : 'implode_center',\n 'desc' : \"The center of the implode effect\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n])\n\nPluginWidget = \"\"\"\n{ \"widgets\": [\n { \"layout\" : \"COLUMN\",\n \"attrs\" 
: [\n        { \"name\" : \"noiseType\" }\n      ]\n    },\n\n    { \"layout\" : \"SEPARATOR\" },\n\n    { \"layout\" : \"ROW\",\n      \"attrs\" : [\n        { \"name\" : \"dimensions\", \"expand\" : true }\n      ]\n    },\n\n    { \"layout\" : \"COLUMN\",\n      \"attrs\" : [\n        { \"name\" : \"clamp\" },\n        { \"name\" : \"time\" }\n      ]\n    },\n\n    { \"layout\" : \"SPLIT\",\n      \"splits\" : [\n        { \"layout\" : \"COLUMN\",\n          \"align\" : true,\n          \"attrs\" : [\n            { \"name\" : \"frequency1\" },\n            { \"name\" : \"frequency_ratio\", \"label\" : \"Ratio\" },\n            { \"name\" : \"frequency_mult\", \"label\" : \"Mult\" }\n          ]\n        },\n        { \"layout\" : \"COLUMN\",\n          \"align\" : true,\n          \"attrs\" : [\n            { \"name\" : \"amplitude1\" },\n            { \"name\" : \"amplitude_ratio\", \"label\" : \"Ratio\" },\n            { \"name\" : \"amplitude_mult\", \"label\" : \"Mult\" }\n          ]\n        }\n      ]\n    },\n\n    { \"layout\" : \"SPLIT\",\n      \"splits\" : [\n        { \"layout\" : \"COLUMN\",\n          \"align\" : true,\n          \"attrs\" : [\n            { \"name\" : \"persistence\" }\n          ]\n        },\n        { \"layout\" : \"COLUMN\",\n          \"align\" : true,\n          \"attrs\" : [\n            { \"name\" : \"octaves\" }\n          ]\n        }\n      ]\n    },\n\n    { \"layout\" : \"COLUMN\",\n      \"attrs\" : [\n        { \"name\" : \"inflection\" },\n        { \"name\" : \"use_3d_mapping\" }\n      ]\n    },\n\n    {TEX_COMMON}\n]}\n\"\"\"\nPluginWidget = PluginWidget.replace('{TEX_COMMON}', TexCommonParams.PluginWidget)\n","sub_path":"plugins/texture/TexNoise.py","file_name":"TexNoise.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197662928","text":"'''\nWritten by Debojit Kaushik (Timestamp)\n    Animal Shelter: An animal shelter, which holds only dogs and cats, operates on a strictly \"first in, first\n    out\" basis. People must adopt either the \"oldest\" (based on arrival time) of all animals at the shelter,\n    or they can select whether they would prefer a dog or a cat (and will receive the oldest animal of\n    that type). They cannot select which specific animal they would like. Create the data structures to\n    maintain this system and implement operations such as enqueue, dequeueAny, dequeueDog,\n    and dequeueCat. You may use the built-in Linked List data structure.\n'''\nimport os\nimport sys\nimport traceback\n\n\nclass AnimalShelter:\n\n    class Node:\n        def __init__(self, data):\n            self.next = None\n            self.animal = data\n\n    def __init__(self, l):\n        self.qu = None\n        self.head = None\n\n    def enqu(self, data):\n        try:\n            node = self.Node(data)\n            if self.head is None:\n                self.head = node\n            else:\n                node.next = self.head\n                self.head = node\n            print(self.head.animal, self.head.next)\n        except Exception:\n            print(traceback.format_exc())\n\n    def dequAny(self):\n        try:\n            if self.head is None:\n                raise ValueError(\"Queue is empty.\")\n            elif self.head.next is None:\n                data = self.head\n                self.head = None\n                return data\n            else:\n                qu = self.head\n                while qu.next.next is not None:\n                    qu = qu.next\n                data = qu.next\n                qu.next = None\n                return data\n        except Exception:\n            print(traceback.format_exc())\n\n    def dequDog(self):\n        try:\n            assert self.head\n            qu = self.head\n            prev = None\n            dogNode = None\n            dogPrev = None\n            while qu:\n                # keep the last dog seen: the tail end of the list holds the oldest animals\n                if qu.animal == 'dog':\n                    dogNode = qu\n                    dogPrev = prev\n                prev = qu\n                qu = qu.next\n            if dogNode is None:\n                return None\n            if dogPrev is None:\n                self.head = dogNode.next\n            else:\n                dogPrev.next = dogNode.next\n            return dogNode\n        except AssertionError:\n            print(\"Head is empty. 
Create a queue first!\")\n except Exception:\n print(traceback.format_exc())\n\n def dequCat(self):\n try:\n assert self.head\n qu = self.head\n catNode = None\n catPrev = None\n while qu:\n if qu.next.animal is 'cat':\n catNode = qu.next\n catPrev = qu\n qu = qu.next\n catPrev.next = catNode.next\n return catNode\n except AssertionError:\n print(\"Head is empty. Create a queue first!\")\n except Exception:\n print(traceback.format_exc())\n\n def showList(self):\n try:\n a = self.head\n while a is not None:\n print(a.animal)\n a = a.next\n except Exception:\n print(traceback.format_exc())","sub_path":"code_quests/animalShelter.py","file_name":"animalShelter.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"430150557","text":"import csv\nimport pandas as pd\nimport numpy as np\n#import pandas as pd\nimport csv\n#import numpy as np\nimport matplotlib.pyplot as plt\nimport heapq\n\n\n\n#function to preprocess data\ndef preprocessing():\n #read csv file\n csvReader = csv.reader(open('imdb.csv'), delimiter=',')\n headers = next(csvReader) # list of headers\n #open new csv file to write\n with open('topProcess.csv', 'w', newline='') as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',')\n csvWriter.writerow(headers)\n for row in csvReader:\n try:\n #if values in column imdbRating is not integer or float remove that column\n if float(row[5]):\n csvWriter.writerow(row)\n except:\n continue\n#function to get top 10 movies\ndef top10Movies():\n df = pd.read_csv('topProcess.csv', usecols=['title', 'imdbRating'])\n top = df.sort_values(by='imdbRating', ascending=False).head(10)\n print(top)\n\n#function to print which country watches which genre most\ndef genrePercentageCountry():\n\n csvReader = csv.reader(open('movie_metadata.csv', newline=''), delimiter=',')\n headers = next(csvReader)\n dict = {}\n count = 0\n key = ''\n for row in csvReader:\n #listOfGeneres is separated by '|'. 
Split it to make a list\n listOfGeneres = row[9].split('|')\n country = row[20]\n #create a dictionary data structure with key as country and genre and values with count of that genre\n for genres in listOfGeneres:\n # print(genres)\n key = country + ',' + genres\n # print(key)\n if key in dict:\n dict[key] = dict[key] + 1\n else:\n dict[key] = count + 1\n\n dictKeys = dict.keys()\n dictValues = dict.values()\n\n writeHeader = ['country', 'generes', 'count']\n #write this dictionary in csv file\n with open('dictGenre.csv', 'w', newline='') as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',')\n csvWriter.writerow(writeHeader)\n # rowWrite = []\n for k, v in dict.items():\n kSplit = k.split(',')\n kSplit.append(v)\n # print(kSplit)\n csvWriter.writerow(kSplit)\n #read csv file with pandas\n df = pd.read_csv('dictGenre.csv')\n\n idx = df.groupby(['country'])['count'].transform(max) == df['count']\n grp = df[idx]\n print(grp)\n a = df.groupby(['country'])[['country', 'count', 'generes']].sum().reset_index()\n\n #print(a)\n grpList = grp.values.T.tolist()\n aList = a.values.T.tolist()\n percentageList = []\n for i in range(0, len(grpList[0])):\n for j in range(0, len(aList)):\n if grpList[0][i] == aList[0][j]:\n percentage = (grpList[2][i] / aList[1][j]) * 100\n percentageList.append(percentage)\n\n x = grpList[1]\n\n\n N = len(x)\n ind = np.arange(N)\n y = percentageList\n # print(y)\n width = 0.35\n\n #plt.bar(ind + width, y, width, color='g')\n #plt.show()\n#function to preprocess data which removes rows having critic recviews column null\ndef preProcessCritic():\n csvReader = csv.reader(open('movie_metadata.csv'), delimiter=',')\n headers = next(csvReader) # list of headers\n #write in new csv file after deleting bad records\n with open('criticProcess.csv', 'w', newline='') as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',')\n csvWriter.writerow(headers)\n for row in csvReader:\n try:\n if row[2]:\n csvWriter.writerow(row)\n except:\n continue\n\n#function to show critic reviews of a movie\ndef criticReviews():\n reader = open('criticProcess.csv', newline='')\n csvReader = csv.reader(reader)\n headers = next(csvReader)\n\n movieName = input(\"Enter movie name to view critic review number \\n\")\n movieName = movieName.strip()\n movieName = movieName.lower()\n\n flag = 0\n numerOfReviews = 0\n for row in csvReader:\n if movieName == row[11].strip().lower():\n numerOfReviews = row[2]\n print(\"Number of critic reviews for a movie are: \", numerOfReviews)\n flag = 1\n break\n if flag == 0:\n print(\"Movie not found\")\n\n totalReviews = 0\n criticList = []\n reader.seek((0))\n next(csvReader)\n for values in csvReader:\n totalReviews = totalReviews + int(values[2])\n criticList.append(int(values[2]))\n\n percentage = (int(numerOfReviews) / totalReviews) * 100\n topFive = heapq.nlargest(5, criticList)\n print(\"Top 5 number of critic reviews are: \", topFive)\n labels = []\n topFive.append(int(numerOfReviews))\n labels = [\"Top\", \"Two\", \"Three\", \"Four\", \"Five\", movieName]\n # print(labels)\n sizes = topFive\n explode = (0, 0, 0, 0, 0, 0.1)\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.savefig(\"criticPie.png\")\n plt.show()\n\n#preprocessing function to delete rows where year of release is null\ndef releasePreProcess():\n csvReader = csv.reader(open('movie_metadata.csv'), delimiter=',')\n headers = 
next(csvReader)  # list of headers\n    with open('releaseProcess.csv', 'w', newline='') as csvFile:\n        csvWriter = csv.writer(csvFile, delimiter=',')\n        csvWriter.writerow(headers)\n        for row in csvReader:\n            try:\n                if row[23]:\n                    csvWriter.writerow(row)\n            except:\n                continue\n\n#function to show how many movies released per year\ndef movieRelease():\n    reader = open('releaseProcess.csv', newline='')\n    csvReader = csv.reader(reader)\n    headers = next(csvReader)\n    dict = {}\n    key = ''\n    count = 0\n    for row in csvReader:\n        # print(row[23])\n        key = row[23]\n        # print(key)\n        if key in dict:\n            dict[key] = dict[key] + 1\n        else:\n            dict[key] = count + 1\n    x = []\n    y = []\n    for key in dict:\n\n        value = dict[key]\n        key = int(key)\n        v = int(value)\n        x.append(key)\n        y.append(v)\n\n    plt.xlabel(\"Year\")\n    plt.ylabel(\"Number of movies released\")\n\n\n    plt.plot(x, y, \"o\")\n    plt.savefig(\"movieReleased.png\")\n    plt.show()\n\n\n\nif __name__ == '__main__':\n    f = csv.writer(open(\"finalIMDB.csv\", \"w\", newline=''))\n    print(\"This application can perform following tasks. Please select one by entering number \\n\")\n    choice = int(input(\"1. Top 10 movies by rating \\n2. Which country watches which genre most \\n\"\n                       \"3. Number of critic reviews for a movie \\n\"\n                       \"4. How many movies released per year \\n\"))\n    if choice == 1:\n        preprocessing()\n        top10Movies()\n    if choice == 2:\n        genrePercentageCountry()\n    if choice == 3:\n        preProcessCritic()\n        criticReviews()\n    if choice == 4:\n        releasePreProcess()\n        movieRelease()\n\n","sub_path":"CSC 550 Project1 student ID 86408/recommendationSystem.py","file_name":"recommendationSystem.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546276583","text":"import cv2 as cv2\r\nimport socket\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nfrom google.cloud import storage\r\nfrom firebase import firebase\r\nimport os\r\nimport time\r\nREMOTE_SERVER = \"www.google.com\"\r\n\r\ndef is_connected(hostname):\r\n  try:\r\n    # see if we can resolve the host name -- tells us if there is\r\n    # a DNS listening\r\n    host = socket.gethostbyname(hostname)\r\n    # connect to the host -- tells us if the host is actually\r\n    # reachable\r\n    s = socket.create_connection((host, 80), 2)\r\n    s.close()\r\n    return True\r\n  except:\r\n     pass\r\n  return False\r\n\r\ndef convert(s): \r\n  \r\n    # initialization of string to \"\" \r\n    new = \"\" \r\n  \r\n    # traverse in the string  \r\n    for x in s: \r\n        new += x  \r\n  \r\n    # return string  \r\n    return new \r\n\r\nf=is_connected(REMOTE_SERVER)\r\n\r\n\r\n\r\nif f== False or True:\r\n\r\n    \r\n    dt = datetime.now()\r\n    #datetime.strftime(time)\r\n    \r\n    #print(\"Today's date:\", dt)\r\n    #print('Formatted DateTime', dt.strftime(\"%m/%d/%y %H:%M:%S\"))\r\n    user_name=\"user\"+dt.strftime(\"%m-%d-%y %H.%M.%S\")\r\n    print(\"user_name=\",user_name)\r\n    #print(type(user_name))\r\n    camera = cv2.VideoCapture(0)\r\n    return_value, image = camera.read()\r\n    \r\n    path='D:/downloads/torrent/Secret/'+user_name+'.jpg'\r\n    cv2.imwrite(path,image)\r\n    cv2.destroyAllWindows()\r\n    camera.release()\r\n    \r\n    \t\r\n\r\n    if f==True:\r\n        \r\n        os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"userdatarecord-91b776c797f3.json\"\r\n        firebase = firebase.FirebaseApplication('https://userdatarecord.firebaseio.com/')\r\n        client = storage.Client()\r\n        bucket = client.get_bucket('userdatarecord.appspot.com')\r\n        imagePath = path\r\n        imageBlob = bucket.blob(user_name+\".jpg\")\r\n        
imageBlob.upload_from_filename(imagePath)\r\n        \r\n    while(True):\r\n        import pygetwindow as gw\r\n        k=gw.getAllTitles()\r\n        s=convert(k)\r\n        f=open(user_name+\"details.txt\",\"w+\")\r\n        f.write(s)\r\n        time.sleep(5)\r\n        blob2 = bucket.blob(user_name+'details.txt')\r\n        outfile=user_name+\"details.txt\"\r\n        blob2.upload_from_filename(outfile)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n    \r\n","sub_path":"work/hel.pyw","file_name":"hel.pyw","file_ext":"pyw","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383088121","text":"\n\n# Step 1: for now I managed to access the file and print it\n\n#Step 2: the goal now is to collect all the links and gather them into one file\n\n#Step 3: for the spider to be able to crawl the links, they need to be converted to \n#utf-8\n#https://www.webatic.com/url-convertor\n#http://www.ask-tal.co.il/בלעדיות\n#http://www.ask-tal.co.il/%D7%91%D7%9C%D7%A2%D7%93%D7%99%D7%95%D7%AA\n\n#utf-8 decode\n#https://stackoverflow.com/questions/52470518/parsing-xml-file-using-python3-and-beautifulsoup\n\"\"\"\nThis program gets an xml sitemap and breaks it into links in a text file\n1.grab an xml site map\n\n\"\"\"\n\n\nfrom bs4 import BeautifulSoup\n\n\n#Project Start\nprint('Start Scraping XML\\n')\n\n\n\n#Open the file and enter it into a soup object\nwith open(\"sitemap.xml\") as fp:\n    soup = BeautifulSoup(fp,'xml')\n\n#print(soup.prettify())\n\n#Find all the links\n\nlinks_collection = soup.find_all(\"loc\")\n\n#now i got all the links and i need to throw them to a file\n\n\n#after throwing them to a file do some string manipulation\n\n#print(links_collection[0])\n\n#print(type(links_collection))\n\n#Open a file.\nf = open(\"sitemap.txt\", \"w\")\n\n\n#get all the links and enter them into a file.\nfor line in links_collection:\n    #print(line.get_text())\n    f.write(line.get_text()+ '\\n')\nf.close()\n\n#open and read the file after the appending:\nf = open(\"sitemap.txt\", \"r\")\nprint(f.read())\n\n\n\n\n\n\n\n\n","sub_path":"site_map_scraper.py","file_name":"site_map_scraper.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288480916","text":"import setuptools\nfrom os.path import join, dirname, relpath\nimport glob\nfrom fmridenoise.pipelines import get_pipelines_paths\nfrom fmridenoise.parcellation import get_parcelation_file_path, get_distance_matrix_file_path\nfrom fmridenoise.utils.templates import get_all_templates\nfrom itertools import chain\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\ndef get_requirements() -> list:\n    return ['nibabel>=2.0',\n            'seaborn>=0.9.0',\n            'numpy>=1.11',\n            'nilearn>=0.4.0',\n            'pandas>=0.19',\n            'jsonschema>=3.0.1',\n            'traits>=5.0.0', \n            'nipype>=1.2.0',\n            'sklearn>=0.0',\n            'pydot>=1.4.1',\n            'pybids>=0.9.1',\n            'jinja2>=2.10.1']\n\ndef relative_paths(paths: list) -> list:\n    return [ relpath(path, join(dirname(__file__), 'fmridenoise')) for path in paths ]\n\nparcelation_path = [get_parcelation_file_path(), get_distance_matrix_file_path()]\ntest = list(chain(relative_paths(get_pipelines_paths()), \n                  relative_paths(parcelation_path),\n                  relative_paths(get_all_templates())))\nsetuptools.setup(\n    name=\"fmridenoise\",\n    version=\"0.1.5\",\n    author=\"Karolina Finc, Mateusz Chojnowski, Kamil Bona\",\n    author_email=\"karolinafinc@gmail.com, zygfrydwagner@gmail.com, kongokou@gmail.com\",\n    description=\"fMRIDenoise - automated denoising, denoising strategies comparison, and functional connectivity data quality control.\",\n    
long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/nbraingroup/fmridenoise\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n packages=setuptools.find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\", \"*tests*\"]),\n install_requires=get_requirements(),\n package_data={'fmridenoise': list(chain(relative_paths(get_pipelines_paths()), \n relative_paths(parcelation_path),\n relative_paths(get_all_templates()))),\n '.': ['README.md', 'LICENSE']},\n scripts=['fmridenoise/scripts/fmridenoise']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479867232","text":"from setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport os\nimport sys\nimport setuptools\nfrom pip import locations\n\n\n# Determine whether the install is a user install\nis_user_install = \"--user\" in sys.argv[1:] \n\n# create string-like object that is evaluated \n# after pybind11 has been installed\nclass get_pybind_include(object):\n\n def __str__(self):\n pybind_include = os.path.dirname(locations.distutils_scheme('pybind11',is_user_install)['headers'])\n return pybind_include\n\n\next_modules = [\n Extension(\n 'pbtest',\n ['py/main.cpp'],\n include_dirs=[\n # Path to pybind11 headers\n get_pybind_include()\n ],\n language='c++',\n ),\n]\n\n# As of Python 3.6, CCompiler has a `has_flag` method.\n# cf http://bugs.python.org/issue26689\ndef has_flag(compiler, flagname):\n \"\"\"Return a boolean indicating whether a flag name is supported on\n the specified compiler.\n \"\"\"\n import tempfile\n with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\n f.write('int main (int argc, char **argv) { return 0; }')\n try:\n compiler.compile([f.name], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True\n\ndef cpp_flag(compiler):\n \"\"\"Return the -std=c++[11/14] compiler flag.\n\n The c++14 is prefered over c++11 (when it is available).\n \"\"\"\n if has_flag(compiler, '-std=c++14'):\n return '-std=c++14'\n elif has_flag(compiler, '-std=c++11'):\n return '-std=c++11'\n else:\n raise RuntimeError('Unsupported compiler -- at least C++11 support is needed!')\n\n\nclass BuildExt(build_ext):\n \"\"\"A custom build extension for adding compiler-specific options.\"\"\"\n c_opts = {\n 'msvc': ['/EHsc'],\n 'unix': [],\n }\n\n if sys.platform == 'darwin':\n c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']\n\n def build_extensions(self):\n ct = self.compiler.compiler_type\n opts = self.c_opts.get(ct, [])\n if ct == 'unix':\n opts.append(cpp_flag(self.compiler))\n if has_flag(self.compiler, '-fvisibility=hidden'):\n opts.append('-fvisibility=hidden')\n for ext in self.extensions:\n ext.extra_compile_args = opts\n build_ext.build_extensions(self)\n\nsetup(\n name='pbtest',\n version='0.0.1',\n author='Sylvain Corlay',\n author_email='sylvain.corlay@gmail.com',\n url='https://github.com/pybind/pbtest',\n description='A test project using pybind11',\n long_description='',\n ext_modules=ext_modules,\n install_requires=['pybind11'],\n cmdclass={'build_ext': BuildExt},\n 
zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177201315","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom bottle import get, post, request, run, hook, template, route\nimport subprocess\n\nfrom howdoi import howdoi\nimport sys\n# Test this command in a dos window if you are having trouble.\nHOW_DO_I_COMMAND = 'python -m howdoi.howdoi'\n\n\n@hook('before_request')\ndef strip_path():\n    request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')\n    \n    \n@route('/h/<query>')\ndef h_handler(query):\n    \"\"\"\n    Example:\n    /howdoi open file python\n    \"\"\"\n    text = query\n    # adding default params\n    args = {\n        'query': text.split(),\n        'pos': 1,\n        'all': False,\n        'link': False,\n        'clear_cache': False,\n        'version': False,\n        'num_answers': 1,\n        'color': False,\n    }\n\n    output = howdoi.howdoi(args)\n    return template(output)\n    \n@route('/hdi/<query>')\ndef hdi_handler(query):\n    \"\"\"\n    Example:\n    /howdoi open file python\n    \"\"\"\n    text = query\n\n    # adding default params\n    args = {\n        'query': text.split(),\n        'pos': 1,\n        'all': False,\n        'link': False,\n        'clear_cache': False,\n        'version': False,\n        'num_answers': 1,\n        'color': False,\n    }\n\n    output = howdoi.howdoi(args)\n    return output\n\n@post('/howdoi')\ndef howdoi_handler():\n    \"\"\"\n    Example:\n    /howdoi open file python\n    \"\"\"\n    postdata = request.body.read()\n    text = postdata\n\n    # adding default params\n    args = {\n        'query': text.split(),\n        'pos': 1,\n        'all': False,\n        'link': False,\n        'clear_cache': False,\n        'version': False,\n        'num_answers': 1,\n        'color': False,\n    }\n\n    output = howdoi.howdoi(args)\n    return output\n\n\n@route('/')\ndef index():\n    return QueryHowDoI()\n    \n\ndef QueryHowDoI():\n    '''\n    Kicks off a subprocess to send the 'Query' to HowDoI\n    Prints the result, which in this program will route to a gooeyGUI window\n    :param Query: text english question to ask the HowDoI web engine\n    :return: nothing\n    '''\n    \n    Query = \"Reverse a string in python\"\n    howdoi_command = HOW_DO_I_COMMAND\n    full_text_option = ' -a' \n    t = subprocess.Popen(howdoi_command + ' \\\"'+ Query + '\\\" -n ' + str(1)+full_text_option, stdout=subprocess.PIPE)\n    (output, err) = t.communicate()\n    print('{:^88}'.format(Query.rstrip()))\n    print('_'*60)\n    print(output.decode(\"utf-8\") )\n    return template(output.decode(\"utf-8\") )\n    \n    \nif __name__ == '__main__':\n    port = int(os.environ.get('PORT', 5000))\n    debug = os.environ.get('DEBUG', False)\n    run(host='0.0.0.0', port=port, debug=debug)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489093589","text":"# -*- coding: UTF-8 -*-\n\nimport codecs\n\nrenmin_label_map = {\n    'O': 'O',\n    'B_nr': 'B-PER',\n    'M_nr': 'I-PER',\n    'E_nr': 'I-PER',\n    'B_ns': 'B-LOC',\n    'M_ns': 'I-LOC',\n    'E_ns': 'I-LOC',\n    'B_nt': 'B-ORG',\n    'M_nt': 'I-ORG',\n    'E_nt': 'I-ORG'\n}\nrenmin_train_data_path = './renmin_train_data.txt'\nrenmin_train_label_path = './renmin_train_label.txt'\nrenmin_test_data_path = './renmin_test_data.txt'\nrenmin_test_label_path = './renmin_test_label.txt'\n\n\ndef save2file(datas, labels, save_data_path, save_label_path):\n    with open(save_data_path, mode='w') as f:\n        for data in datas:\n            data_str = \" \".join(str(i) for i in data)\n            f.write(data_str+'\\n')  # write\n\n    with open(save_label_path, mode='w') as 
f:\n        for label in labels:\n            label_str = \" \".join(str(i) for i in label)\n            f.write(label_str+'\\n')  # write\n\n\ndef renmin_data_prepare():\n    # train/test split ratio is 3:1\n    rate = 0.75\n    datas = list()\n    labels = list()\n    input_data = codecs.open('renmin_original.txt', 'r', 'utf-8')\n    for line in input_data.readlines():\n        line = line.split()\n        linedata=[]\n        linelabel=[]\n        numNotO=0\n        for word in line:\n            word = word.split('/')\n            linedata.append(word[0])\n            linelabel.append(renmin_label_map[word[1]])\n            if word[1] != 'O':\n                numNotO += 1\n        if numNotO != 0:\n            datas.append(linedata)\n            labels.append(linelabel)\n\n    input_data.close()\n    data_len = len(datas)\n    data_slice_index = int(data_len*rate)\n    # save to files\n    save2file(datas[:data_slice_index], labels[:data_slice_index], renmin_train_data_path, renmin_train_label_path)\n    save2file(datas[data_slice_index:], labels[data_slice_index:], renmin_test_data_path, renmin_test_label_path)\n    print(\"People's Daily 1998 data length:\", len(datas))\n    print(\"People's Daily 1998 training set length:\", len(labels[:data_slice_index]))\n    print(\"People's Daily 1998 test set length:\", len(labels[data_slice_index:]))\n\n\nif __name__ == '__main__':\n    renmin_data_prepare()\n","sub_path":"BiLSTM-CRF-ChineseNER/dataset/renMinRiBao/renmin_prepare.py","file_name":"renmin_prepare.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571946603","text":"from bluepy import btle\nimport sys\nimport re\nimport datetime\nimport math\n\nclass Aranet4Error(Exception):\n    pass\n\nclass Aranet4HistoryDelegate(btle.DefaultDelegate):\n    def __init__(self, handle, param):\n        btle.DefaultDelegate.__init__(self)\n        self.param = param\n        self.handle = handle\n        self.results = {}\n        self.reading = True\n\n    def handleNotification(self, handle, data):\n        raw = bytearray(data)\n        if self.handle != handle:\n            print(\"ERROR: invalid handle. Got {:04X}, expected {:04X}\".format(handle, self.handle))\n            return\n\n        param = raw[0]\n        if self.param != param:\n            print(\"ERROR: invalid parameter. 
Got {:02X}, expected {:02X}\".format(param, self.param))\n return\n\n idx = raw[1] + (raw[2] << 8) - 1\n count = raw[3]\n pos = 4\n\n self.reading = count > 0\n\n while count > 0:\n step = 1 if param == Aranet4.PARAM_HUMIDITY else 2\n\n if len(raw) < pos + step:\n print(\"ERROR: unexpected end of data\")\n break\n\n result = self._process(raw, pos, param)\n self.results[idx] = result\n pos += step\n idx += 1\n count -= 1\n\n def _process(self, data, pos, param):\n if param == Aranet4.PARAM_TEMPERATURE:\n return Aranet4.checkReadingValues(Aranet4.PARAM_TEMPERATURE, data[pos] + (data[pos+1] << 8))\n elif param == Aranet4.PARAM_HUMIDITY:\n return Aranet4.checkReadingValues(Aranet4.PARAM_HUMIDITY, data[pos])\n elif param == Aranet4.PARAM_PRESSURE:\n return Aranet4.checkReadingValues(Aranet4.PARAM_PRESSURE, data[pos] + (data[pos+1] << 8))\n elif param == Aranet4.PARAM_CO2:\n return Aranet4.checkReadingValues(Aranet4.PARAM_CO2, data[pos] + (data[pos+1] << 8))\n return None\n\nclass Aranet4:\n # Param IDs\n PARAM_TEMPERATURE = 1\n PARAM_HUMIDITY = 2\n PARAM_PRESSURE = 3\n PARAM_CO2 = 4\n\n # Param return value if no data\n AR4_NO_DATA_FOR_PARAM = -1\n\n # Aranet UUIDs and handles\n # Services\n AR4_SERVICE = btle.UUID(\"f0cd1400-95da-4f4b-9ac8-aa55d312af0c\")\n GENERIC_SERVICE = btle.UUID(\"00001800-0000-1000-8000-00805f9b34fb\")\n COMMON_SERVICE = btle.UUID(\"0000180a-0000-1000-8000-00805f9b34fb\")\n\n # Read / Aranet service\n AR4_READ_CURRENT_READINGS = btle.UUID(\"f0cd1503-95da-4f4b-9ac8-aa55d312af0c\")\n AR4_READ_CURRENT_READINGS_DET = btle.UUID(\"f0cd3001-95da-4f4b-9ac8-aa55d312af0c\")\n AR4_READ_INTERVAL = btle.UUID(\"f0cd2002-95da-4f4b-9ac8-aa55d312af0c\")\n AR4_READ_SECONDS_SINCE_UPDATE = btle.UUID(\"f0cd2004-95da-4f4b-9ac8-aa55d312af0c\")\n AR4_READ_TOTAL_READINGS = btle.UUID(\"f0cd2001-95da-4f4b-9ac8-aa55d312af0c\")\n\n # Read / Generic servce\n GENERIC_READ_DEVICE_NAME = btle.UUID(\"00002a00-0000-1000-8000-00805f9b34fb\")\n\n # Read / Common servce\n COMMON_READ_MANUFACTURER_NAME = btle.UUID(\"00002a29-0000-1000-8000-00805f9b34fb\")\n COMMON_READ_MODEL_NUMBER = btle.UUID(\"00002a24-0000-1000-8000-00805f9b34fb\")\n COMMON_READ_SERIAL_NO = btle.UUID(\"00002a25-0000-1000-8000-00805f9b34fb\")\n COMMON_READ_HW_REV = btle.UUID(\"00002a27-0000-1000-8000-00805f9b34fb\")\n COMMON_READ_SW_REV = btle.UUID(\"00002a28-0000-1000-8000-00805f9b34fb\")\n COMMON_READ_BATTERY = btle.UUID(\"00002a19-0000-1000-8000-00805f9b34fb\")\n\n # Write / Aranet service\n AR4_WRITE_CMD= btle.UUID(\"f0cd1402-95da-4f4b-9ac8-aa55d312af0c\")\n\n # Subscribe / Aranet service\n AR4_SUBSCRIBE_HISTORY = 0x0032\n AR4_NOTIFY_HISTORY = 0x0031\n\n def __init__(self, address):\n if not re.match(\"[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", address.lower()):\n raise Aranet4Error(\"Invalid device address\")\n\n self.address = address\n self.device = btle.Peripheral(address, btle.ADDR_TYPE_RANDOM)\n\n # This will not work. 
bluez returns up to 20 bytes per notification and rest of data is never received.\n # self.device.setMTU(247)\n\n # While in CO2 calibration mode Aranet4 did not take new measurements and stores Magic numbers in measurement history.\n # Here are history data converted with checking for Magic numbers.\n @staticmethod\n def checkReadingValues(metric, value):\n if value == None:\n return Aranet4.AR4_NO_DATA_FOR_PARAM\n\n if metric == Aranet4.PARAM_CO2:\n if (value & 0x8000) == 0x8000:\n return Aranet4.AR4_NO_DATA_FOR_PARAM\n elif metric == Aranet4.PARAM_TEMPERATURE:\n if value == 0x4000:\n return Aranet4.AR4_NO_DATA_FOR_PARAM\n elif value > 0x8000:\n # Negative temperatures are out of Aranet4 operating temperature range however device can return negative temperatures\n # return ((0xFFFF - value) * (-1)) / 20.0\n # For temperatures below 0 degrees return 0\n return 0\n else:\n return value / 20.0\n elif metric == Aranet4.PARAM_PRESSURE:\n if (value & 0x8000) == 0x8000:\n return Aranet4.AR4_NO_DATA_FOR_PARAM\n else:\n return value / 10.0\n elif metric == Aranet4.PARAM_HUMIDITY:\n if (value & 0x80) == 0x80:\n return Aranet4.AR4_NO_DATA_FOR_PARAM\n\n return value\n\n def currentReadings(self, details=False):\n readings = {\"temperature\": None, \"humidity\": None, \"pressure\": None, \"co2\": None, \"battery\": -1, \"ago\": -1, \"interval\": -1}\n s = self.device.getServiceByUUID(self.AR4_SERVICE)\n if details:\n c = s.getCharacteristics(self.AR4_READ_CURRENT_READINGS_DET)\n else:\n c = s.getCharacteristics(self.AR4_READ_CURRENT_READINGS)\n\n b = bytearray(c[0].read())\n\n readings[\"co2\"] = Aranet4.checkReadingValues(self.PARAM_CO2, self.le16(b, 0))\n readings[\"temperature\"] = Aranet4.checkReadingValues(self.PARAM_TEMPERATURE, self.le16(b, 2))\n readings[\"pressure\"] = Aranet4.checkReadingValues(self.PARAM_PRESSURE, self.le16(b, 4))\n readings[\"humidity\"] = Aranet4.checkReadingValues(self.PARAM_HUMIDITY, b[6])\n readings[\"battery\"] = b[7]\n\n if details:\n readings[\"interval\"] = self.le16(b, 9)\n readings[\"ago\"] = self.le16(b, 11)\n\n return readings\n\n def getInterval(self):\n s = self.device.getServiceByUUID(self.AR4_SERVICE)\n c = s.getCharacteristics(self.AR4_READ_INTERVAL)\n return self.le16(c[0].read())\n\n def getName(self):\n s = self.device.getServiceByUUID(self.GENERIC_SERVICE)\n c = s.getCharacteristics(self.GENERIC_READ_DEVICE_NAME)\n return c[0].read().decode(\"utf-8\")\n\n def getVersion(self):\n s = self.device.getServiceByUUID(self.COMMON_SERVICE)\n c = s.getCharacteristics(self.COMMON_READ_SW_REV)\n return c[0].read().decode(\"utf-8\")\n\n def getLastMeasurementDate(self, epoch=False):\n ago = self.getSecondsSinceUpdate()\n last = datetime.datetime.utcnow().replace(microsecond=0) - datetime.timedelta(seconds=ago)\n\n if epoch:\n return (last - datetime.datetime(1970,1,1)).total_seconds()\n else:\n return last\n\n def pullTimedInRange(self, start, end, params=\"thpc\"):\n last = self.getLastMeasurementDate(False)\n total = self.getTotalReadings()\n interval = self.getInterval()\n\n startAgo = math.ceil((last - start).total_seconds() / interval)\n endAgo = math.ceil((last - end).total_seconds() / interval)\n\n startIdx = int(total - startAgo)\n endIdx = int(total - endAgo)\n\n # swap\n if (startIdx > endIdx):\n startIdx, endIdx = endIdx, startIdx\n\n if endIdx < 1:\n return [] # range doesn't contain any records\n\n if endIdx > total:\n endIdx = total\n\n if startIdx < 1:\n startIdx = 1\n\n return self.pullTimedHistory(startIdx, endIdx, params, total)\n\n def 
pullTimedHistory(self, start=0x0001, end=0xFFFF, params=\"thpc\", total=False):\n interval = self.getInterval()\n\n if not total:\n total = self.getTotalReadings()\n\n # last measurement, epoch\n last = self.getLastMeasurementDate(True)\n\n resultsCO2 = {}\n resultsT = {}\n resultsP = {}\n resultsH = {}\n\n if \"c\" in params:\n resultsCO2 = self.pullHistory(self.PARAM_CO2, start, end)\n\n if \"t\" in params:\n resultsT = self.pullHistory(self.PARAM_TEMPERATURE, start, end)\n\n if \"p\" in params:\n resultsP = self.pullHistory(self.PARAM_PRESSURE, start, end)\n\n if \"h\" in params:\n resultsH = self.pullHistory(self.PARAM_HUMIDITY, start, end)\n\n results = []\n\n for i in range(start,end):\n delta = (total - (i + 1)) * interval\n epoch = last - delta\n r = {\n \"id\": i,\n \"time\": epoch,\n \"temperature\": resultsT.get(i, self.AR4_NO_DATA_FOR_PARAM),\n \"pressure\": resultsP.get(i, self.AR4_NO_DATA_FOR_PARAM),\n \"humidity\": resultsH.get(i, self.AR4_NO_DATA_FOR_PARAM),\n \"co2\": resultsCO2.get(i, self.AR4_NO_DATA_FOR_PARAM)\n }\n results.append(r)\n\n return results\n\n def pullHistory(self, param, start=0x0001, end=0xFFFF):\n start = start + 1\n if start < 1:\n start = 0x0001\n\n val = bytearray.fromhex(\"820000000100ffff\")\n val[1] = param\n self.writeLE16(val, 4, start)\n self.writeLE16(val, 6, end)\n\n s = self.device.getServiceByUUID(self.AR4_SERVICE)\n c = s.getCharacteristics(self.AR4_WRITE_CMD)\n rsp = c[0].write(val, True)\n\n # register delegate\n delegate = Aranet4HistoryDelegate(self.AR4_NOTIFY_HISTORY, param)\n self.device.setDelegate(delegate)\n\n rsp = self.device.writeCharacteristic(self.AR4_SUBSCRIBE_HISTORY, bytearray([1,0]), True)\n\n timeout = 3\n while timeout > 0 and delegate.reading:\n if self.device.waitForNotifications(1.0):\n continue\n timeout -= 1\n\n return delegate.results\n\n def getSecondsSinceUpdate(self):\n s = self.device.getServiceByUUID(self.AR4_SERVICE)\n c = s.getCharacteristics(self.AR4_READ_SECONDS_SINCE_UPDATE)\n return self.le16(c[0].read())\n\n def getTotalReadings(self):\n s = self.device.getServiceByUUID(self.AR4_SERVICE)\n c = s.getCharacteristics(self.AR4_READ_TOTAL_READINGS)\n return self.le16(c[0].read())\n\n def le16(self, data, start=0):\n raw = bytearray(data)\n return raw[start] + (raw[start+1] << 8)\n\n def writeLE16(self, data, pos, value):\n data[pos] = (value) & 0x00FF\n data[pos+1] = (value >> 8) & 0x00FF\n\n def dbgPrintChars(self):\n for s in self.device.getServices():\n print(s)\n for c in s.getCharacteristics():\n print(\" --> \", c)\n","sub_path":"aranet4/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284901801","text":"import random\r\ndef numba():\r\n count = 0\r\n random_number = random.randrange(1, 6)\r\n while count < 3:\r\n userGuess = input('Guess number 1 to 5: ')\r\n # if count < 3: # you don't need this, you already have while loop.\r\n if int(userGuess) == random_number:\r\n print(f'''Yaay you guessed right \r\n The secret number is {random_number}''')\r\n # User guessed it right! 
Stop the while loop.\r\n                break\r\n            elif int(userGuess) < random_number:\r\n                print(f'''HINT\r\n                Guessed number is less than secret number {random_number}''')\r\n            elif int(userGuess) > random_number:\r\n                print(f'''HINT\r\n                Guessed number is more than secret number {random_number}''')\r\n            else:\r\n                print('so close yet so far!!')\r\n            count +=1\r\nnumba()","sub_path":"guessalt.py","file_name":"guessalt.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209942347","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom NEmusic.items import Song\nfrom NEMbox.api import NetEase,encrypted_request\n\nimport hashlib\n\n\nclass fakePipeline(object):\n    def __init__(self):\n        self.mysession = NetEase()\n        user = \"xhm900119@163.com\"\n        pw = \"19900119\"\n        pw_ = hashlib.md5(pw.encode('utf-8')).hexdigest()\n        self.mysession.login(user, pw_)\n\n    def fake(self, sid):\n        url = 'http://music.163.com/weapi/feedback/weblog?csrf_token='\n        text = {\n            'data': {\n                'logs': {\n                    'action': \"play\",\n                    'json': {\"type\": \"song\",\n                             \"wifi\": 0,\n                             \"download\": 0,\n                             \"id\": sid,\n                             \"time\": 600,\n                             \"end\": \"ui\",\n                             \"source\": \"list\",\n                             \"sourceId\": \"576900073\"}\n                }\n            }\n        }\n        data = encrypted_request(text)\n        self.mysession.session.post(url=url, data=data)\n\n    def process_item(self, item, spider):\n        if item.__class__ == Song:\n            self.fake(item[\"sid\"])\n        return item\n","sub_path":"NEmusic/NEmusic/fake_pipeline.py","file_name":"fake_pipeline.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292361836","text":"def gcd(a,b):\n    '''\n    Finds the greatest common divisor of the numbers a and b using the Euclidean algorithm.\n    Returns the gcd of the numbers.\n    '''\n\n    if a == b:\n        return a\n    elif a > b:\n        return gcd(a-b, b)\n    else: # a < b\n        return gcd(b-a, a)\n\ndef gcd_new(a,b):\n    if b == 0:\n        return a\n    else:\n        return gcd_new(b, a%b)\n\ndef gcd_short(a,b):\n    return a if b == 0 else gcd_short(b, a%b)\n\ndef test_gcd():\n    test = gcd_short(7,3)\n\n    if test == 1:\n        print(\"Ok\")\n    else:\n        print(\"Fail\")\n\ntest_gcd()","sub_path":"euclideian_algorithm.py","file_name":"euclideian_algorithm.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647022500","text":"from graphql.language import ast\n\nfrom ..query import QueryVisitor, Field\n\n\ndef _name(value):\n    return ast.NameNode(value=value) if value is not None else None\n\n\ndef _encode(value):\n    if value is None:\n        return ast.NullValueNode()\n    elif isinstance(value, bool):\n        return ast.BooleanValueNode(value=value)\n    elif isinstance(value, int):\n        return ast.IntValueNode(value=str(value))\n    elif isinstance(value, float):\n        return ast.FloatValueNode(value=str(value))\n    elif isinstance(value, str):\n        return ast.StringValueNode(value=value)\n    elif isinstance(value, list):\n        return ast.ListValueNode(values=[_encode(v) for v in value])\n    elif isinstance(value, dict):\n        return ast.ObjectValueNode(fields=[\n            ast.ObjectFieldNode(name=_name(key), value=_encode(val))\n            for key, val in value.items()\n        ])\n    else:\n        raise TypeError('Unsupported type: {!r}'.format(value))\n\n\nclass Exporter(QueryVisitor):\n\n    def visit_field(self, obj: Field):\n        arguments = None\n        if 
obj.options:\n arguments = [ast.ArgumentNode(name=_name(key), value=_encode(val))\n for key, val in obj.options.items()]\n return ast.FieldNode(\n name=_name(obj.name),\n alias=_name(obj.alias),\n arguments=arguments,\n )\n\n def visit_link(self, obj):\n arguments = None\n if obj.options:\n arguments = [ast.ArgumentNode(name=_name(key), value=_encode(val))\n for key, val in obj.options.items()]\n return ast.FieldNode(\n name=_name(obj.name),\n alias=_name(obj.alias),\n arguments=arguments,\n selection_set=self.visit(obj.node),\n )\n\n def visit_node(self, obj):\n return ast.SelectionSetNode(\n selections=[self.visit(f) for f in obj.fields],\n )\n\n\ndef export(query):\n return ast.DocumentNode(definitions=[\n ast.OperationDefinitionNode(\n operation=ast.OperationType.QUERY,\n selection_set=Exporter().visit(query),\n )\n ])\n","sub_path":"hiku/export/graphql.py","file_name":"graphql.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9691013","text":"#!/usr/bin/env python\n\nimport struct\nimport protobuf_wrapper\nfrom common import *\n\nclass Request(object):\n\n def __init__(self):\n self.header = None\n self.body = None\n self.client_expired = False\n self.socket = None\n self.addr = None\n self.is_valid = False\n\n def SetProtobufMessageHandler(self, pb_msg_handler):\n self.pb_msg_handler = pb_msg_handler\n\n def SetHeaderClass(self, header_class):\n self.header_class = header_class\n\n def ParseHeader(self, raw_header):\n self.header = self.header_class()\n self.header.ParseFromString(raw_header)\n self.raw_header = raw_header\n\n def ParseBody(self, raw_body):\n if self.header.is_valid:\n self.body = self.pb_msg_handler.ParseFromString(self.header.pb_msg_type, raw_body)\n self.raw_body = raw_body\n if self.body:\n self.is_valid = True\n\n def encode(self, apid, pb_msg_type, pb_msg_metadata):\n pb_message = self.pb_msg_handler.ParseFromMetadata(pb_msg_type, pb_msg_metadata)\n raw_body = pb_message.SerializeToString()\n header = Header(pb_msg_type, len(raw_body), apid)\n raw_header = header.SerializeToString()\n return raw_header, raw_body\n\n\nclass Header(object):\n\n def __init__(self, pb_msg_type=None, body_legnth=None, apid=None):\n self.pb_msg_type = pb_msg_type\n self.body_length = body_legnth\n self.apid = apid\n self.is_valid = False\n\n def ParseFromString(self, serialized_str):\n try:\n self.pb_msg_type, self.body_length, self.apid = struct.unpack(REQUEST_HEADER_FMT, serialized_str) \n self.is_valid = True\n return True\n except struct.error:\n self.is_valid = False\n return False\n\n def SerializeToString(self):\n try:\n return struct.pack(REQUEST_HEADER_FMT, self.pb_msg_type, self.body_length, self.apid)\n except struct.error:\n return None\n\n","sub_path":"server/libs/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155861561","text":"import pygame\nfrom classes.Screen import *\nfrom classes.Button import *\nimport sys\n\nclass GameOverScreen(Screen):\n def __init__(self, gm):\n super().__init__()\n self.mainmenu_button = Button(50, 100, 200, 50, \"Main Menu\", gm)\n self.quit_button = Button(300, 100, 200, 50, 'Quit Game', gm)\n\n def run(self,gm):\n\n click = False\n running = True\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(\"sounds/Death.ogg\"))\n\n while running:\n \n gm.clock.tick(60)\n gm.fake_screen.fill((0,0,0))\n 
self.draw_text('Game Over', gm.font_big, (255, 255, 255), gm.fake_screen, (960,350))\n self.draw_text('Score: {}'.format(int(gm.game_screen.score)), gm.font_med, (255, 255, 255), gm.fake_screen, (960,550))\n \n mx, my = gm.mousepos\n \n #Avalia botão quit\n if self.quit_button.contour.collidepoint((mx, my)):\n self.quit_button.draw(True,gm)\n if click:\n gm.actual_screen = gm.quit_confirmation_screen\n running = False\n else:\n self.quit_button.draw(False,gm)\n \n #Avalia botão menu\n if self.mainmenu_button.contour.collidepoint((mx, my)):\n self.mainmenu_button.draw(True, gm)\n if click:\n gm.game_screen.new_game(gm)\n gm.actual_screen = gm.menu_screen\n running = False\n else:\n self.mainmenu_button.draw(False, gm)\n\n\n click = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gm.game_screen.onCleanup(gm)\n \n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n gm.actual_screen = gm.quit_confirmation_screen\n running = False\n \n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n \n self.run_screen(gm)\n \n ","sub_path":"src/classes/GameOverScreen.py","file_name":"GameOverScreen.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"203878263","text":"import os\r\nimport sys\r\n\r\nsys.path.append(os.path.join(os.path.dirname(__file__), \"ext\"))\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport urllib\r\n\r\nquery = 'linear search in python site:stackoverflow.com'\r\nquery = query.replace(\" \", \"+\")\r\n\r\n\r\nurl = \"https://www.google.com/search?q=\"+query+\"&gbv=1&sei=YwHNVpHLOYiWmQHk3K24Cw\"\r\nprint(url)\r\nrequest = urllib.request.Request(url,headers={'User-Agent':'Sublime Text'})\r\nr = urllib.request.urlopen(request).read()\r\nsoup = BeautifulSoup(r, \"html.parser\")\r\n\r\nfor item in soup.find_all('h3', attrs={'class' : 'r'}):\r\n url = item.a['href'][7:]\r\n break\r\n\r\nresponse = urllib.request.urlopen(url).read()\r\nsoup = BeautifulSoup(response, \"html.parser\")\r\n\r\nfor item in soup.find_all('div', attrs={'class' : 'answer'}):\r\n\ttry:\r\n\t\tprint(item.find('pre').find('code').text)\r\n\t\tbreak\r\n\texcept:\r\n\t\tcontinue","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522379237","text":"# -*- coding: utf-8 -*-\n\n\"\"\"bink_eat URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom bink_eat import views\nfrom engagement.views import WechatPayOperationView\n\nurlpatterns = [\n url(r'^user/', include('account.urls')),\n url(r'^eat/', include('engagement.urls')),\n url(r'^groups/', include('groups.urls')),\n url(r'^deposit/', include('deposit.urls')),\n url(r'^pay/', include('payment.urls')),\n url(r'^qrcode/$', views.BinkQrcodeView.as_view(), name='bink_qrcode'),\n url(r'^test/$', views.TestView.as_view(), name='test'),\n url(r'^about/$', views.AboutView.as_view(), name='about'),\n]\n","sub_path":"bink_eat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286178381","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/lbusoni/git/palpao/test/types/deformable_mirror_status_test.py\n# Compiled at: 2018-10-01 02:57:02\n# Size of source mod 2**32: 837 bytes\nimport unittest, numpy as np\nfrom palpao.types.deformable_mirror_status import DeformableMirrorStatus\n\nclass DeformableMirrorStatusTest(unittest.TestCase):\n\n def testHappyPath(self):\n numberOfActs = 10\n numberOfModes = 8\n actuatorCommands = np.arange(numberOfActs)\n commandCounter = 42\n status = DeformableMirrorStatus(numberOfActs, numberOfModes, actuatorCommands, commandCounter)\n self.assertEqual(numberOfActs, status.numberOfActuators())\n self.assertEqual(numberOfModes, status.numberOfModes())\n self.assertTrue(np.allclose(actuatorCommands, status.actuatorCommands()))\n self.assertEqual(commandCounter, status.commandCounter())\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/palpao-0.16.0.tar/deformable_mirror_status_test.cpython-36.py","file_name":"deformable_mirror_status_test.cpython-36.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369191937","text":"import unittest\nfrom HTMLTestRunner import HTMLTestRunner\nimport time\nimport os\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\n# import logging\n\n\ndef new_report(test_report):\n\n lists = os.listdir(test_report)\n lists.sort(key=lambda fn: os.path.getmtime(test_report+'\\\\'+fn))\n file_new = os.path.join(test_report, lists[-1])\n return file_new\n\n\ndef send_email(file_new):\n\n username = 'zhangkai@tjmeiteng.com'\n password = '960811kai'\n sender = 'zhangkai@tjmeiteng.com'\n receiver = ['wanghui01@tjmeiteng.com']\n msg = MIMEMultipart('mixed')\n msg['Subject'] = Header(\"自动化测试报告\", 'utf-8')\n msg['From'] = 'zhangkai@tjmeiteng.com'\n msg['To'] = \";\".join(receiver) \n text = \"自动化测试完成,结果请查看附件\"\n text_plain = MIMEText(text, 'plain', 'utf-8') \n msg.attach(text_plain)\n sendfile = open(file_new, 'rb').read()\n text_att = MIMEText(sendfile, 'base64', 'utf-8')\n text_att[\"Content-Type\"] = 'application/octet-stream'\n text_att.add_header('Content-Disposition', 'attachment', filename='TestResult.html')\n msg.attach(text_att) \n smtp = smtplib.SMTP() \n smtp.connect('smtp.tjmeiteng.com')\n smtp.login(username, password) \n smtp.sendmail(sender, receiver, msg.as_string()) \n smtp.quit()\n\n\n# def get_windows_img(self):\n# self.logger = 
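# A runnable sketch of the same screenshot-on-failure idea as the helper
# commented out here (assumptions: driver must be a live Selenium
# WebDriver and the target folder must be writable):
#
# def save_screenshot(driver, folder='E:\\screenshot'):
#     os.makedirs(folder, exist_ok=True)
#     name = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) + '.png'
#     path = os.path.join(folder, name)
#     try:
#         driver.get_screenshot_as_file(path)
#     except Exception as e:
#         print('Failed to take screenshot: %s' % e)
#     return path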
logging.getLogger(__name__)\n# file_path = 'E:\\\\screenshot'\n# rq = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n# screen_name = file_path + rq + '.png'\n# print(screen_name)\n# try:\n# self.driver.get_screenshot_as_file(screen_name)\n# self.logger.info(\"Had take screenshot and save to folder : /screenshots\")\n# except NameError as e:\n# self.logger.error(\"Failed to take screenshot! %s\" % e)\n# self.get_windows_img()\n\n\nif __name__ == \"__main__\": \n test_dir = \"C:\\\\Users\\\\LENOVO\\\\Desktop\\\\python\\\\美腾\\\\zhiyou_guanli\\\\创建正式项目与子项目\"\n test_report = \"E:\\\\html\"\n discover = unittest.defaultTestLoader.discover(test_dir, \n pattern='test_*.py')\n now = time.strftime('%Y-%m-%d_%H_%M_%S_')\n filename = test_report+'\\\\' + now + 'result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp,\n title=u'测试报告',\n description=u'用例执行情况:')\n runner.run(discover)\n fp.close() \n # 取最新测试报告\n new_report = new_report(test_report)\n # 发送邮件,发送最新测试报告html\n send_email(new_report)\n","sub_path":"zhiyou_guanli/创建正式项目与子项目/sendemail1.py","file_name":"sendemail1.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608132798","text":"A, B, W = list(map(int, input().split()))\nW *= 1000\ni = 1\nm = float('inf')\nM = -1\nwhile A <= W/i:\n if W/i <= B:\n m = min(m, i)\n M = max(M, i)\n i += 1\n\nif m == float('inf'):\n print('UNSATISFIABLE')\nelse:\n print(m, M)","sub_path":"old/ABC195/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290850035","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as pyp\nfrom scipy.integrate import odeint\n\n# constants - assuming we are on earth at sea level)\nC = 0.47\nrho = 1.225\ng = 9.81\n# not actually constants but easier to work with\nA = 0.1\nm = 5\n\n# takes initial height, initial velocity, launch angle (in SI units, except theta in degrees)\ndef projectile(y0 = 0, v0 = 5, theta = 30):\n \n theta = theta * np.pi/180\n \n x0 = 0\n # y0 given\n vx0 = v0 * np.cos(theta)\n vy0 = v0 * np.sin(theta)\n w0 = [x0, y0, vx0, vy0]\n \n tf = ((vy0)+np.sqrt((vy0)**2+2*g*y0))/g # Didn't know how else to do it\n t = np.linspace(0, tf, 100)\n \n xvals = np.array([x0,vx0])\n yvals = np.array([y0,vy0])\n \n solvex = odeint(modelx, xvals, t)\n solvey = odeint(modely, yvals, t)\n \n xgraph = solvex[:,0]\n ygraph = solvey[:,0]\n \n xrange = max(solvex[:,0])\n hmax = max(solvey[:,0])\n \n pyp.plot(xgraph, ygraph)\n pyp.title('Trajectory')\n pyp.xlabel('x (m)')\n pyp.ylabel('y (m)')\n pyp.ylim(0,None)\n pyp.show()\n \n return [tf, xrange, hmax]\n\ndef modelx(xvals, t):\n \n xp = xvals[1]\n xpp = -C*rho*A*(xp)**2/(2*m)\n \n return np.array([xp, xpp])\n\ndef modely(yvals, t):\n \n yp = yvals[1]\n ypp = -g - C*rho*A*(yp)**2/(2*m)\n \n return np.array([yp, ypp])\n\n ","sub_path":"project 2/project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156734408","text":"\"\"\" calculating reduced quantity.\n\n reduced_quantity =\n quantity_1 * quantity_2 / (quantity_1 + quantity_2)\n\"\"\"\n\nfrom particula import u\n\n\ndef reduced_quantity(a_quantity, b_quantity):\n \"\"\" Returns the reduced mass of two particles.\n\n Examples:\n ```\n >>> reduced_quantity(1*u.kg, 1*u.kg)\n \n >>> 
reduced_quantity(1*u.kg, 20*u.kg).m\n 0.9523809523809523\n >>> reduced_quantity(1, 200)\n 0.9950248756218906\n >>> reduced_quantity([1, 2, 3], 200)\n array([0.99502488, 1.98019802, 2.95566502])\n >>> reduced_quantity([1, 2], [200, 300])\n array([0.99502488, 1.98675497])\n ```\n\n Parameters:\n a_quantity (float) [arbitrary units]\n b_quantity (float) [arbitrary units]\n\n Returns:\n (float) [arbitrary units]\n\n A reduced quantity is an \"effective inertial\" quantity,\n allowing two-body problems to be solved as one-body problems.\n \"\"\"\n\n a_q = a_quantity\n b_q = b_quantity\n\n if isinstance(a_q, u.Quantity):\n a_q = a_q.to_base_units()\n if not isinstance(b_q, u.Quantity):\n raise TypeError(\n f\"\\n\\t\"\n f\"{a_q} and {b_q} (dimensionless) not compatible!\\n\\t\"\n f\"Quantities must have same units to be reduced.\\n\\t\"\n f\"Try: {a_q} and {b_q} {a_q.units} for example.\\n\"\n )\n if a_q.units != b_q.to_base_units().units:\n raise TypeError(\n f\"\\n\\t\"\n f\"{a_q} and {b_q} not compatible!\\n\"\n f\"Quantities must have same units to be reduced.\\n\\t\"\n f\"Try: {a_q} and {b_q} {a_q.units} for example.\"\n )\n elif isinstance(b_q, u.Quantity):\n b_q = b_q.to_base_units()\n if not isinstance(a_q, u.Quantity):\n raise TypeError(\n f\"\\n\\t\"\n f\"{a_q} (dimensionless) and {b_q} not compatible!\\n\\t\"\n f\"Quantities must have same units to be reduced.\\n\\t\"\n f\"Try: {b_q} and {a_q} {b_q.units} for example.\"\n\n )\n if a_q.to_base_units().units != b_q.units:\n raise TypeError(\n f\"\\n\\t\"\n f\"{a_q} and {b_q} not compatible!\\n\\t\"\n f\"Quantities must have same units to be reduced.\\n\\t\"\n f\"Try: {b_q} and {a_q} {b_q.units} for example\"\n )\n\n if not isinstance(a_q, u.Quantity):\n a_q = u.Quantity(a_q, \" \")\n if not isinstance(b_q, u.Quantity):\n b_q = u.Quantity(b_q, \" \")\n\n return (a_q * b_q / (a_q + b_q)).to_base_units()\n","sub_path":"particula/util/reduced_quantity.py","file_name":"reduced_quantity.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"279852228","text":"\n# Copyright (c) 2020, Romain Rouvoy\n# \n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER\n# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\nimport random\nimport time\nimport yaml\nfrom .distribution import PoissonDistribution, NoDistribution\n\nclass Workload:\n \"\"\"\n Core workload engine that spawns jobs following two given distributions\n for time and number of apps along X iterations.\n \"\"\"\n def __init__(self, config):\n self.backend = config.backend\n logging.info(f'Preparing the benchmark for {config.times} steps of {config.seconds} seconds...')\n self.dist = config.dist\n self.dist.init(config.mean,config.times)\n logging.debug(f'Generated app distribution for {config.times} steps: {self.dist.values}')\n self.wait = config.wait\n self.wait.init(config.seconds,config.times)\n logging.debug(f'Generated time distribution for {config.times} steps: {self.wait.values}')\n with open(config.input) as file:\n documents = yaml.full_load(file)\n self.apps = documents.items()\n\n def prepare(self):\n logging.info(\"Installing dependencies...\")\n for image in self.apps:\n self.backend.prepare(image[0])\n\n def run(self):\n commands = self.backend.commands(self.apps)\n timestamp = 0\n for n in self.dist.range():\n duration = self.wait.value(n)\n time.sleep(duration)\n timestamp += duration\n self.__start(timestamp, self.dist.value(n), commands)\n self.backend.complete()\n\n def __start(self, timestamp, nb, commands):\n apps = self.__select_apps(nb,commands)\n logging.info(f'T={timestamp}: Spawning {nb} apps: {apps}')\n for app in apps:\n self.backend.start(timestamp, app)\n\n def __select_apps(self,nb,commands):\n if (nb<=len(commands)):\n return random.sample(commands, nb)\n else:\n apps = []\n for i in range(nb):\n apps += random.sample(commands,1)\n return apps\n","sub_path":"pyworkload/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136980287","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 20 20:36:27 2013\n\n@author: amyskerry\n\"\"\"\n\nimport json\n\nrootdir='/Users/amyskerry/Documents/projects/trainapp/'\n\nothermax=30\ntimerange=[str(x) for x in range(0,4*60,30)]\nreprange=[str(x) for x in range(0,30)]\ncyclerange=[str(x) for x in range(0,20)]\nrouterange=[]\nfor x in range(7,14):\n for l in ['a','b','c','d']:\n routerange.append('5.'+str(x)+l)\nboulderrange=['v'+str(x) for x in range(0,12)]\n\ndefaultUIs={'WONAME':[], 'INDOUT':['radio',['indoor', 'outdoor']], 'WOTIME':['scroll',timerange], 'WOREPS': ['scroll', reprange], 'WOCYCLES': ['scroll', cyclerange], 'WOMAXAVGRANGE': ['textfield'], 'CLEANS':['textfield'], 'METRIC':[], 'OTHER1':[], 'OTHER2':[], 'OTHER3':[], 'OTHER4':[], 'COMMENTS': ['textbox']}\nWOsqlspecs={'WONAME':'varchar(30)','INDOUT':'varchar(20)', 'WOTIME':'float(4,2)', 'WOREPS': 'int', 'WOCYCLES': 'int', 'WOMAXAVGRANGE': 'varchar(5)', 'CLEANS':'int', 'METRIC':'varchar(30)', 'OTHER1':'varchar('+str(othermax)+')', 'OTHER2':'varchar('+str(othermax)+')', 'OTHER3':'varchar('+str(othermax)+')', 
'OTHER4':'varchar('+str(othermax)+')','COMMENTS': 'text'}\n\n# one time specifications\nWOs=[]\nWOuispecs=[]\nWOs.append({'WONAME':'boulder', 'INDOUT':'outdoor?', 'WOTIME':'time', 'WOREPS': 'reps at max', 'WOCYCLES': [], 'WOMAXAVGRANGE': 'max grade', 'CLEANS':'clean at max', 'METRIC':'boulder (send)', 'OTHER1':[], 'OTHER2':[], 'OTHER3':[], 'OTHER4':[], 'COMMENTS': 'comments'})\nthisui=defaultUIs\nthisui['WOMAXAVGRANGE']=[['scroll'],boulderrange]\nWOuispecs.append(thisui)\nWOs.append({'WONAME':'TR', 'INDOUT':'outdoor?', 'WOTIME':'time', 'WOREPS': 'reps at max', 'WOCYCLES': [], 'WOMAXAVGRANGE': 'max grade', 'CLEANS':'clean at max', 'METRIC':'TR (clean)', 'OTHER1':[], 'OTHER2':[], 'OTHER3':[], 'OTHER4':[],'COMMENTS': 'comments'})\nthisui=defaultUIs\nthisui['WOMAXAVGRANGE']=[['scroll'],routerange]\nWOuispecs.append(thisui)\nWOs.append({'WONAME':'boulder pyramid','INDOUT':[], 'WOTIME':'time', 'WOREPS': 'reps', 'WOCYCLES': 'cycles', 'WOMAXAVGRANGE': 'max grade', 'CLEANS':'clean at max', 'METRIC':[], 'OTHER1':[], 'OTHER2':[], 'OTHER3':[], 'OTHER4':[],'COMMENTS': 'comments'})\nthisui=defaultUIs\nthisui['WOMAXAVGRANGE']=[['minmax'],boulderrange]\nWOuispecs.append(thisui)\n\n\nwith open(rootdir+'WOs_dict.json', 'w') as f:\n json.dump(WOs, f)\nwith open(rootdir+'WOuispecs_dict.json', 'w') as f:\n json.dump(WOuispecs, f)\n","sub_path":"trainapp/misc/workout_specs.py","file_name":"workout_specs.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540647639","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 16:02:30 2020\n\n@author: myousuf\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Load scikit-learn's datasets\nfrom sklearn import datasets\n\n# Load digits dataset\ndigits = datasets.load_digits()\n\n# Create features matrix\nfeatures = digits.data\n\n# Create target vector\ntarget = digits.target\n\n# # Create function that adds 100 to something\nadd_100 = lambda i: i + 100\n\n# Create vectorized function\nvectorized_add_100 = np.vectorize(add_100)\n\n# Apply function to all elements in matrix\nFT=vectorized_add_100(features)\n\n\n# View first observation\nfeatures[0]\nplt.scatter(FT[:,20],FT[:,10], c=target)\nplt.show()\n","sub_path":"MLwP Codes/2.1 Bisection.py","file_name":"2.1 Bisection.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578893215","text":"# Write your code here :-)\nfrom pprint import pprint\nrooms = {\n (0, 0): \"The hallway\",\n (1, 1): \"The Living Room\",\n (0, 1): \"The Kitchen\",\n (0, -1) : \"The garden\"\n}\npprint(rooms)\n\ndirections = {\n \"north\": (0, +1),\n \"south\": (0, -1),\n \"east\": (+1,0),\n \"west\": (-1,0)\n}\n\ncurrent_coord = (0, 0)\n\nallowed_verbs = [\"go\"]\nallowed_nouns = [\"north\", \"south\", \"east\", \"west\"]\n\ndef check_exits(current_coord):\n x, y = current_coord\n for name, (dx, dy) in directions.items():\n if (x + dx, y + dy) in rooms:\n print(\"You can go to:\", name)\n\nwhile True:\n room_name = rooms[current_coord]\n print(\"You are in\", room_name)\n print(\"Type 'Go' followed by a direction\")\n\n command = input(\"What next? 
\").lower()\n command1=command.split()\n if len(command1) not in (1, 2):\n print(\"I can't handle this; my brain hurts\")\n continue\n\n verb = command1[0]\n print(verb)\n if len(command1) == 2:\n noun = command1[1]\n else:\n noun = None\n print(noun)\n\n if verb not in allowed_verbs:\n print(\"I don't know what\", verb ,\"is\")\n\n if noun not in allowed_nouns:\n print(\"I don't know what\", noun ,\"is\")\n\n if verb == 'go':\n noun_coordinates=directions[noun]\n x,y = current_coord\n dx, dy = noun_coordinates\n\n new_coord= ((x+dx),(y+dy))\n print(new_coord)\n if new_coord not in rooms:\n print(\"You can't go this way\")\n else:\n current_coord=new_coord\n\n","sub_path":"adventure.py","file_name":"adventure.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13934273","text":"from django.shortcuts import render, Http404,redirect,reverse,HttpResponseRedirect,HttpResponse\nfrom haberler.models import News\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom haberler.templatetags import menu\nfrom django.views.decorators.cache import cache_page\nfrom ayarlar.models import SiteSettings\nfrom social_django.models import UserSocialAuth\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm,AuthenticationForm\nfrom django.contrib.auth import update_session_auth_hash,login, authenticate, logout as log_out\nfrom django.contrib import messages\nfrom .forms import LoginForm\nfrom htmlmin.decorators import minified_response\nfrom ayarlar.models import AdminGorevler\nimport datetime\nimport json\n# for test page\nimport collections,urllib\nfrom bs4 import BeautifulSoup\nfrom django.utils.text import slugify\nfrom django.utils.crypto import get_random_string\nfrom PIL import Image\nimport os,cgi\nimport bs4 as bs\nimport requests\nimport ssl\nfrom urllib.parse import urlparse,urlunparse,parse_qs,urlencode\n\n\ndef test_page(request):\n Point = collections.namedtuple('Point', ['title', 'spot', 'image', 'desc', 'keys', 'newType'])\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n pageUrl = request.GET.get(\"url\")\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}\n r = requests.get(pageUrl, headers=headers)\n soup = BeautifulSoup(r.content, 'html.parser')\n article = soup.find_all(\"div\", attrs={'class': 'articles'})[0].find(\"article\")\n title = article.h1.text\n spot = article.find(\"div\",attrs={\"class\":'spot'}).p.text\n image = str(soup.find('meta', attrs={'property': 'og:image'})[\"content\"])[2:]\n desc = article.find_all(\"section\")[1].find_all([\"div\",\"figure\"])\n for d in desc:\n if \"content--related\" in str(d) or d.find(\"div\",attrs={'class':'embed-responsive video-item'}):\n d.decompose()\n if d.find(\"a\"):\n for a in d.find_all(\"a\"):\n a[\"href\"] = \"%s%s\"% (\"/\",str(a[\"href\"]).split(\"/\")[-1])\n if d.find(\"img\"): # for desc images\n u = urlparse(str(d.img[\"src\"])[2:])\n query = parse_qs(u.query)\n query.pop('mode', None);query.pop('h', None);query.pop('w', None)\n u = u._replace(query=urlencode(query, True))\n # imageExtension = str(urlunparse(u)).split(\".\")[-1]\n # ResimYolu = str(slugify(title) + \"-\" + str(get_random_string(12, \"1234a512sda2\")) + \".\" + 
imageExtension + \"\")\n # urllib.request.urlretrieve(urlunparse(u), \"media/News/fotoGallery/\" + ResimYolu + \"\")\n # d.img[\"src\"] = \"/media/News/fotoGallery/\" + ResimYolu + \"\"\n # foo = Image.open(\"\" + BASE_DIR + \"/media/News/fotoGallery/\" + ResimYolu + \"\")\n # foo = foo.resize((770, 510), Image.ANTIALIAS)\n # foo.save(\"\" + BASE_DIR + \"/media/News/fotoGallery/\" + ResimYolu + \"\", quality=35)\n print(urlunparse(u))\n keywords = soup.find(\"ul\", attrs={'class': 'related__tags clearfix'}).find_all(\"li\")\n keyList = \"\"\n if keywords:\n for index, li in enumerate(keywords):\n for n in range(1):\n keyList += (\",\" + str(li.a.text))\n news = Point(title, spot, image, desc, keyList, 1)\n return render(request, 'test.html',)\n\n\n# @cache_page(60*2)\n# @minified_response\ndef home_view(request):\n template = \"site/index.html\"\n news = News.objects.filter(active=True).order_by(\"-id\")[:120].values(\"id\",\"title\",\"slug\",\"category__slug\",\"image\",\"category__id\",\"category__title\",\"date\",\"video_url\")\n tomorrow = datetime.date.today() + datetime.timedelta(days=1)\n lastMonth = tomorrow - datetime.timedelta(days=7)\n context = {\n 'category': menu.get_cache_menu(),\n 'task': AdminGorevler.objects.all(),\n 'news': news,\n 'videos_news': menu.get_videos_news(5),\n 'site': menu.get_site_config(),\n 'reklam': menu.reklamlar(),\n 'trendNews': News.objects.filter(active=True,date__range=[str(lastMonth),str(tomorrow)]).order_by(\"-viewed\")[:17].values('title','spot','slug','image','category__title','date','video_url')\n }\n return render(request, template, context)\n\n\ndef login_view(request):\n form = LoginForm(request.POST or None)\n context = {\n 'form': form,\n }\n if request.user.is_authenticated:\n return redirect(reverse(\"home_view\"))\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n login(request, user)\n if user:\n if request.GET.get(\"next\"):\n return HttpResponseRedirect(request.GET.get(\"next\"))\n else:\n return redirect(reverse(\"home_view\"))\n else:\n messages.add_message(request, messages.INFO, form.errors)\n\n return render(request, \"user/login.html\", context)\n\n\ndef logout(request):\n log_out(request)\n return redirect(reverse(\"home_view\"))\n\n\n@login_required\ndef log_settings(request):\n user = request.user\n try:\n github_login = user.social_auth.get(provider='github')\n except UserSocialAuth.DoesNotExist:\n github_login = None\n try:\n twitter_login = user.social_auth.get(provider='twitter')\n except UserSocialAuth.DoesNotExist:\n twitter_login = None\n\n try:\n facebook_login = user.social_auth.get(provider='facebook')\n except UserSocialAuth.DoesNotExist:\n facebook_login = None\n can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())\n\n return render(request, 'user/settings.html', {\n 'github_login': github_login,\n 'twitter_login': twitter_login,\n 'facebook_login': facebook_login,\n 'can_disconnect': can_disconnect\n })\n\n\n@login_required\ndef log_password(request):\n if request.user.has_usable_password():\n PasswordForm = PasswordChangeForm\n else:\n PasswordForm = AdminPasswordChangeForm\n if request.method == 'POST':\n form = PasswordForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n # if User.objects.filter(username=form.user.username).exists():\n # user = User.objects.get(username=form.user.username)\n # 
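# Django note: the update_session_auth_hash(request, form.user) call a few
# lines above re-stamps the session with the new password hash; without it
# the session would be invalidated and the user logged out immediately
# after changing their own password.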
user.email =\n messages.success(request, 'Parolan kaydeildi')\n return redirect('password')\n else:\n messages.error(request, 'Please correct the error below.')\n else:\n form = PasswordForm(request.user)\n return render(request, 'user/password.html', {'form': form})\n\n\ndef search_view(request):\n query = request.GET.get('q')\n query = query\n news_list = None\n news = None\n if query:\n news_list = News.objects.filter(active=True).values('title','spot','slug','image','category__title','date','detail');\n news_list = news_list.filter(\n Q(title__icontains=query) |\n Q(spot__icontains=query) |\n Q(detail__icontains=query)\n\n ).distinct()\n\n if news_list:\n page = request.GET.get('sayfa', 1)\n paginator = Paginator(news_list, 15)\n try:\n news = paginator.page(page)\n except PageNotAnInteger:\n news = paginator.page(1)\n except EmptyPage:\n news = paginator.page(paginator.num_pages)\n context = {\n 'search_list': news,\n }\n return render(request, \"site/search.html\", context)\n\n\ndef takimimiz(request):\n return render(request, \"site/author/team.html\", {'user': User.objects.all()})\n\n\ndef author_detail(request, slug):\n template = \"site/author/author_details.html\"\n news = None ; count = 0\n try:\n user = User.objects.get(username=slug)\n except:\n user = None\n raise Http404\n if user:\n news = News.objects.filter(user=user,active=True)\n page = request.GET.get('sayfa', 1)\n try:\n page_size = SiteSettings.objects.get().page_size\n except:\n page_size = 10\n try:\n count =round(News.objects.filter(active=True,user=user).count() / page_size)\n except:\n count = 0\n paginator = Paginator(news, page_size)\n try:\n news = paginator.page(page)\n except PageNotAnInteger:\n news = paginator.page(1)\n except EmptyPage:\n news = paginator.page(paginator.num_pages)\n context = {\n 'user': user,\n 'news': news,\n 'count': count,\n }\n return render(request, template, context)\n\ndef ClearTRChars(text):\n return text.replace(\"ı\", \"i\").replace(\"ü\", \"u\").replace(\"ç\", \"c\").replace(\"ş\", \"s\").replace(\"ğ\",\"g\").replace(\"ö\", \"o\").replace(\"ş\",'s')\n\n\ndef tags_filter(request, slug):\n template = \"site/tags.html\"\n if slug == \"admin\":\n return redirect(\"/admin/\")\n news_list = None\n news = None\n if slug:\n news_list = News.objects.filter(active=True, tags__icontains=\",\" + str(slug).replace(\"-\",\" \") + \"\" + \",\").values('title','spot','slug','image','category__title','date');\n if news_list:\n page = request.GET.get('sayfa', 1)\n paginator = Paginator(news_list, 10)\n try:\n news = paginator.page(page)\n except PageNotAnInteger:\n news = paginator.page(1)\n except EmptyPage:\n news = paginator.page(paginator.num_pages)\n\n context = {'news': news, 'slug': slug}\n return render(request, template, context)\n\n\ndef notfoundpage(request):\n return render(request, \"site/error404.html\")\n\n\n@login_required\ndef AddTaskView(request):\n text = request.GET.get(\"datatext\")\n nesne = AdminGorevler.objects.create(title=str(text))\n data = json.dumps({\n 'id': str(nesne.id),\n 'title': str(nesne),\n })\n return HttpResponse(data)\n\n\n@login_required\ndef DeleteTaskView(request):\n text = request.GET.get(\"datatext\")\n try:\n nesne = AdminGorevler.objects.get(id=text)\n nesne.delete()\n except:\n nesne = None\n data = json.dumps({\n 'nesne': str(nesne),\n })\n return HttpResponse(data)\n\n@login_required\ndef ChangeStateTask(request):\n text = request.GET.get(\"datatext\")\n try:\n nesne = AdminGorevler.objects.get(id=text)\n if nesne.active:\n nesne.active = 
False\n else:\n nesne.active = True\n nesne.save(update_fields=['active'])\n except:\n nesne = None\n data = json.dumps({\n 'nesne': str(nesne.active),\n })\n return HttpResponse(data)\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299426460","text":"\"\"\"\nReturn first pair of mismatching nodes (first pair as in in-order) given two pre-order traversal arrays of BSTs.\n\nExample 1:\n\nInput: pre1 = [5, 4, 2, 4, 8, 6, 9], pre2 = [5, 3, 2, 4, 8, 7, 9]\nOutput: [4, 3]\nExplanation:\nTree 1:\n\t 5\n 4 8\n2 4 6 9\n\nTree 2:\n\t 5\n 3 8\n2 4 7 9\n\ninorder1 = [2, 4, 4, 5, 6, 8, 9]\ninorder2 = [2, 3, 4, 5, 7, 8, 9]\n\nExample 2:\n\nInput: pre1 = [2, 1, 3], pre2 = [1, 2]\nOutput: [3, null]\nExplanation:\nTree 1:\n 2\n1 3\n\nTree 2:\n\t1\n\t 2\n\ninorder1 = [1, 2, 3]\ninorder2 = [1, 2]\n\n\nExample 3:\n\nInput: pre1 = [2, 1, 3], pre2 = [1, 2, 3]\nOutput: []\nExplanation:\nTree 1:\n\t2\n 1 3\n\nTree 2:\n\t1\n\t 2\n\t\t 3\n\ninorder1 = [1, 2, 3]\ninorder2 = [1, 2, 3]\n\nThere is no mismatch because the in-order sequence for both is exactly the SAME,\ndespite the trees are structurally different.\n\n\"\"\"\n\n\nclass TreeNode:\n\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef find_mismatch(root1, root2):\n stack1, stack2 = [], []\n result = []\n\n while stack1 or root1:\n while root1 or root2:\n if root1:\n stack1.append(root1)\n root1 = root1.left\n if root2:\n stack2.append(root2)\n root2 = root2.left\n\n root1 = stack1.pop() if stack1 else None\n root2 = stack2.pop() if stack2 else None\n\n if root1 and root2 and root1.val != root2.val:\n result += [root1.val, root2.val]\n elif root1 and not root2:\n result += [root1.val, None]\n elif root2 and not root1:\n result += [root2.val, None]\n\n root1 = root1.right if root1 else None\n root2 = root2.right if root2 else None\n\n return result\n\n\nif __name__ == '__main__':\n \"\"\"\n Input: pre1 = [2, 1, 3], pre2 = [1, 2, 3]\n Output: []\n Explanation:\n Tree 1:\n 2\n 1 3\n \n Tree 2:\n 1\n 2\n 3\n \n inorder1 = [1, 2, 3]\n inorder2 = [1, 2, 3]\n \"\"\"\n root1 = TreeNode(2)\n root1_left = TreeNode(1)\n root1_right = TreeNode(3)\n\n root1.left = root1_left\n root1.right = root1_right\n\n root2 = TreeNode(1)\n root2_right = TreeNode(2)\n root2_right_right = TreeNode(3)\n\n root2.right = root2_right\n root2_right.right = root2_right_right\n\n print(find_mismatch(root1, root2))\n\n \"\"\"\n Example 2:\n Input: pre1 = [2, 1, 3], pre2 = [1, 2]\n Output: [3, null]\n Explanation:\n Tree 1:\n 2\n 1 3\n \n Tree 2:\n 1\n 2\n \n inorder1 = [1, 2, 3]\n inorder2 = [1, 2]\n \"\"\"\n\n root1 = TreeNode(2)\n root1_left = TreeNode(1)\n root1_right = TreeNode(3)\n\n root1.left = root1_left\n root1.right = root1_right\n\n root2 = TreeNode(1)\n root2_right = TreeNode(2)\n root2.right = root2_right\n\n print(find_mismatch(root1, root2))\n","sub_path":"Problems/companies/Facebook/Find_first_mismatchin_nodes.py","file_name":"Find_first_mismatchin_nodes.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301120006","text":"from .base import Node, check_abort\nimport numpy as np\n\nfrom .color import ColorNode\nfrom ..exceptions import NodeError\nfrom visual.modules.numeric.kernels import glyph_kernel\n\n\nclass LayerNode(Node):\n \"\"\"The node represents the 
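assembly of one renderable layer. Judging from the __call__ body below
(an inference from this file, not a documented contract), the node
expects its inputs shaped roughly like:

    indata = {
        'plane':   {'data': sample_points, 'meta': plane_geometry},
        'dataset': field_values,   # handed to glyph_kernel
    }

and what it hands downstream is the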
construction of a 2D slice.\"\"\"\n data = {\n 'structure': {\n 'title' : {\n 'type': 'display',\n 'value' : 'layer',\n },\n\n 'appearance' : {\n 'type': 'select',\n 'choices': ['solid', 'transparent'],\n 'value' : 'solid',\n },\n },\n\n 'in': {\n 'plane': {\n 'required': True,\n 'multipart': False\n },\n 'dataset': {\n 'required': True,\n 'multipart': False\n }\n },\n 'out': {\n 'layer': {\n 'required': True,\n 'multipart': False\n },\n },\n }\n\n parsing = {}\n \n title = 'layer'\n \n def __init__(self, id, data, notebook_code, message):\n \"\"\"\n Initialize new instance of layer node.\n :param self: instance of LayerNode\n :param id: id of node\n :param data: dictionary of node parameters, \n has to contain values from Node.data['structure']\n :param notebook_code: code of the notebook containing the node\n :param message: lambda with signature (string): none; \n has to send messages back to user\n \"\"\"\n self.id = id\n\n fields = ['appearance']\n self.check_dict(fields, data, self.id, self.title)\n self._appearance = data['appearance']\n\n\n def __call__(self, indata, message, abort): \n \"\"\"\n Construct the 2D slice.\n :param self: instance of LayerNode\n :param indata: data coming from connected nodes\n :param message: lambda with signature (string): none; \n has to send messages back to user\n :param abort: object for checking the abort flag,\n check is done by using the check_abort method\n \"\"\" \n\n fields = ['dataset', 'plane']\n self.check_dict(fields, indata, self.id, self.title)\n\n check_abort(abort)\n values = glyph_kernel(indata['dataset'], indata['plane']['data'])\n points = indata['plane']['data']\n\n\n #layer meta... \n meta = {\n 'colormap': ColorNode.get_default_cm(),\n 'appearance': self._appearance,\n 'geometry': indata['plane']['meta'],\n }\n\n #return all flattened\n return {'layer' : {'values': values, 'points': points, 'meta': meta}}\n\n\n","sub_path":"flow/visual/modules/editor/nodes/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5584095","text":"from unittest import TestCase\n\n\ndef equal_matrices(matrix_1: list, matrix_2: list) -> bool:\n \"\"\"\n Check if two matrices are equal\n\n :param matrix_1:\n :param matrix_2:\n :return:\n \"\"\"\n if len(matrix_1) != len(matrix_2):\n return False\n\n # zip pairs up corresponding rows, then corresponding elements\n for row_1, row_2 in zip(matrix_1, matrix_2):\n for e_1, e_2 in zip(row_1, row_2):\n if e_1 != e_2:\n return False\n\n return True\n\n\ndef rotate_matrix(matrix: list) -> list:\n \"\"\"\n Given an NxN matrix, rotate it by 90 degrees.\n This is an O(N^2) approach that trivially builds a new matrix.\n\n :return: the rotated matrix...\n \"\"\"\n # copy each column, and make as row...\n # first column becomes first row..., etc...\n rotated = []\n col_i = 0\n while col_i < len(matrix):\n row_i = len(matrix) - 1\n new_row = []\n while row_i >= 0:\n new_row.append(matrix[row_i][col_i])\n row_i -= 1\n rotated.append(new_row)\n col_i += 1\n\n return rotated\n\n\nclass TestRotateMatrix(TestCase):\n\n def setUp(self) -> None:\n self.t1_matrix = [[1, 2], [3, 4]]\n self.t1_result = [[3, 1], [4, 2]]\n\n def test_rotate_matrix(self):\n self.assertEqual(self.t1_result, rotate_matrix(self.t1_matrix))\n","sub_path":"session1/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"172313001","text":"from ..base_asset 
import Asset\nfrom docker.types import IPAMConfig\nfrom ...core.response import JsonResponse\nfrom ...core.exceptions import IncompatibleObject\n\n\nclass Networks(Asset):\n class SCOPES(object):\n LOCAL = \"local\"\n GLOBAL = \"global\"\n SWARM = \"swarm\"\n _AVILABLE = [\n LOCAL,\n GLOBAL,\n SWARM\n ]\n\n def create(self, name, driver=None, options=None, ipam=None, check_duplicate=None, internal=None, labels=None,\n enable_ipv6=None, attachable=None, scope=None, ingress=None):\n # for true use boolean for false use None for serialization\n if options is None:\n options = {}\n\n if ipam and type(ipam) is not IPAMConfig:\n raise IncompatibleObject(\"ipam config type must be docker.types.IPAMConfig\")\n\n if scope and scope not in Networks.SCOPES._AVILABLE:\n raise IncompatibleObject(\"network scope must be Networks.SCOPES.\")\n\n kwargs = {\n \"name\": name,\n \"driver\": driver,\n \"options\": options,\n \"ipam\": ipam,\n \"check_duplicate\": check_duplicate,\n \"internal\": internal,\n \"labels\": labels,\n \"enable_ipv6\": enable_ipv6,\n \"attachable\": attachable,\n \"scope\": scope,\n \"ingress\": ingress\n }\n\n return self.requests.post(\"/docker/networks/create\", parameters=kwargs, ResponseObject=JsonResponse)\n","sub_path":"vcore_api/python/vcore_api/assets/docker/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514460601","text":"from flask import Flask, jsonify, request\r\nfrom flasgger import Swagger\r\napp = Flask(__name__)\r\n\r\nSwagger(app)\r\n\r\n@app.route('/courses', methods=['POST'])\r\ndef my_awesome_endpoint():\r\n\t\"\"\"\r\n\tThis endpoint creates a course\r\n\t---\r\n\ttags:\r\n\t - create_course\r\n\tresponses:\r\n\t parameters:\r\n\t - name: course\r\n\t in: formData\r\n\t description: course name\r\n\t required: true\r\n\t type: string\r\n\t 200:\r\n\t description: course created\r\n\t schema:\r\n\t id: return_test\r\n\t properties:\r\n\t data:\r\n\t type: object\r\n\t properties:\r\n\t course:\r\n\t type: string\r\n\t description: course name\r\n\t default: 'operating systems'\r\n\t status:\r\n\t type: string\r\n\t description: created or fail\r\n\t default: 'created'\r\n\t\"\"\"\r\n\tdata = request.json\r\n\treturn jsonify(data=data, info={\"status\": \"created\"})\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0',port=5000,debug='True')","sub_path":"python/02_intro_swagger/sources/01_intro_swagger/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12062419","text":"#%% [markdown]\n# # PR HW3\n\n#%%\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n\nDATADIR_1 = \"money\"\nDATADIR_2 = \"transformations\"\n\nCATEGORIES = [\"100\", \"500\", \"1000\"]\n\nIMG_SIZE = 128\n\n\n#%% [markdown]\n# # 1. 
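# A quick map of the loading pipeline in the cell that follows: each file
# is opened with load_img, converted by img_to_array, given a leading
# batch axis via reshape((1,) + shape), and paired with the index of its
# folder name in CATEGORIES as the class label. Step 1 is where we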
read data\n\n#%%\ndata = []\n\ndef create_data(DATADIR):\n for category in CATEGORIES: \n\n path = os.path.join(DATADIR,category) \n class_num = CATEGORIES.index(category) \n\n for img in os.listdir(path): # iterate over each image\n try:\n img_array = load_img(os.path.join(path,img))\n img_array = img_to_array(img_array)\n img_array = img_array.reshape((1,) + img_array.shape)\n\n data.append([img_array, class_num])\n\n except Exception as e: # in the interest in keeping the output clean...\n pass\n\ncreate_data(DATADIR_1)\ncreate_data(DATADIR_2)\n\nprint(\"total data: \", len(data))\n# print(data[0])\n\n\n#%% [markdown]\n# # 2. shuffle & split train and test\n#%%\nimport random\n\nrandom.shuffle(data)\n\ntrain_size = int(len(data)*0.8)\n\ntrain = data[:train_size]\ntest = data[train_size:] \n\n\n#%%\n# print(len(test))\n# for sample in test[:30]:\n# print(sample[1])\n# print(train[0][0].shape)\n# print(train[15][1])\n\n#%%\nX_train = []\ny_train = []\n\nfor features,label in train:\n X_train.append(features/255.)\n y_train.append(label)\n\nX_test = []\ny_test = []\n\nfor features,label in test:\n X_test.append(features/255.)\n y_test.append(label)\n\n\n\n# X_train = np.array(X_train).reshape(IMG_SIZE, IMG_SIZE, 3)\nX_train = np.array(X_train)\nX_train = X_train.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\nX_test = np.array(X_test)\nX_test = X_test.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n# y_train = np.array(y_train)\n# y_test = np.array(y_test)\ny_train = tf.keras.utils.to_categorical(y_train)\ny_test = tf.keras.utils.to_categorical(y_test)\n\n#%%\n# print(y_train.shape)\n\n#%%\n# print(X_train[0])\n\n\n#%% [markdown]\n# # 3. CNN model\n\n#%%\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\n\n#%%\ndef solve_cudnn_error():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\nsolve_cudnn_error()\n\n#%%\nmodel = Sequential()\n\nmodel.add(Conv2D(filters=32, \n kernel_size=(3, 3),\n strides=(1, 1), \n padding=\"same\", \n input_shape=(IMG_SIZE, IMG_SIZE, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2 ,2),\n strides=2))\n\nmodel.add(Conv2D(filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding='valid'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2),\n strides=2))\n\n\nmodel.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\nmodel.add(Dense(64, activation=\"relu\"))\n\nmodel.add(Dropout(0.25)) # dropout\n\nmodel.add(Dense(3))\nmodel.add(Activation('softmax'))\n\nmodel.summary()\n\n# Hyper Parameters\nepochs = 10\nbatch_size = 2\nlr = 0.0001\ndecay = 1e-6\n# optimizer = tf.optimizers.RMSprop(lr=lr, decay=decay)\n# model.compile(optimizer=optimizer, \n# loss=\"categorical_crossentropy\", \n# metrics=[\"accuracy\"])\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.fit(\n X_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs, \n # validation_data=(X_test, y_test),\n)\n\ny_pred = model.predict(X_test)\ny_pred_label = np.argmax(y_pred, 
axis=1)\ny_test_label = np.argmax(y_test, axis=1)\n\n\n# %%\ncount = 0\nfor label, predict in zip(y_test_label, y_pred_label):\n if label == predict:\n count += 1\n \nprint(\"accruacy: \", count/y_test_label.shape[0])\n\n# %%\n","sub_path":"HW3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486706903","text":"from tensorboard.backend.event_processing import event_accumulator\nimport re\nimport os\nfrom tampc import cfg\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport socket\n\n\ndef batch_size(run):\n b = run[run.index('batch') + len('batch'):]\n return int(re.search(r'\\d+', b).group())\n\n\nruns_basedir = os.path.join(cfg.ROOT_DIR, 'scripts/runs')\nPOINTS_PER_EPOCH = 50 * 200 * 0.9\nMAX_POINTS = 3000\nMAX_EPOCH = 3000\nname_prefix = socket.gethostname() # 'armconjunction'\nlargest_epoch_encountered = 0\nname_contains = None\nignore_cache = False\n\nymag = 6.68 # for push validation\n\n# scalar name combinations\nseries = {\n 'rex_extract_2_eval': {'name': 'ours', 'color': 'green'},\n 'notransform_eval': {'name': 'feedforward baseline', 'color': 'red'},\n}\n# losses = {'percent_match': {'name': 'match', 'pos': 0}, 'percent_reconstruction': {'name': 'reconstruction', 'pos': 1}}\nlosses = {'mse_loss': {'name': 'MSE', 'pos': 0}}\n# losses = {'percent_match': {'name': 'MSE', 'pos': 0}}\n# datasets = {'validation': {'name': '(a) validation', 'pos': 0},\n# 'validation_10_10': {'name': '(b) validation (10,10)', 'pos': 1}, 'test0': {'name': '(c) test', 'pos': 2}}\ndatasets = {'validation': {'name': '(a) validation', 'pos': 0},\n 'validation_10_10': {'name': '(b) validation (10,10)', 'pos': 1}}\n\nruns = os.listdir(runs_basedir)\nruns_assignment = {s: [] for s in series.keys()}\n\nfor r in runs:\n run_dir = os.path.join(runs_basedir, r)\n run = os.path.join(run_dir, os.listdir(run_dir)[0])\n try:\n name = r[r.index(name_prefix) + len(name_prefix):]\n except ValueError:\n continue\n\n if name_contains is not None and name_contains not in name:\n print(\"Ignoring {} since it does not contain {}\".format(name, name_contains))\n continue\n\n run_series = None\n for s in series.keys():\n if name.startswith(s):\n run_series = s\n runs_assignment[s].append(name)\n\n if run_series is None:\n print(\"Ignoring {} since it's not a recognized series\".format(name))\n continue\n\n print(name)\n cache = os.path.join(cfg.DATA_DIR, 'run_cache', name + '.pkl')\n # load from cache if possible since it could take a while\n if not ignore_cache and os.path.isfile(cache):\n with open(cache, 'rb') as f:\n to_add = pickle.load(f)\n print('loaded cache for {}'.format(name))\n else:\n ea = event_accumulator.EventAccumulator(run, size_guidance={\n event_accumulator.COMPRESSED_HISTOGRAMS: 1,\n event_accumulator.IMAGES: 1,\n event_accumulator.AUDIO: 1,\n event_accumulator.SCALARS: MAX_POINTS,\n event_accumulator.HISTOGRAMS: 1,\n })\n ea.Reload()\n tags = sorted(ea.Tags()['scalars'])\n\n to_add = []\n for loss in losses:\n for dataset in datasets:\n t = (loss, dataset)\n try:\n data = ea.Scalars('{}/{}'.format(*t))\n\n steps = np.array([d.step for d in data])\n max_epoch = steps[-1] * batch_size(run) // POINTS_PER_EPOCH\n\n values = np.array([d.value for d in data])\n if loss == 'mse_loss':\n values /= ymag\n steps_per_epoch = steps[-1] / max_epoch\n epochs = steps / steps_per_epoch\n except KeyError as e:\n print(\"-1 padding for missing key: {}\".format(e))\n epochs = 
np.linspace(0, MAX_EPOCH, MAX_EPOCH)\n values = np.ones_like(epochs) * -1\n to_add.append((t, epochs, values))\n\n # save to cache\n if not os.path.exists(os.path.dirname(cache)):\n try:\n os.makedirs(os.path.dirname(cache))\n except OSError as exc: # Guard against race condition\n import errno\n\n if exc.errno != errno.EEXIST:\n raise\n with open(cache, 'wb') as f:\n pickle.dump(to_add, f)\n print('cached {}'.format(name))\n\n for t, epochs, values in to_add:\n if largest_epoch_encountered < epochs[-1]:\n largest_epoch_encountered = epochs[-1]\n if t not in series[run_series]:\n series[run_series][t] = []\n series[run_series][t].append((epochs, values))\n\nf, axes = plt.subplots(len(losses), len(datasets), figsize=(10, 3), constrained_layout=True)\nif len(losses) is 1:\n axes = axes.reshape(1, -1)\nplt.pause(0.1)\n\nfor s, run_names in runs_assignment.items():\n print('---')\n print(s)\n for n in run_names:\n print(n)\n\nfor s in series:\n for t in series[s]:\n if type(t) is str:\n continue\n\n loss, dataset = t\n i = losses[loss]['pos']\n j = datasets[dataset]['pos']\n ax = axes[i, j]\n\n all = series[s][t]\n # filter out series with insufficient points (need equal number of points for all of them)\n lens = [len(pair[0]) for pair in all]\n all = [pair for pair in all if len(pair[0]) == max(lens)]\n\n # just use the first series as epoch\n epochs = all[0][0]\n values = [pair[1] for pair in all]\n\n # average across seeds\n m = np.mean(values, axis=0)\n std = np.std(values, axis=0)\n c = color = series[s]['color']\n ax.semilogx(epochs, m, label=series[s]['name'], color=c)\n ax.fill_between(epochs, m - std, m + std, alpha=0.3, color=c)\n ax.set_xlim(left=0, right=3200)\n ax.set_ylim(bottom=0, top=1.5)\n\nfor dataset_pairs in datasets.values():\n axes[0, dataset_pairs['pos']].set_title(dataset_pairs['name'])\n\naxes[0, 0].set_ylabel('Relative MSE')\n# axes[1, 0].set_ylabel('reconstruction loss')\naxes[-1, 0].set_xlabel('epochs')\naxes[-1, 1].set_xlabel('epochs')\naxes[-1, 0].legend()\n\nplt.show()\n","sub_path":"scripts/plot_run_result.py","file_name":"plot_run_result.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564619540","text":"from collections import OrderedDict\n\nglossary = OrderedDict()\n\nglossary['title'] = 'Первая буква каждого слова - прописная, остальные - строчные'\nglossary['upper'] = 'Вся строка прописная'\nglossary['lower'] = 'Вся строка строчная'\nglossary['rstrip'] = 'Удаление пропусков в конце строки'\nglossary['lstrip'] = 'Удаление пропусков в начале строки'\n \nfor determination in glossary:\n print(\"\\n\" + determination + \":\\n\\t\" + glossary[determination])","sub_path":"9-13.py","file_name":"9-13.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169569231","text":"##Logan Douglas\n##Player Inventory\n##11/18\n\nimport os\nimport random\n\ndef hud():\n print(\"Stats: \",player_stats)\n print(\"inventory: \",inventory)\n print(\"Equiped: \", equiped)\nchest_items = [\"gold\",\"gems\",\"bow\",\"boots\",\"funky hat\"]\nplayer_health = 100\nplayer_armor = 1250\nplayer_attack = 250\nplayer_money = 0\ninventory = [\"Magic Staff\",\"Mana Potion\",\"Small healing potion\",\"Old Cloak\"]\nmax_inventory = 15\nequiped = []\nplayer_stats = [\"health\",player_health,\"aromor\",player_armor,\"attack\",\n player_attack,\"money\",player_money]\n\nprint(\"As you start out as an 
apprentice you have the following\")\nprint(\"Player stats\")\nprint(player_stats)\nprint()\nprint(\"Your items include:\")\nfor item in inventory:\n print(item)\ninput(\"\\n[Press enter to continue]\")\nos.system('cls')\nhud()\n\ninput(\"\\n[Press enter to continue]\")\n\nprint(\"You have\", len(inventory),\"/\",max_inventory,\"items in your possession.\")\nprint(\"So you can pick up\",max_inventory-len(inventory),\"more items\")\ninput(\"\\n[Press enter to continue]\")\nos.system('cls')\nhud()\n\nprint(\"You tripped over a rock and took some damage\")\nplayer_stats[1] -= 24\ninput(\"\\n[Press enter to continue]\")\n\ninput(\"\\nYou have taken some damage\"+\n \" your health is at \"+str(player_stats[1])+\"\\n\"+\n \"you need to use your healing potion \\n[Press enter to continue]\")\n\nif \"Small healing potion\" in inventory:\n print(\"You have used the healing potion.\")\n player_stats[1]+=16\n inventory.remove(\"Small healing potion\")\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\nhud()\n\n\nfor i in range(len(inventory)):\n print(str(i),inventory[i])\nprint(\"Equip some armor!\")\nindex = int(input(\"\\nEnter the index number for an armor item in your inventory\"))\n\nwhile index > len(inventory)-1 or index < 0:\n print(\"That number is out of range\")\n index = int(input(\"\\nEnter the index number for an armor item in your inventory\"))\nprint(\"You equip your:\",inventory[index])\nequiped.append(inventory[index])\ninventory.remove(inventory[index])\n\nif \"Old Cloak\" in equiped:\n player_stats[3] += 500\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\nhud()\n\nchest = []\n\nfor i in range(random.randrange(len(chest_items))):\n item = random.choice(chest_items)\n chest.append(item)\n\nprint(\"You find a chest which contains:\")\nprint(chest)\nprint()\nprint(\"You add the contents of the chest to your inventory\")\n\nif len(inventory)+len(chest) <= max_inventory:\n inventory.extend(chest)\n\nif player_stats[7] > 100:\n print(\"You trade in your sword for a bow gaining 20 gold for the sword and buying the bow for 100\")\n player_stats[7]+=20\n player_stats[7]-=100\n inventory[0] = \"bow\"\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\nhud()\n\nprint(\"You trade in the last 2 items from your inventory\")\ninventory[len(inventory)-2:len(inventory)] = [\"orb of mana\"]\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\nhud()\n\nprint(\"In a great battle you lost your cloak\")\nif \"Old Cloak\" in inventory:\n inventory.remove(\"Old Cloak\")\nif \"Old Cloak\" in equiped:\n equiped.remove(\"Old Cloak\")\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\nhud()\n\nprint(\"Your first 2 items were stolen by thieves\")\ndel inventory[:2]\nprint(\"Your inventory now has:\")\nprint(inventory)\ninput(\"\\n[Press enter continue]\")\nos.system('cls')\n\n \n \n\n\n\n\n\n\n","sub_path":"Inventory/Inventory.py","file_name":"Inventory.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514242216","text":"from openpyxl import Workbook , load_workbook\nfrom openpyxl.styles import Alignment\nfrom openpyxl.utils import get_column_letter\nfrom orionsdk import SwisClient\nimport requests , abc\n\nclass ExcelSheet ( ):\n\n\t'''\n\tClass name : ExcelSheet\n\tClass Purpose : To handle reading and writing to Excel spreadsheet\n\t'''\n\n\tdef __init__ ( self , workbook ):\n\n\t\t'''\n\t\tMethod name: __init__\n\t\tMethod Purpose: To initialize an excel 
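workbook wrapper. A usage sketch (assuming a report.xlsx exists next to
the script; if loading fails, the except branch below falls back to a
fresh Workbook):

    book = ExcelSheet('report.xlsx')
    rows = book.readFullWorkbook(numRows=10, numColumns=5,
                                 sheet=book.worksheet)

Concretely, __init__ sets up the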
instance\n\n\t\tParameters:\n\t\t\t- workbook (string): The name of the workbook\n\n\t\tReturns: None\n\t\t'''\n\t\t\n\t\tself.workbookName = workbook\n\t\ttry:\n\t\t\tself.workbook = self.openWorkbook ( workbook )\n\t\texcept Exception as detail:\n\t\t\tself.workbook = Workbook ( workbook )\n\t\t\t\n\t\tself.worksheet = self.getWorkbookActiveSheet ( )\n\t\tself.columns = self.worksheet.max_column\n\t\tself.rows = self.worksheet.max_row\n\n\tdef readFullWorkbook ( self , numRows , numColumns , sheet , startRow=1 ):\n\n\t\t'''\n\t\tMethod name: readFromWorkbook\n\t\tMethod Purpose: To read from the inputted workbook\n\n\t\tParameters:\n\t\t\t- numRows (integer): The number of rows to read\n\t\t\t- numColumns (integer): The number of columns to read\n\t\t\t- startRow (integer): The row to start reading from\n\t\t\t- sheet (unknown): The worksheet to read from\n\n\t\tReturns: A list of all data\n\t\t'''\n\n\t\tspreadsheet_data = []\n\t\tfor sheet_row in range ( *self.__getReadRange ( sheet.max_row , numRows , startRow ) ):\n\t\t\trowData = self.readRowFromWorkbook ( sheet_row , numColumns , sheet )\n\t\t\tspreadsheet_data.append ( rowData )\n\t\treturn spreadsheet_data\n\n\tdef readRowFromWorkbook ( self , rowNum , numColumns , sheet ):\n\n\t\t'''\n\t\tMethod name: readRowFromWorkbook\n\t\tMethod Purpose: To read an individual row from Workbook\n\n\t\tParameters:\n\t\t\t- rowNum (integer): The numbered row to read\n\t\t\t- numColumns (integer): The number of columns to read from row\n\t\t\t- sheet (unknown): The sheet that we are reading from\n\n\t\tReturns: A list of the data in the row\n\t\t'''\n\n\t\trowData = \t[\n\t\t\t\t\t\tsheet.cell (\n\t\t\t\t\t\t\trow=rowNum if rowNum < sheet.max_row + 1 else sheet.max_row, \\\n\t\t\t\t\t\t\tcolumn=column\n\t\t\t\t\t\t).value for column in range ( \\\n\t\t\t\t\t\t\t*self.__getReadRange ( sheet.max_column , numColumns )\n\t\t\t\t\t\t)\n\t\t\t\t\t]\n\t\treturn rowData\n\n\tdef writeToWorkbook ( self , sheet , row , rowData , columnStart=0 ):\n\n\t\t'''\n\t\tMethod name: writeToWorkbook\n\t\tMethod Purpose: To write to the inputted workbook\n\n\t\tParameters: \n\t\t\t- sheet (WorkSheet): The worksheet we are writing to\n\t\t\t- row (integer): The row number to write to\n\t\t\t- rowData (list): A list of data for row\n\t\t\t- columnStart (integer): The starting spot to place the column\n\n\t\tReturns: None\n\t\t'''\n\n\t\tcolNum = columnStart\n\t\tfor counter , data in enumerate ( rowData ):\n\t\t\tsheet.cell ( row=row , column=colNum ).value = data\n\t\t\tsheet.cell ( row=row , column=colNum ).alignment = Alignment ( wrap_text = False )\n\t\t\tcolNum = colNum + 1\t\n\t\tself.saveWorkbook ( )\n\t\t\n\tdef saveWorkbook ( self ):\n\t\n\t\t'''\n\t\tMethod name: saveWorkBook\n\t\tMethod Purpose: To save the workbook\n\n\t\tParameters: None\n\n\t\tReturns: None\n\t\t'''\n\t\n\t\tself.workbook.save ( self.workbookName )\n\t\t\n\tdef removeSheetFromWorkbook ( self , sheetName ):\n\n\t\t'''\n\t\tMethod name: removeSheetFromWorkbook\n\t\tMethod Purpose: To remove an inputted sheet from the inputted workbook\n\n\t\tParameters:\n\t\t\t- sheetName (string): The name of the sheet to remove\n\n\t\tReturns: None\n\t\t'''\n\n\t\ttry:\n\t\t\tself.workbook.remove ( self.workbook.get_sheet_by_name ( sheetName ) )\n\t\texcept:\n\t\t\tprint ( \"The sheet is unable to be deleted because the name is incorrect.\" )\n\n\tdef __getReadRange ( self , maxCell , inputCell , startCell=1 ):\n\n\t\t'''\n\t\tMethod name: __findReadRange\n\t\tMethod Purpose: To find the range being read 
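(the row or column span), clamped so a request can never run past what
the sheet actually holds. Two worked examples of the clamping
(illustration only):

    maxCell=10, inputCell=25, startCell=1  ->  (1, 11)
    maxCell=10, inputCell=4,  startCell=2  ->  (2, 5)

i.e. the upper bound is capped at maxCell + 1 and the lower bound falls
back to 1 when startCell is out of range; the clamped range is then read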
from Excel Worksheet;\n\t\t\t\t\t\teither the row or column range\n\n\t\tParameters:\n\t\t\t- maxCell (integer): The max number of cells that can be read\n\t\t\t- startCell (integer): The starting cell (row or column)\n\t\t\t- inputCell (integer): The user inputted range to be read\n\n\t\tReturns: A tuple containing the read range\n\t\t'''\n\n\t\tcellRange = (\n\t\t\t\tstartCell if startCell < inputCell and startCell > 0\n\t\t\t\t\t\t\telse 1 ,\n\t\t\t\tinputCell + 1 if inputCell < maxCell + 1 and inputCell < maxCell + 1 \\\n\t\t\t\t\t\t\telse maxCell + 1\n\t\t\t)\n\t\treturn cellRange\n\n\tdef getWorkbookActiveSheet ( self ):\n\n\t\t'''\n\t\tMethod name: __getWorkbookActiveSheet\n\t\tMethod Purpose: Get the Workbooks active sheet\n\n\t\tParameters: None\n\n\t\tReturns: The active sheet of the workbook\n\t\t'''\n\n\t\treturn self.workbook.active\n\n\tdef openWorkbook ( self , workbookName ):\n\n\t\t'''\n\t\tMethod name: _openWorkbook\n\t\tMethod Purpose: To open the workbook if it is not already\n\n\t\tParameters:\n\t\t\t- workbookName (string): The name of the workbook to open\n\n\t\tReturns: None\n\t\t'''\n\n\t\ttry:\n\t\t\tworkbook = load_workbook ( workbookName )\n\t\texcept FileNotFoundError:\n\t\t\traise Exception ( \"The file name entered could not be found in the file system.\" )\n\t\telse:\n\t\t\treturn workbook\n\t\t\t\n\tdef setRowHeight ( self , rowNum , height , worksheet ):\n\t\n\t\t'''\n\t\tMethod name: setRowHeight\n\t\tMethod Purpose: To set the row height\n\n\t\tParameters:\n\t\t\t- rowNum (integer):The row we are adjusting\n\t\t\t- height (integer): The new height of the rwo\n\t\t\t- worksheet (worksheet): The worksheet we are adjusting\n\n\t\tReturns: None\n\t\t'''\n\t\t\n\t\tworksheet.row_dimensions [ rowNum ].height = height\n\t\n\tdef setColumnWidth ( self , colNum , width , worksheet ):\n\t\n\t\t'''\n\t\tMethod name: setColumnWidth\n\t\tMethod Purpose: To set the column width\n\n\t\tParameters:\n\t\t\t- colNum (integer): The column we are adjusting\n\t\t\t- width (integer): The new width of the column\n\t\t\t- worksheet (worksheet): The worksheet we are adjusting\n\n\t\tReturns: None\n\t\t'''\n\t\t\n\t\tworksheet.column_dimensions [ get_column_letter ( colNum ) ].width = width\n\t\t\n\tdef getRowHeight ( self , rowNum , worksheet ):\n\t\n\t\t'''\n\t\tMethod name: setRowHeight\n\t\tMethod Purpose: To set the row height\n\n\t\tParameters:\n\t\t\t- rowNum (integer):The row we are adjusting\n\t\t\t- worksheet (worksheet): The worksheet we are adjusting\n\n\t\tReturns: None\n\t\t'''\n\t\t\n\t\theight = worksheet.row_dimensions [ rowNum ].height\n\t\treturn height\n\t\t\n\tdef getColumnWidth ( self , colNum , worksheet ):\n\t\n\t\t'''\n\t\tMethod name: setColumnWidth\n\t\tMethod Purpose: To set the column width\n\n\t\tParameters:\n\t\t\t- colNum (integer): The column we are adjusting\n\t\t\t- worksheet (worksheet): The worksheet we are adjusting\n\n\t\tReturns: None\n\t\t'''\n\n\t\twidth = worksheet.column_dimensions [ get_column_letter ( colNum ) ].width\n\t\treturn width\n\nclass SolarwindsEntity ( abc.ABC ):\n\n\t'''\n\tClass name: SolarwindsEntity\n\tClass Purpose: To serve as base class for all Solarwinds entities\n\t'''\n\t\t\n\tdef __init__ ( self , domain , username , password ):\n\t\n\t\t'''\n\t\tMethod name: __init__\n\t\tMethod Purpose: To start a solarwinds instance\n\n\t\tParameters:\n\t\t\t- domain (string): The domain to log into\n\t\t\t- username (string): The username to log into the system\n\t\t\t- password (string): The password for the associated 
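account. An end-to-end sketch (host, credentials and address below are
placeholders, not real values):

    entity = PortEntity('solarwinds.example.com', 'apiuser', 'secret',
                        ipAddress='10.0.0.5')
    for port in entity.getPortInfo():
        print(port['Name'], port['Caption'])

Since verify is hard-coded to False, certificate warnings are silenced
before SwisClient opens the session for the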
username\n\n\t\tReturns: None\n\t\t'''\n\t\t\n\t\t# the server is unverified --> allow without warning\n\t\tverify = False\n\t\tif not verify:\n\t\t\tfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\t\t\trequests.packages.urllib3.disable_warnings ( InsecureRequestWarning )\n\t\tself._solarwinds = SwisClient ( domain , username , password )\n\t\t\nclass PortEntity ( SolarwindsEntity ):\n\n\t'''\n\tClass name: PortEntity\n\tClass Purpose: To obtain individual port details from Solarwinds\n\t'''\n\n\tdef __init__ ( self , domain , username , password , ipAddress , nodeName=None ):\n\n\t\t'''\n\t\tMethod name: __init__\n\t\tMethod Purpose: To initialize a port detail instance\n\n\t\tParameters:\n\t\t\t- domain (string): The Solarwinds domain to connect to\n\t\t\t- username (string): The username of the user in solarwinds\n\t\t\t- password (string): The password of the user in solarwinds\n\t\t\t- ipAddress (string): The ip address the port is bound to\n\t\t\t- nodeName (string): An optional node caption used to narrow the search\n\n\t\tReturns: None\n\t\t'''\n\n\t\tsuper ( ).__init__ ( domain , username , password )\n\t\tself.__ipAddress = ipAddress\n\t\tself.__nodeName = nodeName\n\t\n\tdef getPortInfo ( self ):\n\n\t\t'''\n\t\tMethod name: getPortInfo\n\t\tMethod Purpose: To get the important port information from Solarwinds\n\n\t\tParameters: None\n\n\t\tReturns: A dictionary of the details of the query if it is found\n\t\t\t\t 'None' if results are not found\n\t\t'''\n\t\t\n\t\tsearchCondition = \"e.IPAddresses.IPAddress='{}'\".format ( self.__ipAddress ) \\\n\t\t\t\t\t\t  if self.__nodeName is None else \\\n\t\t\t\t\t\t  \"(e.IPAddresses.IPAddress='{}' and e.Ports.Port.Node.Caption='{}')\".format ( self.__ipAddress , self.__nodeName )\n\t\tportQueryResults = self._solarwinds.query (\n\t\t\t\"\"\"\n\t\t\tSELECT\n\t\t\t\te.Ports.Port.Name,\n\t\t\t\te.Ports.Port.PortDescription,\n\t\t\t\te.Ports.Port.Speed,\n\t\t\t\te.Ports.Port.Duplex,\n\t\t\t\te.Ports.Port.Node.Caption\n\t\t\tFROM\n\t\t\t\tOrion.UDT.Endpoint e\n\t\t\tWHERE\n\t\t\t\t( e.Ports.ConnectionType=1 or e.Ports.ConnectionType=2 )\n\t\t\t\tand {}\n\t\t\t\"\"\".format ( searchCondition )\n\t\t)\n\n\t\treturn portQueryResults [ 'results' ]\n","sub_path":"Solar_Scripts/PortInfo/portinfo.py","file_name":"portinfo.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64793998","text":"#!/usr/bin/env python\n# coding=utf-8\n# Author: bloke\n# test the logging module\n\nimport logging\n\nlogger = logging.Logger(__name__)\nformatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s] %(message)s', datefmt='%y/%m/%d %H:%M:%S')\n\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nch.setFormatter(formatter)\n\nfh = logging.FileHandler('test.log', 'a', 'utf-8')\nfh.setLevel(logging.WARNING)\nfh.setFormatter(formatter)\n\nlogger.addHandler(ch)\nlogger.addHandler(fh)\n\nlogger.info('info.test')\nlogger.warning('warning.test')\nlogger.error('error.test')\n\n\n","sub_path":"my_test/日志.py","file_name":"日志.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218972802","text":"# coding=utf-8\n\"\"\"Tests that perform actions over modular Errata 
repositories.\"\"\"\nimport unittest\nfrom collections import defaultdict\nfrom urllib.parse import urljoin\n\nfrom packaging.version import Version\nfrom pulp_smash import api, config, utils\nfrom pulp_smash.pulp2.constants import REPOSITORY_PATH\nfrom pulp_smash.pulp2.utils import (\n    publish_repo,\n    search_units,\n    sync_repo,\n    upload_import_erratum,\n)\n\nfrom pulp_2_tests.constants import (\n    MODULE_ERRATA_RPM_DATA,\n    MODULE_FIXTURES_ERRATA,\n    RPM_WITH_MODULES_FEED_URL,\n)\nfrom pulp_2_tests.tests.rpm.api_v2.utils import (\n    gen_distributor,\n    gen_repo,\n    get_repodata,\n    get_xml_content_from_fixture,\n)\n\n\nclass ManageModularErrataTestCase(unittest.TestCase):\n    \"\"\"Manage Modular Errata content testcase.\n\n    This test targets the following issues:\n\n    * `Pulp-2-Tests #94 `_.\n    * `Pulp #3919 `_\n\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Create class wide variables.\"\"\"\n        cls.cfg = config.get_config()\n        if cls.cfg.pulp_version < Version('2.18'):\n            raise unittest.SkipTest('This test requires Pulp 2.18 or newer.')\n        cls.client = api.Client(cls.cfg, api.json_handler)\n\n    def test_sync_publish_update_info(self):\n        \"\"\"Test sync, publish of a Modular RPM repo and check the update info.\n\n        This testcase reads the updateinfo of the repository that is published\n        and compares that against the ``updateinfo.xml`` present in the feed\n        url.\n\n        Steps involved:\n\n        1. Create a repository with feed url containing modules.\n        2. The repository should have a distributor to publish it.\n        3. Once the repository is created, it is synced and published.\n        4. Get the ``updateinfo`` from the repodata of the published repo.\n        5. Compare this against the ``update_info.xml`` in the fixtures repo.\n        \"\"\"\n        _, update_list = self._set_repo_and_get_repo_data()\n\n        # getting the update info from the fixtures repo\n        update_info_fixtures = get_xml_content_from_fixture(\n            fixture_path=RPM_WITH_MODULES_FEED_URL,\n            data_type='updateinfo',\n        )\n        self.assertEqual(\n            self._get_errata_rpm_mapping(update_list),\n            self._get_errata_rpm_mapping(update_info_fixtures),\n            'mismatch in the module packages.'\n        )\n\n    def test_collection_field(self):\n        \"\"\"Test the collection field in the update info.\n\n        This test provides the following:\n\n        1. Check whether all the modules in the published repo contain a\n           collection field.\n        2. Check whether the collection field has a proper name. The collection\n           name computation is as below.\n\n        The collection name is created using the information from fixtures that\n        is stored in a set ``{<errata_id>:<module_name>}``.\n\n        First, the set information is used in computing a set\n        ``collections_from_fixtures`` that maps the repo_id to the\n        collection-name.\n\n        The collection-name set is computed using the logic\n        ``<repo_id>_<index>_<module_name>``. The module name is ``default``\n        and the index is 0 for ursine RPMs.\n\n        The set is created using set-comprehension and x-path. After creating\n        the set, it appears as in the example below.\n\n        .. 
code:: python\n\n            collections_from_fixtures = {\n                'RHEA..1' : 'repo_id_1_duck',\n                'RHEA..2' : 'repo_id_2_duck',\n                'RHEA..3' : 'repo_id_0_default'\n            }\n\n        This set is compared against the collection-name from the published\n        repo's ``updateinfo``.\n        \"\"\"\n        repo, update_list = self._set_repo_and_get_repo_data()\n\n        # getting the updateinfo from the fixtures repo\n        update_info_fixtures = get_xml_content_from_fixture(\n            fixture_path=RPM_WITH_MODULES_FEED_URL,\n            data_type='updateinfo',\n        )\n\n        # Errata ID to collection name map in updateinfo of published repo.\n        collection_update_list = {\n            update.find('./id').text:\n            update.find('.//collection').attrib['short']\n            for update in update_list.findall('update')\n        }\n\n        collections_from_fixtures = {\n            update.find('id').text:\n            'default' if update.find('.//module') is None\n            else update.find('.//module').attrib['name']\n            for update in\n            update_info_fixtures.findall('.//update')\n        }\n\n        # indexes is used to increase the index of the module in the\n        # collections\n        indexes = defaultdict(lambda: 1)\n        for key, val in tuple(collections_from_fixtures.items()):\n            if val in indexes:\n                indexes[val] += 1\n            collections_from_fixtures[key] = (\n                '{}_0_default'.format(repo['id'])\n                if val == 'default'\n                else '{}_{}_{}'.format(repo['id'], indexes[val], val)\n            )\n\n        self.assertEqual(\n            collections_from_fixtures,\n            collection_update_list,\n            'collection names not proper'\n        )\n\n    def test_copy_errata(self):\n        \"\"\"Test whether Errata modules are copied.\n\n        This test does the following:\n\n        1. It creates, syncs, and publishes a modules rpm repository.\n        2. Creates another repo with no feed.\n        3. Recursively copies an errata from one repo to another.\n        4. Checks whether the errata information in the new repo is\n           correct.\n        \"\"\"\n        repo_1, _ = self._set_repo_and_get_repo_data()\n\n        # Creating an empty repo2\n        body = gen_repo(distributors=[gen_distributor(auto_publish=True)])\n        repo_2 = self.client.post(REPOSITORY_PATH, body)\n        self.addCleanup(self.client.delete, repo_2['_href'])\n\n        criteria = {\n            'filters': {\n                'unit': {\n                    'id': MODULE_FIXTURES_ERRATA['errata_id']\n                }},\n            'type_ids': ['erratum']\n        }\n\n        # Copy errata data recursively from repo1 to repo2\n        self.client.post(urljoin(repo_2['_href'], 'actions/associate/'), {\n            'source_repo_id': repo_1['id'],\n            'override_config': {'recursive': True},\n            'criteria': criteria\n        })\n        repo_2 = self.client.get(repo_2['_href'], params={'details': True})\n\n        self.assertEqual(\n            repo_2['total_repository_units'],\n            MODULE_FIXTURES_ERRATA['total_available_units'],\n            repo_2\n        )\n\n        self.assertEqual(\n            search_units(self.cfg, repo_1, criteria)[0]['metadata']['pkglist'],\n            search_units(self.cfg, repo_2, criteria)[0]['metadata']['pkglist'],\n            \"Copied erratum doesn't contain the same module/rpms\"\n        )\n\n    def test_upload_errata(self):\n        \"\"\"Upload errata and check whether it got published in the repo.\n\n        This test does the following.\n        1. Create and sync a repo with RPM_WITH_MODULES_FEED_URL.\n        2. Upload a custom modular erratum to the repo. The custom\n           module erratum is obtained from ``_gen_modular_errata()``.\n           Make sure that the erratum uploaded has a corresponding\n           module in the feed url.\n        3. Publish the repo after uploading the custom erratum.\n        4. 
Verify whether the uploaded erratum is present in the\n           published repo and also contains the modules in it.\n        \"\"\"\n        # Step 1\n        body = gen_repo(\n            importer_config={'feed': RPM_WITH_MODULES_FEED_URL},\n            distributors=[gen_distributor()]\n        )\n        repo_initial = self.client.post(REPOSITORY_PATH, body)\n        self.addCleanup(self.client.delete, repo_initial['_href'])\n        sync_repo(self.cfg, repo_initial)\n        # refresh the repo details after the sync\n        repo_initial = self.client.get(\n            repo_initial['_href'],\n            params={'details': True}\n        )\n\n        # Step 2\n        unit = self._gen_modular_errata()\n        upload_import_erratum(self.cfg, unit, repo_initial)\n        repo = self.client.get(\n            repo_initial['_href'],\n            params={'details': True}\n        )\n\n        # Step 3\n        publish_repo(\n            self.cfg, repo,\n            {\n                'id': repo['distributors'][0]['id'],\n                'override_config': {'force_full': True},\n            })\n\n        # Step 4\n        # update_info_file - the ``updateinfo.xml`` of the published repo.\n        update_info_file = get_repodata(\n            self.cfg,\n            repo['distributors'][0],\n            'updateinfo'\n        )\n\n        # errata_upload - the erratum uploaded in step 2, as found\n        # in the updateinfo.xml.\n        errata_upload = [\n            update\n            for update in update_info_file.findall('update')\n            if update.find('id').text == unit['id']\n        ]\n\n        self.assertEqual(\n            repo_initial['content_unit_counts']['erratum'] + 1,\n            repo['content_unit_counts']['erratum'],\n            'Erratum count mismatch after uploading.'\n        )\n        self.assertGreater(len(errata_upload), 0)\n        self.assertIsNotNone(errata_upload[0].find('.//module'))\n\n    def _set_repo_and_get_repo_data(self):\n        \"\"\"Create and publish the required repo for this class.\n\n        This method does the following:\n\n        1. Create, sync and publish a repo with\n           ``RPM_WITH_MODULES_FEED_URL``\n        2. Get ``updateinfo.xml`` of the published repo.\n\n        :returns: A tuple containing the repo that is created, along with\n            the ``updateinfo.xml`` of the created repo.\n        \"\"\"\n        body = gen_repo(\n            importer_config={'feed': RPM_WITH_MODULES_FEED_URL},\n            distributors=[gen_distributor(auto_publish=True)]\n        )\n        repo = self.client.post(REPOSITORY_PATH, body)\n        self.addCleanup(self.client.delete, repo['_href'])\n        sync_repo(self.cfg, repo)\n\n        # getting the updateinfo from the published repo\n        repo = self.client.get(repo['_href'], params={'details': True})\n        return repo, get_repodata(\n            self.cfg,\n            repo['distributors'][0], 'updateinfo'\n        )\n\n    @staticmethod\n    def _get_errata_rpm_mapping(xml):\n        mapper = {}\n        for update in xml.findall('update'):\n            mapper[update.find('id').text] = [\n                package.text for package in update.findall('.//filename')\n            ]\n        return mapper\n\n    @staticmethod\n    def _gen_modular_errata():\n        \"\"\"Generate and return a modular erratum with a unique ID.\"\"\"\n        return {\n            'id': utils.uuid4(),\n            'status': 'stable',\n            'updated': MODULE_ERRATA_RPM_DATA['updated'],\n            'rights': None,\n            'from': MODULE_ERRATA_RPM_DATA['from'],\n            'description': MODULE_ERRATA_RPM_DATA['description'],\n            'title': MODULE_ERRATA_RPM_DATA['rpm_name'],\n            'issued': MODULE_ERRATA_RPM_DATA['issued'],\n            'relogin_suggested': False,\n            'restart_suggested': False,\n            'solution': None,\n            'summary': None,\n            'pushcount': '1',\n            'version': '1',\n            'references': [],\n            'release': '1',\n            'reboot_suggested': None,\n            'type': 'enhancement',\n            'severity': None,\n            'pkglist': [{\n                'name': MODULE_ERRATA_RPM_DATA['collection_name'],\n                'short': '0',\n                'module': {\n                    'name': MODULE_ERRATA_RPM_DATA['rpm_name'],\n                    'stream': MODULE_ERRATA_RPM_DATA['stream_name'],\n                    'version': MODULE_ERRATA_RPM_DATA['version'],\n                    'arch': 
MODULE_ERRATA_RPM_DATA['arch'],\n 'context': MODULE_ERRATA_RPM_DATA['context']\n },\n 'packages': []\n }]\n }\n","sub_path":"pulp_2_tests/tests/rpm/api_v2/test_modular_errata.py","file_name":"test_modular_errata.py","file_ext":"py","file_size_in_byte":12027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270658508","text":"\"\"\"Conforming Crouzeix-Raviart elements on simplices.\n\nThis element's definition appears in https://doi.org/10.1051/m2an/197307R300331\n(Crouzeix, Raviart, 1973)\n\"\"\"\n\nimport sympy\nfrom ..finite_element import CiarletElement\nfrom ..polynomials import polynomial_set\nfrom ..functionals import PointEvaluation\nfrom ..symbolic import x\n\n\nclass ConformingCrouzeixRaviart(CiarletElement):\n \"\"\"Conforming Crouzeix-Raviart finite element.\"\"\"\n\n def __init__(self, reference, order):\n if reference.vertices != reference.reference_vertices:\n raise NotImplementedError()\n assert reference.name == \"triangle\"\n\n poly = polynomial_set(reference.tdim, 1, order)\n\n poly += [\n x[0] ** i * x[1] ** (order - i) * (x[0] + x[1])\n for i in range(1, order)\n ]\n\n dofs = []\n for i, v in enumerate(reference.vertices):\n dofs.append(PointEvaluation(v, entity=(0, i)))\n if order >= 2:\n for i, edge in enumerate(reference.edges):\n for p in range(1, order):\n v = tuple(sympy.Rational((order - p) * a + p * b, order) for a, b in zip(\n reference.vertices[edge[0]], reference.vertices[edge[1]]))\n dofs.append(PointEvaluation(v, entity=(1, i)))\n for i in range(1, order):\n for j in range(1, order + 1 - i):\n point = (\n sympy.Rational(3 * i - 1, 3 * order),\n sympy.Rational(3 * j - 1, 3 * order)\n )\n dofs.append(PointEvaluation(point, entity=(2, 0)))\n\n super().__init__(reference, order, poly, dofs, reference.tdim, 1)\n\n names = [\"conforming Crouzeix-Raviart\", \"conforming CR\"]\n references = [\"triangle\"]\n min_order = 1\n continuity = \"L2\"\n","sub_path":"symfem/elements/conforming_crouzeix_raviart.py","file_name":"conforming_crouzeix_raviart.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67320093","text":"# -*- coding: utf-8 -*-\n\n\ninithooks = [\"server\", \"chan\", \"lastpubmsg\"]\nargs = [\"!asc\"]\n\ndef init(server, chan, la):\n s = la.split(\"!asc \")[1]\n ascii = []\n for char in s:\n ascii.append(\"&#\" + str(ord(char)) + \";\")\n server.privmsg(chan, \"Here's the ascii character version of the text inputted: %s \" % \"\".join(ascii))","sub_path":"modules/ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607999321","text":"# -*- coding:utf-8 -*-\n\n\nclass Series(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, series_data=None):\n # General properties\n self.id = None\n self.name = None\n self.description = None\n self.year_start = None\n self.year_end = None\n # Metadata\n self.genres = []\n self.authors = []\n self.producers = []\n self.directors = []\n self.genre_main = None\n # Content\n self.season_count = None\n self.movie_count = None\n self.seasons = []\n self.episodes = []\n # Load data if possible\n if series_data is not None:\n self.load(series_data)\n\n def load(self, src_data):\n \"\"\" \"\"\"\n try:\n # Load general properties\n self.id = src_data.get('id', None)\n self.name = src_data.get('series', None)\n self.description = src_data.get('description', 
None)\n            self.year_start = src_data.get('start', 0)\n            self.year_end = src_data.get('end', 0)\n            # Load metadata\n            _data = src_data.get('data', None)\n            if _data is not None:\n                self.genres = _data.get('genre', [])\n                self.authors = _data.get('author', [])\n                self.producers = _data.get('producer', [])\n                self.directors = _data.get('director', [])\n                self.genre_main = _data.get('main_genre', None)\n            # Content\n            self.season_count = int(src_data.get('seasons', 0))\n            self.movie_count = src_data.get('movies', 0)\n        except (KeyError, TypeError, ValueError) as err:\n            # dict.get() never raises KeyError; the int() conversion above is\n            # what can actually fail (ValueError/TypeError)\n            print('[x] Error while loading Series data: {0}'.format(err))\n","sub_path":"bsapi/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476030885","text":"#https://leetcode.com/problems/validate-binary-search-tree/\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def __init__(self):\n        self.cache = {}\n\n    def isValidBST(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: bool\n        \"\"\"\n        if root:\n            if root.left:\n                min_, max_ = self.range(root.left)\n                if max_ >= root.val:\n                    return False\n\n            if root.right:\n                min_, max_ = self.range(root.right)\n                if min_ <= root.val:\n                    return False\n\n            if root.left:\n                if not self.isValidBST(root.left):\n                    return False\n\n            if root.right:\n                if not self.isValidBST(root.right):\n                    return False\n\n        return True\n\n    def range(self, root):\n        # memoized (min, max) of each subtree keeps repeated lookups O(1)\n        if id(root) in self.cache:\n            return self.cache[id(root)]\n\n        min_, max_ = root.val, root.val\n\n        if root.left:\n            a, b = self.range(root.left)\n            min_ = min(a, min_)\n            max_ = max(b, max_)\n\n        if root.right:\n            a, b = self.range(root.right)\n            min_ = min(a, min_)\n            max_ = max(b, max_)\n\n        self.cache[id(root)] = (min_, max_)\n\n        return min_, max_\n","sub_path":"py/validate-binary-search-tree.py","file_name":"validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170712339","text":"import ast\nfrom pathlib import Path\nfrom typing import Callable\n\nwanted_name = \"Filter\"\n\n\ndef verify_no_prohibited_calls(module_path):\n    # prohibited_names = {\"filter\", \"print\"}\n    prohibited_names = {\"print\"}\n\n    with open(module_path, \"r\") as module:\n        code = module.read()\n\n    tree = ast.parse(code)\n\n    for node in ast.walk(tree):\n        if not isinstance(node, ast.Name):\n            continue\n\n        assert node.id not in prohibited_names, (\n            f\"`{node.id}` detected in {module_path}\"\n            f\" at {node.lineno}:{node.col_offset}\"\n            f\" - must not use\"\n        )\n\n\ndef wants_path(pth: Path) -> bool:\n    return \"lesson05\" in pth.name\n\n\ndef wants_module(module) -> bool:\n    return hasattr(module, wanted_name)\n\n\ndef verify(module):\n    verify_no_prohibited_calls(module.__file__)\n\n    assert hasattr(module, wanted_name), f\"no `{wanted_name}` defined in {module}\"\n\n    r = getattr(module, wanted_name)\n\n    assert isinstance(r, Callable), f\"`{wanted_name}` is not callable\"\n\n    assert r(bool, [0, 1, 2]) == [1, 2]\n    assert r(bool, []) == []\n\n    def kek(s):\n        return \"kek\" in s\n\n    assert r(kek, [\"ke\", \"kek\", \"keke\", \"ekek\", \"lul\"]) == [\"kek\", \"keke\", 
\"ekek\"]\n","sub_path":"lessons/05/tests/test_level4_filter.py","file_name":"test_level4_filter.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49290690","text":"# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collector.test.base import DbTest\n\nfrom sqlalchemy.exc import IntegrityError\n\nfrom collector.api.app import db\nfrom collector.api.db.model import ActionLog\n\n\nclass TestModelActionLog(DbTest):\n\n def test_unique_constraints(self):\n db.session.add(\n ActionLog(master_node_uid='master_node_uid', external_id=1))\n db.session.add(\n ActionLog(master_node_uid='master_node_uid', external_id=1))\n self.assertRaises(IntegrityError, db.session.flush)\n\n def test_non_nullable_fields(self):\n db.session.add(ActionLog())\n self.assertRaises(IntegrityError, db.session.flush)\n db.session.rollback()\n\n db.session.add(ActionLog(master_node_uid='master_node_uid'))\n self.assertRaises(IntegrityError, db.session.flush)\n db.session.rollback()\n","sub_path":"collector/collector/test/db/test_model_action_logs.py","file_name":"test_model_action_logs.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"518661892","text":"import numpy as np\nimport pickle\nimport wget\nimport os\nimport tarfile\n\ndef get_data(data_cfg):\n url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n os.makedirs(data_cfg[\"outdir\"])\n wget.download(url, data_cfg[\"outdir\"])\n\n tar = tarfile.open(data_cfg[\"outdir\"]+\"/cifar-10-python.tar.gz\", \"r:gz\")\n tar.extractall(path=data_cfg[\"outdir\"])\n tar.close()\n return\n\ndef unpickle(file):\n with open(file, 'rb') as f:\n data = pickle.load(f, encoding=\"latin1\")\n return data\n\n\ndef load_training_data(dataset_path):\n train_images = np.zeros([50000, 3072])\n train_labels = np.zeros([50000])\n\n start = 0\n n_images_in_a_file = 10000\n for i in range(1, 6):\n path = os.path.join(dataset_path,\"cifar-10-batches-py/data_batch_{}\".format(i))\n data_dict = unpickle(path)\n train_images[start: start + n_images_in_a_file, :] = data_dict[\"data\"]\n train_labels[start: start + n_images_in_a_file] = data_dict[\"labels\"]\n start += n_images_in_a_file\n\n return np.asarray(train_images, dtype=np.int), np.asarray(train_labels, dtype=np.int)\n\n\ndef load_test_data(dataset_path):\n\n path = os.path.join(dataset_path, \"cifar-10-batches-py/test_batch\")\n datadict = unpickle(path)\n test_images = datadict[\"data\"]\n test_labels = datadict[\"labels\"]\n return np.asarray(test_images, dtype=np.int), np.asarray(test_labels, dtype=np.int)","sub_path":"src/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422486177","text":"\"\"\"\nTest Addresses:\n[{\"zipcode\": \"89128\", \"address\": \"7904 Verde Springs 
Dr\"},\n{\"zipcode\": \"33473\", \"address\": \"8691 Flowersong Cv\"},\n{\"zipcode\": \"78255\", \"address\": \"22905 Cielo Vis\"},\n{\"zipcode\": \"03076\", \"address\": \"16 Thomas Ave\"},\n{\"zipcode\": \"22314\", \"address\": \"1111 Oronoco St Unit 441\"},\n{\"zipcode\": \"55407\", \"address\": \"2718 16th Ave S\"},\n{\"zipcode\": \"95023\", \"address\": \"590 Foothill Rd\"},\n{\"zipcode\": \"81211\", \"address\": \"30737 County Road 356-6\"},\n{\"zipcode\": \"60606\", \"address\": \"333 N Canal St Apt 2901\"},\n{\"zipcode\": \"48162\", \"address\": \"3466 Erie Shore Dr\"}]\n\"\"\"\n\nimport os\nimport requests\nimport sys\nsys.path.append(os.getcwd())\n#sys.path.append('')\nfrom src.py.pg import PGWriter\nPG = PGWriter()\n\n#test_api_key = \"test_IM833EYJ7XGC4ZOGFXWU\"\n#test_api_secret = \"VcQm0uRgVlMmcX2jGa7Lc1T0ygqFsXt9\"\nHOUSECANARY_KEY = os.environ.get('HOUSECANARY_KEY')\nHOUSECANARY_SECRET = os.environ.get('HOUSECANARY_SECRET')\nURL_BASE = \"https://api.housecanary.com/v2/\"\n\n# Makes REST Request to HouseCanary\ndef do_hc_request(url, params=None):\n    '''Request to HouseCanary API'''\n    try:\n        response = requests.get(url, params=params,\n                                auth=(HOUSECANARY_KEY, HOUSECANARY_SECRET))\n        return response.json()\n    except (requests.RequestException, ValueError):\n        # network failure or a non-JSON response body\n        return None\n\n# Mavu - 20170517\ndef get_housecanary_data(property_name, params):\n    \"\"\"Get housecanary data\"\"\"\n    hc_data = {\n        'detail' : None,\n        'census' : None,\n        'sales_history' : None,\n        'zip_details' : None,\n        'school' : None,\n        'geocode' : None\n    }\n\n    hc_data['detail'] = PG.get_housecanary_details(property_name)\n    if hc_data['detail'] is None:\n        hc_details_result = do_hc_request(URL_BASE + 'property/details', params)\n        PG.save_housecanary_details(property_name, hc_details_result)\n        hc_data['detail'] = PG.get_housecanary_details(property_name)\n\n    hc_data['census'] = PG.get_housecanary_census(property_name)\n    if hc_data['census'] is None:\n        hc_census_result = do_hc_request(URL_BASE + 'property/census', params)\n        PG.save_housecanary_census(property_name, hc_census_result)\n        hc_data['census'] = PG.get_housecanary_census(property_name)\n\n    hc_data['zip_details'] = PG.get_housecanary_zip_details(property_name)\n    if hc_data['zip_details'] is None:\n        hc_zip_details_result = do_hc_request(URL_BASE + 'property/details',\n                                              params)\n        PG.save_housecanary_zip_details(property_name, hc_zip_details_result)\n        hc_data['zip_details'] = PG.get_housecanary_zip_details(property_name)\n\n    hc_data['sales_history'] = PG.get_housecanary_sales_history(property_name)\n    if len(hc_data['sales_history']) == 0:\n        hc_sales_history_result = do_hc_request(\n            URL_BASE + 'property/sales_history',\n            params)\n        PG.save_housecanary_sales_history(property_name,\n                                          hc_sales_history_result)\n        hc_data['sales_history'] = PG.get_housecanary_sales_history(\n            property_name)\n\n    hc_data['school'] = PG.get_housecanary_school(property_name)\n    if hc_data['school'] is None:\n        hc_school_result = do_hc_request(URL_BASE + 'property/school', params)\n        PG.save_housecanary_school(hc_school_result, property_name)\n        hc_data['school'] = PG.get_housecanary_school(property_name)\n\n    hc_data['geocode'] = PG.get_housecanary_geocode(params['zipcode'])\n    if hc_data['geocode'] is None:\n        hc_geocode_result = do_hc_request(URL_BASE + 'property/geocode', params)\n        PG.save_housecanary_geocode(hc_geocode_result)\n        hc_data['geocode'] = PG.get_housecanary_geocode(params['zipcode'])\n\n    return 
hc_data\n","sub_path":"src/py/housecanary.py","file_name":"housecanary.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119539122","text":"#coding:utf-8\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.signal.pool import pool_2d\n\n########## Layer(super class) ##########\nclass Layer( object ):\n def __init__( self, weight, bias, gamma, beta, Wshape=None, Bshape=None, Wini=0.01, floatX = theano.config.floatX ):\n self.weight = weight\n self.bias = bias\n self.gamma = gamma\n self.beta = beta\n\n if self.weight:\n self.W = theano.shared( np.array( Wini * np.random.standard_normal( Wshape ), dtype = floatX ) )\n self.dW = theano.shared( np.zeros( Wshape, dtype = floatX ) )\n if self.bias:\n self.b = theano.shared( np.zeros( Bshape, dtype = floatX ) )\n self.db = theano.shared( np.zeros( Bshape, dtype = floatX ) )\n\n\n\n########## Relu Layer ##########\nclass ReluLayer( Layer ):\n\n def __init__( self, weight=False, bias=False, gamma=False, beta=False ):\n Layer.__init__( self, weight, bias, gamma, beta )\n\n def output ( self, X, train_flag ):\n #return T.switch( X > 0, X, 0 )\n return T.nnet.relu(X)\n\n\n########## Softmax Layer ##########\nclass SoftmaxLayer( Layer ):\n\n def __init__( self, bias=False, weight=False, gamma=False, beta=False ):\n Layer.__init__( self, weight, bias, gamma, beta )\n\n def output ( self, X, train_flag ):\n return T.nnet.softmax( X )\n\n\n########## Convolution Layer ##########\nclass ConvLayer( Layer ):\n\n def __init__( self, Xdim, Wdim, bias=True, weight=True, gamma=False, beta=False, Heinit=True, floatX = theano.config.floatX ):\n Layer.__init__( self, weight, bias, gamma, beta, Wshape=Wdim, Bshape=Wdim[0] ,Wini=weight_init( Heinit, Xdim ) )\n self.Xshape = Xdim\n self.Wshape = Wdim\n\n\n def output( self, X, train_flag ):\n # X: Ndat x Xshape, Y: Ndat x Yshape\n Xs = ( None, self.Xshape[0], self.Xshape[1], self.Xshape[2] )\n Ws = self.Wshape\n Z = T.nnet.conv.conv2d( X, self.W, image_shape = Xs, filter_shape = Ws )\n if self.bias : Z += self.b.dimshuffle( 'x', 0, 'x', 'x' ) # 1 x nch x 1 x 1\n\n return Z\n\n\n########## Pooling Layer ##########\nclass PoolLayer( Layer ):\n\n def __init__( self, Xdim, Ydim, ds, bias=False, weight=False, gamma=False, beta=False, st = None, floatX = theano.config.floatX ):\n Layer.__init__( self, weight, bias, gamma, beta, Bshape=Xdim[0] )\n\n # parameters of the pooling layer\n self.ds = ds\n self.st = st\n self.ignore_border = False\n\n\n\n def output( self, X, train_flag ):\n Z = pool_2d(input = X, ws = self.ds, ignore_border = self.ignore_border, stride = self.st, mode = 'max')\n if self.bias : Z += self.b.dimshuffle( 'x', 0, 'x', 'x' ) # 1 x nch x 1 x 1\n\n return Z\n\n\n########## Affine Layer ##########\nclass AffineLayer( Layer ):\n\n def __init__( self, Din, Nunit, bias=True, weight=True, Heinit=True, gamma=False, beta=False, floatX = theano.config.floatX, T4toMat = False ):\n Layer.__init__( self, weight, bias, gamma, beta, Wshape=( Nunit, Din ), Bshape=Nunit, Wini=weight_init( Heinit, Din ) )\n self.T4toMat = T4toMat\n\n self.dropout = 0.5\n self.Nunit = Nunit\n #self.mask = T.shared_randomstreams.RandomStreams(0).uniform(( Nunit, )) <= self.dropout\n\n\n def output( self, X, train_flag ):\n if self.T4toMat : X = X.reshape( ( X.shape[0], -1 ) )\n Z = T.dot( X, self.W.T )\n if self.bias : Z += self.b\n\n\n mask = T.shared_randomstreams.RandomStreams(0).uniform(( self.Nunit, )) <= 
self.dropout\n\n        if train_flag:\n            return Z * mask\n        else:\n            return Z * self.dropout\n\n\nclass BatchNormLayer( Layer ):\n\n    def __init__( self, Xdim, momentum=0.9, running_mean=None, running_var=None, bias=False, weight=False, gamma=True, beta=True, floatX = theano.config.floatX ):\n        #gamma=False\n        #beta=False\n        Layer.__init__( self, weight, bias, gamma, beta )\n        # BNa : gamma, BNb : beta\n        #self.BNgamma = theano.shared( np.ones( Xdim, dtype = floatX ) )\n        #self.dBNgamma = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        #self.BNbeta = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        #self.dBNbeta = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n\n        self.BNmu = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.dBNmu = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.BNsig2 = theano.shared( np.ones( Xdim, dtype = floatX ) )\n        self.dBNsig2 = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.BNeps = 0.01\n\n        self.BNgamma = theano.shared( np.ones( Xdim, dtype = floatX ) )\n        self.BNbeta = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.dBNgamma = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.dBNbeta = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n\n        self.BNmean = theano.shared( np.zeros( Xdim, dtype = floatX ) )\n        self.BNvar = theano.shared( np.ones( Xdim, dtype = floatX ) )\n\n\n    def output( self, X, train_flag ):\n        X_tmp = X\n\n        if train_flag:\n            #Z, _, _, self.BNmean, self.BNvar = T.nnet.bn.batch_normalization_train( X, gamma=self.BNgamma, beta=self.BNbeta, running_mean=self.BNmean, running_var=self.BNvar )\n            # normalize the batch, then track running statistics\n            BNmu = T.mean( X, axis=0 )\n            X -= BNmu\n            BNsig2 = T.mean( T.sqr(X), axis=0 ) + self.BNeps\n            X /= T.sqrt( BNsig2 )\n            self.dBNmu = BNmu\n            self.dBNsig2 = BNsig2\n            self.BNmu = 0.9 * self.BNmu + (1-0.9) * BNmu\n            self.BNsig2 = 0.9 * self.BNsig2 + (1-0.9) * BNsig2\n\n            #Z, _, _ = T.nnet.bn.batch_normalization_train( inputs=X_tmp, gamma=self.BNgamma, beta=self.BNbeta )\n            #print(a)\n            #Z = a\n            Z = self.BNgamma * X + self.BNbeta\n\n\n        else:\n            Z = T.nnet.bn.batch_normalization_test( X, gamma=self.BNgamma, beta=self.BNbeta, mean=self.BNmean, var=self.BNvar )\n            #Z = T.nnet.bn.batch_normalization_test( inputs=X, gamma=self.BNgamma, beta=self.BNbeta, mean=self.BNmean, var=self.BNvar )\n            #Z = self.BNa * ( X - self.BNmu ) / T.sqrt( self.BNsig2 ) + self.BNb\n\n\n        return Z\n\n\n\n\ndef weight_init( Heinit, Xdim ):\n    return np.sqrt( 2.0 / np.prod( Xdim ) ) if Heinit else 0.01\n","sub_path":"source/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"250193640","text":"import sys\nimport cv2\n\ncat = cv2.imread('cat.bmp', cv2.IMREAD_GRAYSCALE)\n\nif cat is None:\n    print(\"Image load failed\")\n    sys.exit()\n\n\ncv2.imwrite('cat_gray.png', cat)\ncv2.namedWindow('cat') # not needed when the window size does not have to be adjusted\ncv2.imshow('cat', cat)\nkey = cv2.waitKey()\nprint(key)\n\n# cv2.destroyWindow('cat')\ncv2.destroyAllWindows()","sub_path":"ComputerVision/OpenCV/OpenCV Basic/영상 인식, 출력, 저장/img_show(2).py","file_name":"img_show(2).py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"521307704","text":"from django.contrib.auth.models import User\nfrom score import NUMBER_VERIFICATION_BONUS\nfrom django.shortcuts import render, redirect\nfrom django.core.urlresolvers import reverse_lazy\nfrom unauth_forms import ResetForgettersPasswordForm\nfrom 
account_kit_config_manager import account_kit_handshake\nfrom redis4 import save_careem_data, get_temp_order_data, place_order, save_order_data, retrieve_uname #log_referrer, save_number_verification_error_data\nfrom tasks import save_consumer_credentials, set_user_binding_with_twilio_notify_service, increase_user_points\nfrom redis3 import save_basic_ad_data, someone_elses_number, get_temporarily_saved_ad_data, get_user_csrf, get_user_verified_number#, get_buyer_snapshot\nfrom redis5 import get_personal_group_target_id_and_csrf, get_personal_group_anon_state, set_personal_group_mobile_num_cooloff, can_change_number\nfrom group_views import enter_personal_group\nfrom models import UserProfile\n\t\n\ndef get_requirements(request, csrf, careem=False, csrf_omitted=False):\n\tstatus = request.GET.get('status', None)\n\tauth_code = request.GET.get('code', None) #authorization code which our server may exchange for a user access token.\n\tstate = request.GET.get('state', None) #to verify that FB's servers returned with the response\n\tif careem:\n\t\treturn account_kit_handshake(csrf=request.session[\"csrf_careem\"], state=state, status=status, auth_code=auth_code, csrf_omitted=csrf_omitted)\n\telse:\n\t\treturn account_kit_handshake(csrf=csrf, state=state, status=status, auth_code=auth_code, csrf_omitted=csrf_omitted)\n\n\ndef verify_careem_applicant(request,*args,**kwargs):\n\tif 'phonenumber' in request.session:\n\t\tcar_phonenumber = request.session['phonenumber']\n\t\tcar_firstname = request.session['firstname']\n\t\tcar_lastname = request.session['lastname']\n\t\tcar_car = request.session['car']\n\t\tcar_city = request.session['city']\n\t\tcar_license = request.session['license']\n\t\tcareem_data = {'firstname':car_firstname,'lastname':car_lastname,'car':car_car,\\\n\t\t'city':car_city,'license':car_license,'phonenumber':car_phonenumber,'user_id':request.user.id}\n\t\tsaved = save_careem_data(careem_data)\n\t\trequest.session.pop('firstname',None) \n\t\trequest.session.pop('lastname',None)\n\t\trequest.session.pop('car',None)\n\t\trequest.session.pop('city',None)\n\t\trequest.session.pop('license',None)\n\t\trequest.session.pop('phonenumber',None)\n\t\trequest.session.pop('csrf_careem',None)\n\t\tif saved:\n\t\t\treturn render(request,\"careem_application_submitted.html\",{})\n\t\telse:\n\t\t\treturn render(request,\"careem_number_already_used.html\",{})\n\telse:\n\t\treturn render(request,\"404.html\",{})\n\n\n\ndef verify_forgetter_number(request,*args,**kwargs):\n\tuser_id, MN_data, err = get_requirements(request=request,csrf=None, csrf_omitted=True)\n\tif user_id and MN_data:\n\t\tmob_nums = get_user_verified_number(user_id)\n\t\tif MN_data['national_number'] in mob_nums:\n\t\t\t# prompt user to change password\n\t\t\treturn render(request,\"unauth/set_new_password.html\",{'user_id':user_id,'form':ResetForgettersPasswordForm()})\n\t\telse:\n\t\t\treturn render(request,\"unverified_number.html\",{'referrer':'login','from_ecomm':False})\n\telif user_id == 'generic' or user_id == 'used' or user_id == 'expired' or user_id == 'invalid':\n\t\treturn render(request,\"unverified_number.html\",{'referrer':'login','reason':user_id,'from_ecomm':False})\n\telif err['status'] == \"NOT_AUTHENTICATED\":\n\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':user_id,'referrer':'login','type':'forgetter','from_ecomm':False})\n\telse:\n\t\treturn render(request, \"try_again.html\",{'type':'forgetter','from_ecomm':False})\n\n\n\ndef verify_user_number(request,*args,**kwargs):\n\tif 
request.mobile_verified:\n\t\treturn render(request,\"already_verified.html\",{})\n\telse:\n\t\tuser_id = request.user.id\n\t\tcsrf = get_user_csrf(user_id=str(user_id))\n\t\tif csrf:\n\t\t\tAK_ID, MN_data, err = get_requirements(request=request,csrf=csrf)\n\t\t\tif AK_ID and MN_data:\n\t\t\t\tif someone_elses_number(national_number=MN_data['national_number'], user_id=user_id):\n\t\t\t\t\treturn render(request,\"wrong_number.html\",{'referrer':'home','from_ecomm':False})\n\t\t\t\telse:\n\t\t\t\t\tsave_consumer_credentials.delay(AK_ID, MN_data, user_id)\n\t\t\t\t\tincrease_user_points.delay(user_id=user_id, increment=NUMBER_VERIFICATION_BONUS)\n\t\t\t\t\treturn render(request,\"verification/reward_earned.html\",{})\n\t\t\telif AK_ID == 'generic' or AK_ID == 'used' or AK_ID == 'expired' or AK_ID == 'invalid':\n\t\t\t\treturn render(request,\"unverified_number.html\",{'referrer':'home','reason':AK_ID,'from_ecomm':False})\n\t\t\telif err['status'] == \"NOT_AUTHENTICATED\":\n\t\t\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':csrf,'referrer':'home','type':'user','from_ecomm':False})\n\t\t\telif err['status'] == \"PARTIALLY_AUTHENTICATED\":\n\t\t\t\treturn render(request,\"try_again.html\",{'type':'user','from_ecomm':False})\n\t\t\telse:\n\t\t\t\treturn render(request,\"unverified_number.html\",{'referrer':'home','from_ecomm':False})\n\t\telse:\n\t\t\treturn render(request,\"try_again.html\",{'type':'user','from_ecomm':False})\n\n\n# def verify_consumer_number(request,*args,**kwargs):\n# \tuser_id = request.user.id\n# \tdata = get_buyer_snapshot(user_id=str(user_id))\n# \tif data:\n# \t\tAK_ID, MN_data, err = get_requirements(request=request,csrf=data[\"csrf\"])\n# \t\tif AK_ID and MN_data:\n# \t\t\tif someone_elses_number(national_number=MN_data['national_number'], user_id=user_id):\n# \t\t\t\treturn render(request,\"wrong_number.html\",{'referrer':data[\"referrer\"],'from_ecomm':True})\n# \t\t\telse:\n# \t\t\t\tsave_consumer_credentials.delay(AK_ID, MN_data, user_id)\n# \t\t\t\tif data[\"redirect_to\"]:\n# \t\t\t\t\treturn redirect(\"show_seller_number\")\n# \t\t\t\telse:\n# \t\t\t\t\treturn redirect(\"classified_listing\")\n# \t\telif AK_ID == 'generic' or AK_ID == 'expired' or AK_ID == 'used' or AK_ID == 'invalid':\n# \t\t\treturn render(request,\"unverified_number.html\",{'referrer':'classified_listing','reason':AK_ID,'from_ecomm':True})\n# \t\telif err['status'] == \"NOT_AUTHENTICATED\":\n# \t\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':data[\"csrf\"],'referrer':data[\"referrer\"],'type':'consumer','from_ecomm':True})\n# \t\telse:\n# \t\t\treturn render(request,\"unverified_number.html\",{'referrer':'classified_listing','from_ecomm':True})\n# \telse:\n# \t\treturn render(request,\"try_again.html\",{'type':'consumer','from_ecomm':True})\n\n\ndef verify_basic_item_seller_number(request,*args,**kwargs):\n\tuser_id = request.user.id\n\tCSRF = get_temporarily_saved_ad_data(user_id=str(user_id),only_csrf=True)\n\tAK_ID, MN_data, err = get_requirements(request=request, csrf=CSRF)\n\tif AK_ID and MN_data:\n\t\tif someone_elses_number(national_number=MN_data[\"national_number\"],user_id=user_id):\n\t\t\treturn render(request,\"wrong_number.html\",{'referrer':reverse_lazy(\"show_user_ads\"),'from_ecomm':True})\n\t\telse:\n\t\t\tdata = get_temporarily_saved_ad_data(user_id=str(user_id),full_ad=True)\n\t\t\tcontext={'desc':data[\"desc\"],'is_new':data[\"is_new\"], 
'ask':data[\"ask\"],'is_barter':data[\"is_barter\"],'ad_id':data[\"ad_id\"],\\\n\t\t\t'seller_name':data[\"seller_name\"],'city':data[\"city\"],'AK_ID':AK_ID,'MN_data':MN_data,'user_id':user_id,'username':request.user.username,\\\n\t\t\t'town':data[\"town\"],'submission_device':data[\"submission_device\"],'on_fbs':data[\"on_fbs\"]}\n\t\t\t# register with Twilio's Notify service\n\t\t\tset_user_binding_with_twilio_notify_service.delay(user_id=user_id, phone_number=MN_data[\"number\"])\n\t\t\tsaved = save_basic_ad_data(context)\n\t\tif saved:\n\t\t\treturn render(request,\"basic_item_ad_submitted.html\",{})\n\t\telse:\n\t\t\tpass\n\telif AK_ID == 'expired' or AK_ID == 'used' or AK_ID == 'generic' or AK_ID == 'invalid':\n\t\treturn render(request,\"unverified_number.html\",{'referrer':'classified_listing','reason':AK_ID,'from_ecomm':True})\n\telif err['status'] == 'NOT_AUTHENTICATED':\n\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':CSRF,'type':'seller','from_ecomm':True})\n\telif CSRF is None:\n\t\treturn render(request,\"try_again.html\",{'type':'seller','from_ecomm':True})\n\telse:\n\t\treturn render(request,\"unverified_number.html\",{'referrer':'classified_listing','from_ecomm':True})\n\n####################################################################################\n\n\ndef verify_personal_group_user(request):\n\t\"\"\"\n\tProcesses mobile verification of personal group user\n\t\"\"\"\n\tuser_id = request.user.id\n\ttid, csrf = get_personal_group_target_id_and_csrf(user_id)\n\tif csrf:\n\t\tAK_ID, MN_data, err = get_requirements(request=request,csrf=csrf)\n\t\tif AK_ID and MN_data:\n\t\t\tif someone_elses_number(national_number=MN_data['national_number'], user_id=user_id):\n\t\t\t\treturn render(request,\"wrong_number.html\",{'referrer':'personal_group','tid':tid,'from_ecomm':False})\n\t\t\telif not can_change_number(user_id):\n\t\t\t\treturn render(request,\"already_verified.html\",{'from_personal_group':True,'tid':tid})\n\t\t\telse:\n\t\t\t\tsave_consumer_credentials.delay(AK_ID, MN_data, user_id)\n\t\t\t\tset_personal_group_mobile_num_cooloff(user_id)\n\t\t\t\town_anon_status, their_anon_status, group_id = get_personal_group_anon_state(user_id, tid)\n\t\t\t\tif their_anon_status is None:\n\t\t\t\t\treturn redirect('home')\n\t\t\t\telse:\n\t\t\t\t\treturn render(request,\"personal_group/sms_settings/personal_group_successful_mob_verification.html\",{'tid':tid,\\\n\t\t\t\t\t\t'avatar':None if their_anon_status else UserProfile.objects.filter(user_id=tid).values_list('avatar',flat=True)[0],\\\n\t\t\t\t\t\t'their_anon':their_anon_status,'name':retrieve_uname(tid,decode=True)})\n\t\telif AK_ID == 'generic' or AK_ID == 'used' or AK_ID == 'expired' or AK_ID == 'invalid':\n\t\t\treturn render(request,\"unverified_number.html\",{'referrer':'personal_group','tid':tid,'reason':AK_ID,'from_ecomm':False})\n\t\telif err['status'] == \"NOT_AUTHENTICATED\":\n\t\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':csrf,'tid':tid,'type':'personal_group_chatter','from_ecomm':False})\n\t\telif err['status'] == \"PARTIALLY_AUTHENTICATED\":\n\t\t\treturn render(request,\"try_again.html\",{'type':'personal_group_chatter','from_ecomm':False,'tid':tid})\n\t\telse:\n\t\t\treturn render(request,\"unverified_number.html\",{'referrer':'personal_group','tid':tid,'from_ecomm':False})\n\telse:\n\t\treturn 
render(request,\"try_again.html\",{'type':'personal_group_chatter','from_ecomm':False,'tid':tid})\n\n\t\n\n####################################################################################\ndef verify_buyer_number(request):\n\t# try:\n\t# \tcsrf = request.session[\"csrf\"]\n\t# except:\n\t# \treturn redirect(\"mobile_shop\")\n\tcsrf = request.session[\"csrf\"]\t\n\tAK_ID, MN_data, err = get_requirements(request=request,csrf=csrf)\n\tif AK_ID and MN_data:\n\t\tuser_id = request.user.id\n\t\tif someone_elses_number(national_number=MN_data['national_number'], user_id=user_id):\n\t\t\t# mp.track(request.user.id, 'M_S_7.2 someelses Phone')\n\t\t\treturn render(request,\"wrong_number.html\",{'referrer':'mobile_shop'})\n\t\telse:\n\t\t\t# mp.track(request.user.id, 'M_S_7.3 Phone confirmed')\n\t\t\tsave_consumer_credentials.delay(AK_ID, MN_data, user_id)\n\t\t\torder_data = get_temp_order_data(user_id)\n\t\t\torder_data['phonenumber']=MN_data['number']\n\t\t\tmerch_id = order_data['merch_id']\n\t\t\tsaved = save_order_data(order_data)\n\t\t\tif saved:\n\t\t\t\t# mp.track(request.user.id, 'M_S_7 On confirm order')\n\t\t\t\treturn redirect(\"confirm_order\")#,{'merch_id':merch_id})\n\t\t\telse:\n\t\t\t\t# mp.track(request.user.id, 'M_S_E phone verification 2')\n\t\t\t\treturn render(request,\"404.html\",{})\n\telse:\n\t\t# mp.track(request.user.id, 'M_S_7.4 canceled phone verification')\n\t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':csrf,'type':'mobile_buyer','from_ecomm':False})\n\n\t\t####replace with page reminding importance of number verification\n\n\t# next step: now check if this is a uniquely new number, or is this someone else's number (use func someone_elses_number from redis3)\n\t# next step: now that you're sure the number hasn't been used before, use save_consumer_number (redis3) to save the number to our DB\n\t# next step: render the template that helps use complete the order\n\n\t# else:\n\t# \tprint \"data not received\"\n\t# \treturn render(request,\"careem_number_already_used.html\",{})\n\n#\t\t\tif someone_elses_number(national_number=MN_data['national_number'], user_id=user_id):\n#\t\t\t\treturn render(request,\"wrong_number.html\",{'referrer':data[\"referrer\"]})\n#\t\t\telse:\n#\t\t\tsave_consumer_credentials.delay(AK_ID, MN_data, user_id)\n#\t\t\tif data[\"redirect_to\"]:\n\t# \t\t\treturn redirect(\"show_seller_number\")\n\t# \t\telse:\n\t# \t\t\treturn redirect(\"classified_listing\")\n\t# \telif err['status'] == \"NOT_AUTHENTICATED\":\n\t# \t\treturn render(request,\"dont_worry_just_authenticate.html\",{'csrf':data[\"csrf\"],'referrer':data[\"referrer\"],'type':'consumer'})\n\t# \telse:\n\t# \t\tsave_number_verification_error_data(user_id=user_id, err_data=err, err_type='1', on_fbs=request.META.get('HTTP_X_IORG_FBS',False), is_auth=request.user.is_authenticated(),which_flow='consumer')\n\t# \t\treturn render(request,\"unverified_number.html\",{})\n\t# else:\n\t# \treturn render(request,\"try_again.html\",{'type':'consumer'})\n\n\n#\treturn render(request,\"buyer_verification.html\")\n\n\n\n\n","sub_path":"links/number_verification.py","file_name":"number_verification.py","file_ext":"py","file_size_in_byte":12840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408417753","text":"def rob(self, root: TreeNode) -> int:\n def robTree(node):\n \"\"\"\n now: max money earned if input node is robbed\n later: max money earned if input node is not robbed\n \"\"\"\n if not node:\n return 0, 0\n\n 
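# post-order traversal: solve both subtrees first, then combine their results below\n        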
left, right = robTree(node.left), robTree(node.right)\n\n        now = node.val + left[1] + right[1]\n\n        later = max(left) + max(right)\n\n        return now, later\n\n    res = robTree(root)\n\n    return max(res)\n","sub_path":"algo/dynamic/robbery/tree/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36585716","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom .models import Customer, Order, ProductsInOrder\nfrom products.models import Product\nfrom .forms import CustomerLoginForm, CustomerRegisterForm\n\n\ndef login_view(request):\n    form = CustomerLoginForm(request.POST or None)\n    next_ = request.GET.get('next')\n    print(next_)\n\n    if form.is_valid():\n        data = form.cleaned_data\n\n        email = data['email']\n        password = data['password']\n\n        user = User.objects.get(email=email)\n        username = user.username\n\n        user = authenticate(username=username, password=password)\n\n        login(request, user=user)\n        next_post = request.POST.get('next')\n        redirect_path = next_ or next_post or 'home'\n\n        request.session['cart'] = {}\n\n        return redirect(redirect_path)\n\n    context = {'form': form}\n\n    return render(request, 'customers/registration/login.html', context)\n\n\n@login_required(login_url='login')\ndef logout_view(request):\n    if 'cart' in request.session:\n        del request.session['cart']\n\n    logout(request)\n    return redirect('home')\n\n\ndef signup_view(request):\n    if request.method == 'POST':\n        register_form = CustomerRegisterForm(request.POST)\n\n        if register_form.is_valid():\n            register_form.save()\n            return redirect('login')\n    else:\n        register_form = CustomerRegisterForm()\n\n    context = {'form': register_form}\n\n    return render(request, 'customers/registration/signup.html', context)\n\n\n@login_required(login_url='login')\ndef order_view(request):\n    if request.method == 'POST':\n        customer_pk = request.user.customer.pk\n        customer = Customer.objects.get(pk=customer_pk)\n\n        cart = request.session['cart']\n\n        if len(cart) > 0:\n            order = Order.objects.create(customer=customer)\n\n            for key, value in cart.items():\n                product = Product.objects.get(pk=key)\n                quantity = value['quantity']\n                ProductsInOrder.objects.create(order=order,\n                                               product=product,\n                                               quantity=quantity,\n                                               )\n            request.session['cart'] = {}\n            request.session.modified = True\n\n            messages.success(request,\n                             f\"Thank you, {customer}! 
Your order has been placed.\")\n\n    return redirect('cart')\n","sub_path":"diploma_store/customers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605189707","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 18 16:02:44 2020\r\nError handling for student grade assignment\r\n@author: Ashish\r\n\"\"\"\r\ndata_valid = False\r\ngrade1 = float(input(\"Type the grade of the first test: \"))\r\ngrade2 = float(input(\"Type the grade of the second test: \"))\r\nabsences = int(input(\"Type the number of absences: \"))\r\ntotal_classes = int(input(\"Type the total number of classes: \"))\r\n\r\n# error handling\r\n# check if t\r\navg_grade = (grade1+grade2)/2\r\nattendance = (total_classes-absences)/total_classes\r\n\r\nprint(\"Average grade: \", round(avg_grade,2))\r\nprint(\"Attendance Rate: \", str(round(attendance*100,2))+\"%\")\r\n\r\nif avg_grade>=6 and attendance >=0.8:\r\n    print(\"The student is approved\")\r\nelif avg_grade<6 and attendance <0.8:\r\n    print(\"The student has failed due to an avg grade lower than 6.0 and attendance lower than 0.8\")\r\nelif attendance >=0.8:\r\n    print(\"The student has failed due to an average grade lower than 6.0\")\r\nelse:\r\n    print(\"The student has failed due to attendance rate lower than 80%\")\r\n","sub_path":"scripts/fundamentals/err_handling_student_grade_assigner.py","file_name":"err_handling_student_grade_assigner.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381376251","text":"import ssl\nimport socket\nimport json\n\n\ndef send_msg(msg, port):\n    message = json.dumps(msg)\n    try:\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        # ssl_context = ssl.create_default_context()\n        # ssl_sock = ssl_context.wrap_socket(sock)\n        # ssl_sock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, ciphers=\"ADH-AES256-SHA\")\n        try:\n            key_file_path = './certificates/CA/client-key.pem'\n            cert_file_path = './certificates/CA/client_cert.pem'\n            ssl_sock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, ciphers=\"TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256\", keyfile=key_file_path, certfile=cert_file_path)\n        except (OSError, ssl.SSLError):\n            # fall back to a plain socket if the certificates cannot be loaded\n            ssl_sock = sock\n        ssl_sock.connect(('127.0.0.1', port))\n    except OSError:\n        print(\"Could not make a connection to the server at localhost:\" + str(port))\n        return\n\n    ssl_sock.sendall(str.encode(message))\n    reply_message = ssl_sock.recv(4096)\n    reply_message = str(reply_message.decode(\"utf-8\"))\n\n    reply_message = json.loads(reply_message)\n    ssl_sock.close()\n    return reply_message\n","sub_path":"Connection/client_connection.py","file_name":"client_connection.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226312828","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport sys\n\n\nif __name__ == '__main__':\n    assert len(sys.argv) == 4\n\n    cov_df = pd.read_table(sys.argv[1], index_col=False, header=None)\n    cov = cov_df.iloc[:, 2]\n\n    bin_len = int(sys.argv[2])\n    x = list(range(0, len(cov), bin_len))\n    plt.plot(x, [cov[i:i + bin_len].mean() for i in x]) #, bin_len)\n    plt.xlabel('Position')\n    plt.ylabel('Coverage')\n    plt.gca().set_xlim(left=0)\n    plt.gca().set_ylim(bottom=0)\n    plt.savefig(sys.argv[3])\n\n    print('Mean 
coverage: {:.3f}'.format(cov.mean()))\n print('{:.2f}% of genome is covered'.format((cov > 0).sum() / len(cov) * 100))\n","sub_path":"4/main/cov_stats.py","file_name":"cov_stats.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316584877","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 15 12:40:38 2020\r\n\r\nAsks user for number between 0 and 100\r\nProgram then uses bisection search to guess their number\r\nProgram has an infinite number of tries\r\n\r\n@author: Jun\r\n\"\"\"\r\nlow = 0\r\nhigh = 100\r\nans = round((high + low)/2)\r\nprint(\"Please think of a number between 0 and 100!\")\r\nuser_input = ''\r\nwhile user_input != 'c': \r\n if user_input == 'h':\r\n high = ans\r\n ans = int((high + low)/2)\r\n elif user_input == 'l':\r\n low = ans\r\n ans = int((high + low)/2)\r\n print(\"Is your secret number\", str(ans), \"?\")\r\n user_input = input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly.\")\r\n if user_input not in ['h', 'l', 'c']:\r\n print(\"Sorry, I did not understand your input\")\r\nprint(\"Game over. Your secret number was:\", str(ans))","sub_path":"MIT edX Lecture Exercises/NumberGuessingGame.py","file_name":"NumberGuessingGame.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429400493","text":"print(\"********** Upper and Lower Case **********\\n\\n\\n\")\ndef up_low(args):\n up=0\n low=0\n for i in args:\n if i.islower():\n low+=1\n elif i.isupper():\n up+=1\n else:\n pass\n return print(f\"There are {up} upper case and {low} lower case letters.\")\nwhile True:\n data=input(\"Please enter the word or phrase you would like to find how many lower and uppercase letters it contains.\\n\")\n up_low(data)\n while True:\n q=input(\"\"\"Would you like to quit? If yes please press \"q\", if no please press \"ENTER\".\n\"\"\")\n if q.lower()==\"q\":\n print(\"Program terminated.\")\n quit()\n elif not q:\n break\n else:\n print(\"You made an incorrect entry. 
Please check and read instructions!!!\")\n","sub_path":"Homework 9 - Upper and Lower Case.py","file_name":"Homework 9 - Upper and Lower Case.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412494717","text":"import random\nimport select\nimport pygame\nimport time\nimport os\nimport json\nimport math\nimport sys\nfrom snack_class_final import *\nfrom snack_utils import *\nfrom buff_class import *\nfrom chat_utils import *\nfrom camera import *\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\n\nclass Racing:\n\n    def __init__(self, s, display=(800, 600), max_distance=50):\n\n        self.camera = Camera()\n        self.display = display\n        self.max_distance = max_distance\n        self.object_speed = 2\n        self.camera.speed = 0.2\n        self.s = s\n\n        pygame.init()\n\n        pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL) # | FULLSCREEN)\n\n        # the near clipping plane must be > 0 for a valid perspective matrix\n        gluPerspective(45, (self.display[0] / self.display[1]), 0.1, self.max_distance)\n\n        glTranslatef(0, 0, self.camera.z)\n\n        # Colors\n        self.white = (255, 255, 255)\n        self.black = (0, 0, 0)\n        self.red = (255, 0, 0)\n        self.light_red = (255, 153, 153)\n        self.yellow = (230, 230, 0)\n        self.light_yellow = (255, 255, 153)\n        self.green = (0, 255, 0)\n        self.light_green = (153, 255, 153)\n        self.royal_blue = (65, 105, 225)\n        self.light_black = (105, 105, 105)\n\n        # Fonts\n        self.small_font = pygame.font.SysFont(\"comicsansms\", 25)\n        self.med_font = pygame.font.SysFont(\"comicsansms\", 50)\n        self.large_font = pygame.font.SysFont(\"comicsansms\", 80)\n\n        self.vertices = (\n            (1, -1, -1),\n            (1, 1, -1),\n            (-1, 1, -1),\n            (-1, -1, -1),\n            (1, -1, 1),\n            (1, 1, 1),\n            (-1, -1, 1),\n            (-1, 1, 1),\n        )\n\n        self.edges = (\n            (0, 1),\n            (0, 3),\n            (0, 4),\n            (2, 1),\n            (2, 3),\n            (2, 7),\n            (6, 3),\n            (6, 4),\n            (6, 7),\n            (5, 1),\n            (5, 4),\n            (5, 7),\n        )\n\n        self.surfaces = (\n            (0, 1, 2, 3),\n            (3, 2, 7, 6),\n            (6, 7, 5, 4),\n            (4, 5, 1, 0),\n            (1, 5, 7, 2),\n            (4, 0, 3, 6),\n        )\n\n        self.colors = (\n            (1, 0, 0),\n            (0, 1, 0),\n            (0, 0, 1),\n            (0, 1, 0),\n            (1, 1, 1),\n            (0, 1, 1),\n            (1, 0, 0),\n            (0, 1, 0),\n            (0, 0, 1),\n            (1, 0, 0),\n            (1, 1, 1),\n            (0, 1, 1),\n        )\n\n        self.ground_vertices = (\n            (-10, -0.1, 20),\n            (10, -0.1, 20),\n            (-10, -0.1, -300),\n            (-10, -0.1, -300),\n        )\n\n    # def ground(self):\n    #     glBegin(GL_QUADS)\n    #     for vertex in ground_vertices:\n    #         glColor3fv((0, 0.5, 0.5))\n    #         glVertex3fv(vertex)\n    #\n    #     glEnd()\n\n    def message_to_whole_screen(self, msg, color, y_displacement=0, size='small'):\n        # requires a 2D surface (self.gameDisplay), which is not created in OpenGL mode\n        text_surface, text_rec = self.text_object(msg, color, size)\n        text_rec.center = self.display[0] / 2, self.display[1] / 2 + y_displacement\n        self.gameDisplay.blit(text_surface, text_rec)\n\n    def text_object(self, text, color, size):\n        if size == \"small\":\n            text_surface = self.small_font.render(text, True, color)\n        elif size == \"medium\":\n            text_surface = self.med_font.render(text, True, color)\n        elif size == \"large\":\n            text_surface = self.large_font.render(text, True, color)\n        else:\n            raise ValueError(\"size must be 'small', 'medium' or 'large'\")\n\n        return text_surface, text_surface.get_rect()\n\n    def cube(self, vertices):\n        glBegin(GL_QUADS)\n\n        for surface in self.surfaces:\n            x = 0\n            for vertex in surface:\n                x += 1\n                glColor3fv(self.colors[x])\n                glVertex3fv(vertices[vertex])\n        glEnd()\n\n    def set_vertices(self, max_distance, min_distance=-20, camera_x=0, camera_y=0):\n\n        camera_x = -1 * int(camera_x)\n        camera_y = -1 * int(camera_y)\n\n        x_change = random.randrange(camera_x - 10, camera_x + 10)\n        y_change = random.randrange(camera_y - 10, camera_y 
 def set_vertices(self, max_distance, min_distance=-20, camera_x=0, camera_y=0):\n\n camera_x = -1 * int(camera_x)\n camera_y = -1 * int(camera_y)\n\n x_change = random.randrange(camera_x - 10, camera_x + 10)\n y_change = random.randrange(camera_y - 10, camera_y + 10) # -1\n z_change = random.randrange(-1 * max_distance, min_distance)\n\n new_vertices = []\n\n for vert in self.vertices:\n new_vert = list()\n\n new_vert.append(vert[0] + x_change)\n new_vert.append(vert[1] + y_change)\n new_vert.append(vert[2] + z_change)\n\n new_vertices.append(new_vert)\n return new_vertices\n\n def game_over(self):\n\n game_over = True\n\n while game_over:\n # self.message_to_whole_screen(\"Game over\",\n # self.red,\n # y_displacement=-90,\n # size=\"large\")\n # self.message_to_whole_screen(\"Distance: \" + str(self.camera.z),\n # self.green,\n # y_displacement=10,\n # size=\"medium\")\n # self.message_to_whole_screen(\"Press C to play again or Q to quit.\",\n # self.black,\n # y_displacement=90,\n # size=\"small\")\n # pygame.display.update()\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n game_over = False\n to_send = {\"action\": \"update\", \"continue\": False}\n mysend(self.s, json.dumps(to_send))\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_c:\n to_send = {\"action\": \"update\", \"continue\": True}\n mysend(self.s, json.dumps(to_send))\n self.__init__(self.s, self.display) # was self.me, which this class never defines\n self.game_loop()\n\n replay = \"waiting\"\n\n read, write, error = select.select([self.s], [], [], 0)\n if self.s in read:\n raw_recv = myrecv(self.s)\n if raw_recv:\n recv = json.loads(raw_recv)\n replay = recv[\"continue\"]\n\n if replay is False:\n game_over = False\n if replay is True:\n self.game_loop()\n\n def game_loop(self):\n main_loop = True\n x_move = 0\n y_move = 0\n\n self.camera.x = 0\n self.camera.y = 0\n\n cube_dict = {}\n\n for cube_idx in range(40):\n cube_dict[cube_idx] = self.set_vertices(self.max_distance)\n\n while main_loop:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x_move = self.camera.speed\n if event.key == pygame.K_RIGHT:\n x_move = - self.camera.speed\n if event.key == pygame.K_UP:\n y_move = - self.camera.speed\n if event.key == pygame.K_DOWN:\n y_move = self.camera.speed\n if event.key == pygame.K_ESCAPE:\n pygame.display.quit()\n pygame.display.init()\n pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL)\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n x_move = 0\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n y_move = 0\n\n # if event.type == pygame.MOUSEBUTTONDOWN:\n # if event.button == 4:\n # glTranslatef(0, 0, 1.0)\n # if event.button == 5:\n # glTranslatef(0, 0, -1.0)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n self.camera.x += x_move\n self.camera.y += y_move\n self.camera.z += self.object_speed\n glTranslatef(x_move, y_move, self.object_speed)\n\n x = glGetDoublev(GL_MODELVIEW_MATRIX)\n\n camera_x = x[3][0]\n camera_y = x[3][1]\n camera_z = x[3][2]\n\n # ground()\n\n for cube_idx in cube_dict:\n self.cube(cube_dict[cube_idx])\n\n for cube_idx in cube_dict:\n if camera_z <= cube_dict[cube_idx][0][2] and \\\n cube_dict[cube_idx][0][0] - 2 <= camera_x <= cube_dict[cube_idx][0][0] and \\\n cube_dict[cube_idx][0][1] <= camera_y <= cube_dict[cube_idx][0][1] + 2:\n self.camera.life -= 10\n if camera_z <= cube_dict[cube_idx][0][2]:\n new_max = int(self.max_distance * 2 - camera_z)\n cube_dict[cube_idx] = self.set_vertices(new_max, int(camera_z - self.max_distance),\n self.camera.x, self.camera.y)\n\n if self.camera.life <= 0:\n self.game_over()\n main_loop = False\n\n pygame.display.flip()\n\n 
pygame.quit()\n","sub_path":"racing.py","file_name":"racing.py","file_ext":"py","file_size_in_byte":9361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"40156984","text":"\"\"\"\nSplit String - Maximum Value\nThe program accepts a string S containing only 0s and 1s as the input. The program must split the string S into two substring values so that the concatenation of the number of 0s in the left substring and the number of 1s in the right substring is maximum. Then the program must print those two substring values of S as the output.\n\nBoundary Condition(s):\n1 <= Length of S <= 100\n\nInput Format:\nThe first line contains S.\n\nOutput Format:\nThe first line contains the two substring values of S as per the given condition.\n\nExample Input/Output 1:\nInput:\n001110111\n\nOutput:\n001110 111\n\nExplanation:\nHere S = 001110111.\nAll possible ways to split the string S are given below.\n0 01110111 -> 16\n00 1110111 -> 26\n001 110111 -> 25\n0011 10111 -> 24\n00111 0111 -> 23\n001110 111 -> 33\n0011101 11 -> 32\n00111011 1 -> 31\nThe maximum value is 33 (3 zeroes in 001110 & 3 ones in 111).\nHence the output is\n001110 111\n\nExample Input/Output 2:\nInput:\n101010\n\nOutput:\n1010 10\n\n\"\"\"\n\nstring=input().strip()\ntemp=0\nele=0\n# '10' needs special-casing: its only split scores 0, so the loop below never updates ele.\nif(string=='10'):\n print(1,0)\n quit()\nfor index in range(1,len(string)):\n if int(str(string[:index].count('0'))+str(string[index:].count('1')))>temp:\n temp=int(str(string[:index].count('0'))+str(string[index:].count('1')))\n ele=index\nprint(string[:ele],string[ele:])","sub_path":"Python Programs/Split String - Maximum Value.py","file_name":"Split String - Maximum Value.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245906588","text":"# Numbers of the 124 Country\n# Problem description\n# There is a country called 124. Instead of base 10, the 124 country writes numbers with its own rules:\n\n# Only natural numbers exist in the 124 country.\n# The 124 country uses only 1, 2 and 4 to write every number.\n\ndef solution(n):\n answer = ''\n while(n>0):\n mod = n%3\n n//=3\n if mod == 0: # was 'mod is 0': identity check on an int; value equality is intended\n n-=1\n answer = '412'[mod] + answer\n return answer\n\nsolution(5)
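\n\n# Worked trace (added for clarity): the mapping runs 1->'1', 2->'2', 3->'4', 4->'11', 5->'12'.\n# For n=5: 5%3=2 gives digit '2' and n becomes 1; then 1%3=1 gives digit '1' and n becomes 0.\n# Digits are prepended, so the call above returns '12'.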
","sub_path":"programmers/level_2/124world.py","file_name":"124world.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105670949","text":"\r\n\r\n \r\nmat1= []\r\nmat2 = []\r\nr = int(input(\"enter the number of rows\"))\r\nc = int(input(\"enter the number of columns\"))\r\n\r\nprint(\"Enter elements for mat1\")\r\n\r\nfor i in range(0, r):\r\n elements = []\r\n for j in range(0, c):\r\n elements.append(int(input(\"enter the element\")))\r\n mat1.append(elements)\r\nprint(\"enter elements for mat2\")\r\nfor i in range(0, r):\r\n elements=[]\r\n for j in range(0, c):\r\n elements.append(int(input(\"enter the element\")))\r\n mat2.append(elements)\r\nsums = [] # renamed from 'sum' to avoid shadowing the built-in\r\ndif = []\r\nfor i in range(0,r):\r\n sumel= []\r\n difel= []\r\n for j in range(0,c):\r\n sumel.append(mat1[i][j] + mat2[i][j])\r\n difel.append(mat1[i][j] - mat2[i][j])\r\n sums.append(sumel)\r\n dif.append(difel)\r\nprint(sums)\r\nprint(dif)\r\n \r\n","sub_path":"adding and subtracting list.py","file_name":"adding and subtracting list.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537318393","text":"# Prints the n-th digit (1-indexed) of the infinite digit sequence 1234567891011121314...\nn = int(input())\ne = 9\ni = 1\ncounter = 0\nwhile n > 0:\n\t#print(\"{0}, {1}, {2}\".format(n, e, i))\n\tif n - e * i >= 0:\n\t\tn -= e * i\n\t\tcounter += e\n\t\te *= 10\n\t\ti += 1\n\telse:\n\t\tbreak\ncounter += int(n / i)\nif n % i == 0:\n\tprint(str(counter)[i - 1])\nelse:\n\tprint(str(counter + 1)[n % i - 1])\n\n\n","sub_path":"Se.py","file_name":"Se.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533171094","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport rospy\nimport rospkg\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import Empty\nfrom asv_msgs.msg import StateArray\nfrom visualization_msgs.msg import Marker\nfrom scipy.signal import savgol_filter as sgf\nimport os\nimport time\nimport sys\n\nclass Referee(object) :\n\n def __init__(self, dt=0.2, finished=0,\n output='/home/adrien/catkin/src/seaowl/asv_common/output',\n op='0') :\n\n self.debugBool = False #if True prints the trajectory (t,x,y) of asv in debug.txt\n self.rvizBool = False\n self.tMaxSim = rospy.get_param(\"~tMaxSim\",600.)\n\n self.begin_wall = 0.\n self.begin_sim = 0.\n self.start = False\n self.rate = 1/dt\n\n self.odom = []\n self.n_obst = -1\n self.obst_prior = []\n self.obst_radii = []\n self.obst_states = []\n self.dcpa = []\n self.tcpa = []\n self.cross = []\n self.security = [] # safety indicators for each obstacle\n self.t0 = 4. # safety time in s\n self.d0 = 1/100 # minimum distance\n self.t1 = 10 # maneuvering time\n self.d1 = 500 # maneuvering distance\n self.tth = 75 # theoretical maneuvering time\n self.r_offset = 5. # offset for COLREG compliance\n self.size = 8. 
#asv size radius\n self.output = output\n self.opus = op\n self.finished = finished #0,2 : no shutdown at the end; 1,3 : shutdown at the end; 0,1 : program running; 2,3 : program ended\n self.side = []\n if self.debugBool:\n self.debug = open(f'/home/adrien/catkin_ws/src/seaowl/asv_common/debug.txt','w')\n self.traj = []\n self.nend = True\n\n if self.rvizBool:\n self.cpa = Marker()\n self.cpa.header.frame_id = \"map\"\n self.cpa.header.stamp = rospy.get_rostime()\n self.cpa.ns = \"cpa\"\n self.cpa.id = 0\n self.cpa.type = 1\n self.cpa.action = 0\n self.cpa.pose.position.x = 0\n self.cpa.pose.position.y = 0\n self.cpa.pose.position.z = 10\n self.cpa.pose.orientation.x = 0\n self.cpa.pose.orientation.y = 0\n self.cpa.pose.orientation.z = 0\n self.cpa.pose.orientation.w = 1.0\n self.cpa.scale.x = 1.0\n self.cpa.scale.y = 1.0\n self.cpa.scale.z = 1.0\n self.cpa.color.r = 1.0\n self.cpa.color.g = 0\n self.cpa.color.b = 0.\n self.cpa.color.a = 1.0\n self.cpa.lifetime = rospy.Duration(0.)\n self.cpa_publisher = rospy.Publisher(\"cpa\", Marker, queue_size=10, latch=True)\n\n self.cpa2 = Marker()\n self.cpa2.header.frame_id = \"map\"\n self.cpa2.header.stamp = rospy.get_rostime()\n self.cpa2.ns = \"cpa2\"\n self.cpa2.id = 1\n self.cpa2.type = 1\n self.cpa2.action = 0\n self.cpa2.pose.position.x = 0\n self.cpa2.pose.position.y = 0\n self.cpa2.pose.position.z = 10\n self.cpa2.pose.orientation.x = 0\n self.cpa2.pose.orientation.y = 0\n self.cpa2.pose.orientation.z = 0\n self.cpa2.pose.orientation.w = 1.0\n self.cpa2.scale.x = 1.0\n self.cpa2.scale.y = 1.0\n self.cpa2.scale.z = 1.0\n self.cpa2.color.r = 1.0\n self.cpa2.color.g = 0\n self.cpa2.color.b = 0.\n self.cpa2.color.a = 1.0\n self.cpa2.lifetime = rospy.Duration(0.)\n self.cpa2_publisher = rospy.Publisher(\"cpa2\", Marker, queue_size=10, latch=True)\n\n self.asv_off_marker = Marker()\n self.asv_off_marker.header.frame_id = \"map\"\n self.asv_off_marker.header.stamp = rospy.get_rostime()\n self.asv_off_marker.ns = \"asv_off_marker\"\n self.asv_off_marker.id = 2\n self.asv_off_marker.type = 2\n self.asv_off_marker.action = 0\n self.asv_off_marker.pose.position.x = 0.\n self.asv_off_marker.pose.position.y = 0.\n self.asv_off_marker.pose.position.z = 0\n self.asv_off_marker.pose.orientation.x = 0\n self.asv_off_marker.pose.orientation.y = 0\n self.asv_off_marker.pose.orientation.z = 0\n self.asv_off_marker.pose.orientation.w = 1.\n self.asv_off_marker.scale.x = 1.0\n self.asv_off_marker.scale.y = 1.0\n self.asv_off_marker.scale.z = 1.0\n self.asv_off_marker.color.r = 0.5\n self.asv_off_marker.color.g = 0.0\n self.asv_off_marker.color.b = 0.5\n self.asv_off_marker.color.a = 1.0\n self.asv_off_marker.lifetime = rospy.Duration(0.)\n self.asv_off_publisher = rospy.Publisher(\"asv_off\", Marker, queue_size=10, latch=True)\n\n self.obst_off_marker = Marker()\n self.obst_off_marker.header.frame_id = \"map\"\n self.obst_off_marker.header.stamp = rospy.get_rostime()\n self.obst_off_marker.ns = \"obst_off_marker\"\n self.obst_off_marker.id = 3\n self.obst_off_marker.type = 2\n self.obst_off_marker.action = 0\n self.obst_off_marker.pose.position.x = 0\n self.obst_off_marker.pose.position.y = 0\n self.obst_off_marker.pose.position.z = 0\n self.obst_off_marker.pose.orientation.x = 0\n self.obst_off_marker.pose.orientation.y = 0\n self.obst_off_marker.pose.orientation.z = 0\n self.obst_off_marker.pose.orientation.w = 1.0\n self.obst_off_marker.scale.x = 1.0\n self.obst_off_marker.scale.y = 1.0\n self.obst_off_marker.scale.z = 1.0\n self.obst_off_marker.color.r = 
0.5\n self.obst_off_marker.color.g = 0.0\n self.obst_off_marker.color.b = 0.5\n self.obst_off_marker.color.a = 1.0\n self.obst_off_marker.lifetime = rospy.Duration(0.)\n self.obst_off_publisher = rospy.Publisher(\"obst_off\", Marker, queue_size=10, latch=True)\n\n self.start_publisher = rospy.Publisher(\"start_simulation\", Empty, queue_size=1, latch=True)\n self.asv_off_publisher = rospy.Publisher(\"asv_off\", Marker, queue_size=10, latch=True)\n self.obst_off_publisher = rospy.Publisher(\"obst_off\", Marker, queue_size=10, latch=True)\n self.cpa_publisher = rospy.Publisher(\"cpa\", Marker, queue_size=10, latch=True)\n self.cpa2_publisher = rospy.Publisher(\"cpa2\", Marker, queue_size=10, latch=True)\n self._finish_publisher = rospy.Publisher(\"end_simulation\",Empty, queue_size = 1, latch = True)\n\n self._odom_subscriber = rospy.Subscriber(\"state\", Odometry,\n self._odom_callback,\n queue_size=1)\n self._obst_subscriber = rospy.Subscriber(\"obStatesRef\",\n StateArray, self._obst_callback,\n queue_size=1)\n self._finish_subscriber = rospy.Subscriber(\"end_simulation\",\n Empty, self._finish_callback,\n queue_size=1)\n self._start_subscriber = rospy.Subscriber(\"start_simulation\", Empty,\n self._start_callback,\n queue_size=10)\n\n def _odom_callback(self, data):\n if self.nend:\n if len(self.odom) == 0:\n self.odom = np.zeros(5)\n t = rospy.get_time()-self.begin_sim\n self.odom[0] = data.pose.pose.position.x\n self.odom[1] = data.pose.pose.position.y\n self.odom[4] = t\n #print(f'init pose: {self.odom[0:2]}')\n else:\n t = rospy.get_time()-self.begin_sim\n x = self.odom[0]\n y = self.odom[1]\n vx = self.odom[2]\n vy = self.odom[3]\n self.odom[0] = data.pose.pose.position.x\n self.odom[1] = data.pose.pose.position.y\n self.odom[2] = (self.odom[0]-x)/(t-self.odom[4])\n self.odom[3] = (self.odom[1]-y)/(t-self.odom[4])\n self.odom[4] = t\n #print([t,self.odom[0],self.odom[1]])\n self.traj.append(np.array([t,self.odom[0],self.odom[1]]))\n if self.debugBool:\n self.debug.write(f'{t}\\t{self.odom[0]}\\t{self.odom[1]}\\n')\n #print(f'add {t}')\n #print(f'uAsv = {np.linalg.norm(np.array([data.twist.twist.linear.x,data.twist.twist.linear.y]))}')\n def _obst_callback(self, data):\n if self.nend:\n if (self.n_obst == -1) :\n self.n_obst = len(data.states)\n self.dcpa = np.ones(self.n_obst)*sys.float_info.max\n self.tcpa = np.zeros((self.n_obst,2))\n self.cross = np.array(self.n_obst*[np.infty])\n self.obst_states = np.zeros((self.n_obst, 4))\n self.obst_radii = np.zeros(self.n_obst)\n self.security = np.zeros((self.n_obst,15))\n self.side = np.zeros(self.n_obst)\n self.obst_prior = np.array(self.n_obst*[\"\"])\n for i in range(self.n_obst):\n self.obst_radii[i] = data.states[i].header.radius\n self.obst_prior[i] = data.states[i].header.prior\n for j in range(1,4):\n self.security[i,j] = -np.inf\n\n for i in range(self.n_obst) :\n self.obst_states[i, 0] = data.states[i].x\n self.obst_states[i, 1] = data.states[i].y\n self.obst_states[i, 2] = data.states[i].u*np.cos(data.states[i].psi)\n self.obst_states[i, 3] = data.states[i].u*np.sin(data.states[i].psi)\n\n def _start_callback(self, data):\n if (not self.start):\n self.start = True\n self.begin_sim = rospy.Time.to_sec(rospy.Time.now())\n self.begin_wall = time.time()\n print(\"---------------------BEGINNING OF THE SIMULATION---------------------\")\n\n def _finish_callback(self, data) :\n self.nend = False\n if self.debugBool:\n self.debug.close()\n \n t = rospy.get_time()-self.begin_sim\n \n for i in range(self.n_obst):\n self.traj = 
np.array(self.traj)\n w = 11 # Savitzky-Golay window length (samples)\n d = 2 # Savitzky-Golay polynomial order\n N = int(self.tcpa[i,1])\n pos = self.traj[:N,1:]\n vel = np.zeros((N,2))\n acc = np.zeros((N,2))\n for k in range(2): #coordinates\n pos[:,k] = sgf(pos[:,k],w,d, mode='nearest')\n vel[:,k] = np.gradient(pos[:,k], self.traj[:N,0])\n vel[:,k] = sgf(vel[:,k],w,d-1, mode='nearest')\n acc[:,k] = np.gradient(vel[:,k], self.traj[:N,0])\n acc[:,k] = sgf(acc[:,k],w,d-2, mode='nearest')\n v = np.linalg.norm(vel,axis = 1)\n irr = np.zeros((N,3)) #irregularity indicator\n irr[:,0] = np.linalg.norm(acc,axis = 1) #acceleration\n irr[:,1] = (vel[:,0]*acc[:,1]-vel[:,1]*acc[:,0])/v**2 #angular velocity\n irr[:,2] = irr[:,1]/v #curvature (1/radius)\n weight = att((np.abs(self.tcpa[i,0] -self.traj[:N,0]))/self.t1,3)\n weight = np.sqrt(weight/np.sum(weight))\n for k in range(3):\n self.security[i][4+k] = np.linalg.norm(irr[:,k]*weight)\n self.security[i,7+k] = self.security[i,1]+max(0,self.security[i,1])*self.security[i][4+k]\n self.security[i,0] = t\n self.security[i,10] = self.dcpa[i]\n self.security[i,11] = self.cross[i]\n weight = irr[:,0]**2/np.sum(irr[:,0]**2)\n self.security[i,12] = np.dot(weight,(np.abs(self.tcpa[i,0] -self.traj[:N,0])/self.t1))\n self.security[i,13] = 1/self.security[i,2]+max(1/self.security[i,2]-1,0)*self.security[i,12]\n\n f = open(f'{self.output}','a')\n print(\"---------------------END OF THE SIMULATION---------------------\")\n print(f'Duration of the simulation (real time) : {time.time() -self.begin_wall} s')\n print(f'Duration of the simulation: {t} s')\n print(f'Number of ships : {self.n_obst}')\n print(f'Opus : {self.opus}')\n for i in range(self.n_obst) :\n print(f'Ship {i+1}')\n print(f' --> dCPA = {self.dcpa[i]} m')\n print(f' --> tCPA = {self.tcpa[i,0]} s')\n print(f' --> security = {self.security[i]}')\n\n if (os.stat(self.output).st_size == 0) :\n f.write('OPUS TIME LOG_COL NAT_COL OFFSET_LOG ANTICIPATION_ACC ANTICIPATION_OMEGA ANTICIPATION_R AGG_ACC AGG_OMEGA AGG_R DCPA CROSSING_DIST ANT_TIME AGG_TIME N_CROSS\\n')\n f.write(f'{self.opus}')\n try:\n for k in range(len(self.security[0])) :\n if k in [10,11]: #dcpa or crossing distance\n f.write(f' {np.min(self.security[:,k])}')\n else:\n f.write(f' {np.max(self.security[:,k])}')\n f.write(f'\\n')\n except IndexError:\n f.write(' nan' * 15 + '\\n') # one placeholder per data column (TIME .. N_CROSS); only 13 were written before\n f.close()\n print(f'Output logged in {self.output}')\n print(\"---------------------------------------------------------------\")\n if self.finished == 0 or self.finished == 1:\n self.finished += 2\n\n def _update(self):\n if (self.n_obst > -1 and len(self.odom) > 0) :\n secu = self.ob_secu()\n for i in range(self.n_obst) :\n for j in range(1,len(secu[i])):\n self.security[i, j] = max(self.security[i, j],secu[i,j])\n if (secu[i,0] <= self.dcpa[i]) :\n self.dcpa[i] = secu[i,0]\n self.tcpa[i,0] = rospy.get_time() - self.begin_sim\n self.tcpa[i,1] = len(self.traj)\n\n if self.rvizBool:\n\n self.cpa.pose.position.x = self.odom[0]\n self.cpa.pose.position.y = self.odom[1]\n self.cpa2.pose.position.x = self.obst_states[i,0]\n self.cpa2.pose.position.y = self.obst_states[i,1]\n\n if self.rvizBool:\n\n self.cpa_publisher.publish(self.cpa)\n self.cpa2_publisher.publish(self.cpa2)\n\n side = np.dot(self.odom[0:2]-self.obst_states[i,0:2],rot(self.obst_states[i,2:4],np.pi/2))\n front = np.dot(self.odom[0:2]-self.obst_states[i,0:2],self.obst_states[i,2:4])\n if (side*self.side[i] < 0 and front > 0) :\n self.cross[i] = secu[i,0]\n self.security[i,14] += att(secu[i,0]/self.d1,3)\n self.side[i] = side\n\n\n\n 
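 # Closed-form reference (added for clarity): under a constant-velocity assumption,\n # tCPA = -dot(p_rel, v_rel)/|v_rel|^2 and dCPA = |p_rel + tCPA*v_rel|. The node itself\n # estimates dCPA/tCPA by tracking the running minimum of ob_secu() in _update() above;\n # this helper is a sketch and is not called anywhere.\n def cpa_closed_form(self, p_rel, v_rel):\n v2 = np.dot(v_rel, v_rel)\n t_cpa = 0. if v2 == 0 else max(0., -np.dot(p_rel, v_rel)/v2)\n d_cpa = np.linalg.norm(p_rel + t_cpa*v_rel)\n return d_cpa, t_cpa\n\n 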
def ob_dist(self) :\n dist = np.zeros(self.n_obst)\n for i in range(self.n_obst) :\n dist[i] = max(0,np.linalg.norm(self.obst_states[i,:2]-self.odom[:2])\n -self.size-self.obst_radii[i])\n return dist\n\n def ob_secu(self) :\n dist = self.ob_dist()\n rvel = np.zeros(self.n_obst) #relative velocity\n offd = np.zeros(self.n_obst) # distance with offset\n\n for i in range(self.n_obst) :\n rvel[i] = np.linalg.norm(self.obst_states[i,2:4]-self.odom[2:4])\n cpa = rot((self.obst_states[i,2:4]-self.odom[2:4])/rvel[i],np.pi/2)\n self.r_offset = dist[i]/4\n if np.dot(cpa,rot(self.odom[2:4],np.pi/2))>0 :\n cpa = -cpa\n if self.obst_prior[i] == \"g\":\n if np.dot(cpa,self.odom[2:4]) < 0:\n cpa = -cpa\n asv_off = self.odom[:2]+self.r_offset*cpa #off before asv\n\n obst_off = self.obst_states[i,:2]-self.r_offset*cpa\n elif self.obst_prior[i] == \"s\":\n if np.dot(cpa,self.odom[2:4]) > 0:\n cpa = -cpa\n asv_off = self.odom[:2]+self.r_offset*cpa #off behind asv\n obst_off = self.obst_states[i,:2]-self.r_offset*cpa\n else:\n if np.dot(self.obst_states[i,2:4],rot(self.odom[2:4],np.pi/2))>0 : #obst right of asv\n if np.dot(cpa,self.odom[2:4]) > 0:\n cpa = -cpa\n asv_off = self.odom[:2]+self.r_offset*cpa #off behind asv\n obst_off = self.obst_states[i,:2]-self.r_offset*cpa\n else : #obst left of asv\n if np.dot(cpa,self.odom[2:4]) < 0:\n cpa = -cpa\n asv_off = self.odom[:2]+self.r_offset*cpa #off before asv\n obst_off = self.obst_states[i,:2]-self.r_offset*cpa\n\n if self.rvizBool:\n\n self.asv_off_marker.pose.position.x = asv_off[0]\n self.asv_off_marker.pose.position.y = asv_off[1]\n self.asv_off_publisher.publish(self.asv_off_marker)\n self.obst_off_marker.pose.position.x = obst_off[0]\n self.obst_off_marker.pose.position.y = obst_off[1]\n self.obst_off_publisher.publish(self.obst_off_marker)\n\n offd[i]= min(max(0,np.linalg.norm(asv_off-obst_off)-self.size-self.obst_radii[i]),\n max(0,np.linalg.norm(self.odom[:2]-obst_off)-self.size-self.obst_radii[i]),\n max(0,np.linalg.norm(asv_off-self.obst_states[i,:2])-self.size-self.obst_radii[i]),\n dist[i])\n\n secu = np.zeros((self.n_obst,4))\n secu[:,0] = dist #distance\n secu[:,1] = np.log(self.t0*rvel*att(dist/self.d0,0)/self.d0) # logarithmic collision indicator\n secu[:,2] = self.t0*rvel*att(dist/self.d0,0) /self.d0 # natural collision indicator\n secu[:,3] = np.log(self.t0*rvel*att(offd/self.d0,0)/self.d0) # logarithmic collision indicator with offset\n\n return secu\n\n\n def run_controller(self):\n r = rospy.Rate(self.rate)\n while (not rospy.is_shutdown()) and self.finished < 2 :\n if self.begin_sim > 0:\n t = rospy.get_time()-self.begin_sim\n if t < self.tMaxSim or t>3*self.tMaxSim:\n self._update()\n else:\n msg = Empty()\n self._finish_publisher.publish(msg)\n try:\n r.sleep()\n except rospy.exceptions.ROSInterruptException as e:\n if rospy.is_shutdown():\n break\n raise\n if self.finished == 3:\n rospy.signal_shutdown(\"End of the simulation\")\n\n\n#utils\ndef att(x,option) :\n if option == 0:\n return np.minimum(1,1/x)\n if option == 1:\n return 1/(1+x)\n if option == 2:\n return np.exp(-x)\n if option == 3:\n return np.maximum(1-x,0)\n\n\ndef rot(u,phi):\n v = np.zeros(2)\n v[0] = np.cos(phi)*u[0]-np.sin(phi)*u[1]\n v[1] = np.sin(phi)*u[0]+np.cos(phi)*u[1]\n return v\n\nif __name__ == \"__main__\" :\n\n rospy.init_node(\"Referee\")\n\n rospack = rospkg.RosPack()\n\n dt = rospy.get_param(\"~update_rate\", .2)\n finished = rospy.get_param(\"~shutdown\", 0)\n output = rospy.get_param(\"~output_file\", 
f'{rospack.get_path(\"asv_common\")}/output/test.txt')\n op = rospy.get_param(\"~opus\", '0')\n\n print(f'Output : {output}')\n\n refer = Referee(dt, finished, output, op) # pass the configured rate; a hard-coded .01 silently ignored ~update_rate\n msg = Empty()\n refer.start_publisher.publish(msg) # start signal (temporary)\n refer.run_controller()\n","sub_path":"asv_referee/nodes/asv_referee_node.py","file_name":"asv_referee_node.py","file_ext":"py","file_size_in_byte":20315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647683908","text":"#!/usr/bin/env python3\n\nimport json\nimport sys\nfrom collections import defaultdict, Counter\nimport numpy as np\nimport pandas as pd\nimport re\nimport os\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nrepo_root = os.path.join(BASE_DIR, '..')\nos.chdir(repo_root)\n\nfrom utils.fix_label import fix_general_label_error\n\nEXPERIMENT_DOMAINS = [\"none\", \"hotel\", \"train\", \"restaurant\", \"attraction\", \"taxi\"]\nDOMAIN_INDICES = dict()\nfor domain in EXPERIMENT_DOMAINS:\n DOMAIN_INDICES[domain] = len(DOMAIN_INDICES)\ndef get_slot_information():\n ontology = json.load(open(\"data/multi-woz/MULTIWOZ2.1/ontology.json\", 'r'))\n ontology_domains = dict([(k, v) for k, v in ontology.items() if k.split(\"-\")[0] in EXPERIMENT_DOMAINS])\n SLOTS = [k.replace(\" \",\"\").lower() if (\"book\" not in k) else k.lower() for k in ontology_domains.keys()]\n return SLOTS\nALL_SLOTS = get_slot_information()\n\n\ndef replace_with(sentence, string, replacement):\n string_words = string.split()\n\n i = 0\n while i < len(sentence):\n found = True\n for j in range(len(string_words)):\n if i+j >= len(sentence) or sentence[i+j] != string_words[j]:\n found = False\n break\n if found:\n yield replacement\n i += len(string_words)\n else:\n yield sentence[i]\n i += 1\n\n\ndef preprocess(sentence):\n sentence = sentence.split()\n\n for word in sentence:\n if word in ('.', '?'):\n continue\n\n if re.match('[0-9]{,2}:[0-9]{2}', word, re.IGNORECASE):\n yield 'TIME'\n elif re.match('\\$?[0-9]+', word, re.IGNORECASE):\n if len(word) >= 9:\n yield 'PHONE_NUMBER'\n else:\n yield 'NUMBER'\n elif re.match('#[0-9a-z]+', word, re.IGNORECASE):\n yield 'RESERVATION_CODE'\n elif re.match('tr[0-9]+', word, re.IGNORECASE):\n yield 'TRAIN_NUMBER'\n elif re.match('cb[0-9a-z]{4,}', word, re.IGNORECASE):\n yield 'POST_CODE'\n elif re.match('([a-z]*[0-9]+[a-z]*)+', word, re.IGNORECASE): # word with number\n yield 'CONFIRMATION_NUMBER'\n else:\n yield word\n\n\ndef replace_with_system_act(sentence, system_acts):\n sentence = sentence.split()\n for act in system_acts:\n if isinstance(act, str):\n continue\n\n assert isinstance(act, (list, tuple))\n key, value = act\n if value in ('yes', 'no', 'dontcare'):\n continue\n\n sentence = list(replace_with(sentence, value, 'SYSTEM_' + key))\n\n return ' '.join(sentence)\n\n\ndef replace_with_slots(sentence, turn_label, label_dict):\n sentence = sentence.split()\n for slot in turn_label:\n assert isinstance(slot, (list, tuple))\n key, _ = slot\n value = label_dict[key]\n if value in ('yes', 'no', 'dontcare'):\n continue\n\n sentence = list(replace_with(sentence, value, 'SLOT_' + re.sub('[ -]', '_', key)))\n\n return ' '.join(sentence)\n\ndef difference(new, old):\n new_set = set(new)\n old_set = set(old)\n added = list(set(new_set)-set(old_set))\n return added\n\ndef load_data():\n filename = 'data/train_dials.json'\n if len(sys.argv) >= 2:\n filename = sys.argv[1]\n\n with open(filename) as fp:\n data = json.load(fp)\n\n for dialogue in data:\n dialogue_idx = 
dialogue['dialogue_idx']\n\n dialogue['dialogue'].sort(key=lambda x: x['turn_idx'])\n\n prev_turn = 'none'\n\n for turn in dialogue['dialogue']:\n system_transcript = ' '.join(preprocess(turn['system_transcript']))\n transcript = ' '.join(preprocess(turn['transcript']))\n\n label_dict = fix_general_label_error(turn['belief_state'], False, ALL_SLOTS)\n\n if prev_turn in EXPERIMENT_DOMAINS and system_transcript.strip() != '':\n yield replace_with_system_act(system_transcript, turn['system_acts']), prev_turn, 'system'\n\n if turn['domain'] in EXPERIMENT_DOMAINS:\n yield replace_with_slots(transcript, turn['turn_label'], label_dict), turn['domain'], 'user'\n\n prev_turn = turn['domain']\n\ndef get_full_belief(turn):\n dict_of_slots = fix_general_label_error(turn['belief_state'], False, ALL_SLOTS)\n return ['-'.join((el, dict_of_slots[el])) for el in dict_of_slots], list(dict_of_slots.keys())\n \ndef generate_turn_frame(data): \n \n slot_updates = dict()\n \n for dialogue in data:\n \n\n d_idx = dialogue[\"dialogue_idx\"].split('.')[0]\n \n for t_idx, turn in enumerate(dialogue[\"dialogue\"]):\n \n idx = '_'.join((d_idx, str(t_idx)))\n slot_updates[idx] = dict()\n belief, slots = get_full_belief(turn)\n added_belief = ['-'.join(el) for el in turn[\"turn_label\"]]\n added_slots = [el[0] for el in turn[\"turn_label\"]]\n \n slot_updates[idx] = {'dialogue': d_idx, 'turn': t_idx, 'full_belief': belief, \\\n 'step_belief': added_belief, 'full_slots': slots, 'step_slots': added_slots, \\\n 'transcript': turn['transcript'], 'system_transcript': turn['system_transcript'], \\\n 'step_empty': (len(added_belief) == 0), 'full_empty': (len(belief) == 0), \\\n 'domain': turn['domain']}\n \n slot_updates = pd.read_json(json.dumps(slot_updates)).transpose()\n return slot_updates\n\n \ndef select_domains(frame, domain_list):\n \n return frame[frame.domain.isin(domain_list)]\n\ndef dials_as_frame(split_type, domains = None):\n \n assert(split_type in [\"train\", \"test\", \"dev\"])\n\n filename = os.path.join(repo_root, 'data', ''.join((split_type, '_dials.json')))\n\n with open(filename) as fp:\n dialogue_data = json.load(fp)\n \n frame = generate_turn_frame(dialogue_data)\n \n if domains:\n frame = select_domains(frame, domains)\n\n return frame\n\ndef get_errors(df):\n df_correct = df[\"det_full_correct\"].apply(sum).astype(float)\n df_slots = df[\"det_full_correct\"].apply(len).astype(float)\n df[\"percent_correct\"] = df_correct/df_slots\n a = df[[\"turn\", \"percent_correct\"]]\n partially_correct = a[(a[\"percent_correct\"]>0) & (a[\"percent_correct\"] < 1)]\n fully_correct = a[a[\"percent_correct\"] == 1]\n fully_incorrect = a[a[\"percent_correct\"] == 0]\n correct_empty = a[a[\"percent_correct\"].isna()]\n return partially_correct, fully_correct, fully_incorrect, correct_empty\n\n'''\nTODO: This checks for full accuracy, doesn't split it...\n'''\ndef add_error_types(frame):\n \n pred_step_belief = frame['pred_step_belief']\n true_step_belief = frame['true_step_belief']\n \n pred_full_belief = frame['pred_full_belief']\n true_full_belief = frame['true_full_belief']\n\n\n frame[\"det_inserted\"] = list(set(pred_step_belief) - set(true_step_belief))\n frame[\"det_missed\"] = list(set(true_step_belief) - set(pred_step_belief))\n frame[\"det_full_correct\"] = [el in pred_full_belief for el in true_full_belief]\n frame[\"det_step_correct\"] = [el in pred_step_belief for el in true_step_belief]\n \n det_step_correct = frame[\"det_step_correct\"]\n det_full_correct = frame[\"det_full_correct\"]\n det_inserted = 
frame[\"det_inserted\"]\n det_missed = frame[\"det_missed\"]\n \n frame[\"step_correct\"] = (False not in det_step_correct)#bool(sum(det_step_correct))\n frame[\"full_correct\"] = (False not in det_full_correct) #bool(sum(det_full_correct)) \n frame[\"inserted\"] = (len(det_inserted)>0)\n frame[\"missed\"] = (len(det_missed)>0)\n if len(det_full_correct) > 0:\n frame[\"percent_found\"] = sum(det_full_correct)/len(det_full_correct)\n else:\n frame[\"percent_found\"] = None\n \n return frame\n\ndef generate_TRADE_turn_frame(predictions, pred_slot_columns=False, gt_slot_columns=False): \n\n slot_updates = dict()\n for d_idx, dialogue in predictions.items():\n old_belief = []\n true_old_belief = []\n for t_idx in range(len(dialogue.keys())):\n # the unique id: _\n idx = '_'.join((d_idx.split('.')[0], str(t_idx+1)))\n t_idx = str(t_idx)\n slot_updates[idx] = dict()\n turn = dialogue[t_idx]\n new_belief = turn['pred_bs_ptr']\n true_belief = turn['turn_belief']\n added_belief = difference(new_belief, old_belief)\n true_added_belief = difference(true_belief, true_old_belief)\n \n # has anything been added?\n added_empty = (len(added_belief) == 0)\n true_empty = (len(true_added_belief) == 0)\n \n slot_updates[idx]['dialogue'] = d_idx.split('.')[0]\n slot_updates[idx]['turn'] = t_idx\n \n slot_updates[idx]['pred_full_belief'] = new_belief\n slot_updates[idx]['true_full_belief'] = true_belief\n slot_updates[idx]['pred_step_belief'] = added_belief \n slot_updates[idx]['true_step_belief'] = true_added_belief\n \n slot_updates[idx] = add_error_types(slot_updates[idx])\n \n slot_updates[idx]['pred_empty'] = added_empty\n slot_updates[idx]['true_empty'] = true_empty\n \n old_belief = new_belief\n true_old_belief = true_belief\n \n slot_updates = pd.read_json(json.dumps(slot_updates, indent=4)).transpose()\n return slot_updates\n\ndef experiment_results_frame(input_file):\n output_file = os.path.join(experiment_path(experiment), \"inference_turn_info.csv\")\n baseline_test_set = read_json(input_file)\n frame = generate_TRADE_turn_frame(baseline_test_set)\n return frame\n","sub_path":"data_analysis/analyze-data.py","file_name":"analyze-data.py","file_ext":"py","file_size_in_byte":9786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520494239","text":"# you will need to know how to set up your postgresql database\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport psycopg2 # to execute SQL statements\nimport urllib.request, urllib.error, urllib.parse\n\n\ndef get_table_headers():\n\turl = \"http://www.finviz.com/screener.ashx?v=111&f=cap_nano&r=1\"\n\tcontent = urllib.request.urlopen(url).read()\n\tsoup = BeautifulSoup(content)\n\ttable_headers = []\n\tfor th in soup.select(\".table-top\"):\n\t\ttable_headers.append(th.get_text())\n\ttable_headers.insert(1, \"Ticker\")\n\treturn table_headers\n\n\ndef get_rows_from_soup(soup, table_headers):\n\ttable_row_data = []\n\tcounter = 0\n\trow_data = {}\n\tfor tr in soup.select(\".screener-body-table-nw\"):\n\t\trow_data[table_headers[counter]] = tr.get_text()\n\t\tcounter += 1\n\t\tif counter >= len(table_headers):\n\t\t\tcounter = 0\n\t\t\ttable_row_data.append(row_data)\n\t\t\trow_data = {}\n\treturn table_row_data\n\n\ndef get_data():\n\theaders = get_table_headers()\n\tall_data = []\n\tended = False\n\tinitial_number = 1\n\twhile not ended:\n\t\turl = \"http://www.finviz.com/screener.ashx?v=111&f=cap_nano&r={}\".format(\n\t\t\tinitial_number\n\t\t)\n\t\tcontent = 
def get_data():\n\theaders = get_table_headers()\n\tall_data = []\n\tended = False\n\tinitial_number = 1\n\twhile not ended:\n\t\turl = \"http://www.finviz.com/screener.ashx?v=111&f=cap_nano&r={}\".format(\n\t\t\tinitial_number\n\t\t)\n\t\tcontent = urllib.request.urlopen(url).read()\n\t\tsoup = BeautifulSoup(content, \"html.parser\")\n\t\tall_data += get_rows_from_soup(soup, headers)\n\t\tprint(len(all_data))\n\t\tinitial_number += 20\n\t\tprint(type(content))\n\t\tprint(initial_number)\n\t\tif not re.findall(b\"next\", content):\n\t\t\tended = True\n\treturn all_data\n\n\nDB = psycopg2.connect(dbname=\"xx\", user=\"xx\")\ncursor = DB.cursor()\ntable_name = \"market_cap\"\ncursor.execute(\"DROP TABLE IF EXISTS \" + table_name)\ncursor.execute(\"\"\"\n\tCREATE TABLE {} (\n\t\tno INT, ticket VARCHAR(10), company VARCHAR(255), sector VARCHAR(255), industry VARCHAR(255),\n\t\tcountry VARCHAR(255), market_cap FLOAT(4), price FLOAT(4), change FLOAT(4), volume INT, pe FLOAT(4)\n\t);\n\"\"\".format(table_name))\nDB.commit()\n\ndata = get_data()\n\nfrom pprint import pprint\npprint(data)\n\nfor row in data:\n\tcursor.execute(\"\"\"\n\tINSERT INTO {}(no, ticket, company, sector, industry, country, market_cap, price, change, volume, pe)\n\tVALUES({}, '{}', '{}', '{}', '{}', '{}', {}, {}, {}, {}, {})\n\t\"\"\".format(\n\t\ttable_name, row['No.'], row['Ticker'], row['Company'], row['Sector'], row['Industry'], row['Country'],\n\t\trow['Market Cap'][:-1], row['Price'], row['Change'][:-1], row['Volume'].replace(',', \"\"),\n\t\t\"NULL\" if row['P/E'] == \"-\" else row['P/E'],\t\n\t))\nDB.commit()\n\n# cursor.execute() returns None in psycopg2; fetch the aggregate explicitly if it is needed.\ncursor.execute('select AVG(market_cap) from market_cap;')\n\n#results = cursor.fetchall()\n#print(results)\n#DB.close()\n
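\n# A parameterized variant of the row insert above, sketched for reference: psycopg2\n# substitutes %s placeholders itself, which avoids the quoting problems str.format has\n# with values such as company names containing apostrophes. (Illustrative; not called above.)\ndef insert_row(cursor, row):\n\tcursor.execute(\n\t\t\"INSERT INTO market_cap(no, ticket, company, sector, industry, country, market_cap, price, change, volume, pe) \"\n\t\t\"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n\t\t(row['No.'], row['Ticker'], row['Company'], row['Sector'], row['Industry'], row['Country'],\n\t\t row['Market Cap'][:-1], row['Price'], row['Change'][:-1], row['Volume'].replace(',', \"\"),\n\t\t None if row['P/E'] == \"-\" else row['P/E'])\n\t)\n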
","sub_path":"data/scrapping projects/finviz/Example_FinViz_scraping_to_DB.py","file_name":"Example_FinViz_scraping_to_DB.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452255477","text":"# encoding: utf-8\r\n# Author: zTaylor\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nfrom xgboost import XGBRegressor\r\n\r\nimport pickle\r\n\r\n\r\ndata = pd.read_csv('./Processed_Data/RON_Feature_Selected_by_RF.csv') # todo:\r\ny = pd.read_csv('./Processed_Data/Data_y.csv')\r\n\r\n\r\ntest_ratio = 0.2\r\nis_shuffle = True\r\nrandom_state = 2\r\nx_train, x_test, y_train, y_test = train_test_split(data.to_numpy(), y.to_numpy()[:, 0],\r\n test_size=test_ratio, shuffle=is_shuffle, random_state=random_state)\r\n\r\n\r\n# Cross-validation & model stacking\r\nfrom sklearn.model_selection import KFold\r\n_N_FOLDS = 5 # use 5-fold cross-validation\r\nkf = KFold(n_splits=_N_FOLDS, shuffle=True, random_state=10)\r\n# X = data.to_numpy()\r\n# Y = y.to_numpy()[:, 2]\r\nbst = XGBRegressor(\r\n learning_rate=0.01,\r\n n_estimators=300,\r\n max_depth=10, # was 'max_delta', which XGBRegressor does not recognize; max_depth matches the RF setting below\r\n # min_child_weight=6,\r\n # gamma=0.1,\r\n subsample=0.5,\r\n colsample_bytree=0.8, # was 'colsample_btree' (typo)\r\n # objective='multi:softmax',\r\n # scale_pos_weight=1,\r\n # random_state=27\r\n)\r\nrf = RandomForestRegressor(\r\n n_estimators=300,\r\n max_depth=10,\r\n # max_features=30,\r\n bootstrap=True,\r\n # random_state=30,\r\n verbose=True,\r\n # max_samples=0.5,\r\n)\r\n\r\nmodel_list = [bst, rf]\r\n\r\ndef getStkTrain(model):\r\n stk_train = np.zeros((x_train.shape[0], 1))\r\n stk_test = np.empty((_N_FOLDS, x_test.shape[0], 1))\r\n for i, (train_index, val_index) in enumerate(kf.split(x_train)):\r\n X_train = x_train[train_index]\r\n Y_train = y_train[train_index]\r\n X_val = x_train[val_index]\r\n Y_val = y_train[val_index]\r\n\r\n model.fit(X_train, Y_train)\r\n # model = model.predict(X_val)\r\n stk_train[val_index] = model.predict(X_val).reshape(-1, 1)\r\n stk_test[i, :] = model.predict(x_test).reshape(-1, 1)\r\n # print(\"{} times, XGB MSE_LOSS on validation dataset: {}\".format(i, mean_squared_error(Y_val, bst_y_pred)))\r\n\r\n stk_test = stk_test.mean(axis=0)\r\n return stk_train, stk_test\r\n\r\n\r\nnew_train, new_test = [], []\r\nfor model in model_list:\r\n stk_train, stk_test = getStkTrain(model)\r\n new_train.append(stk_train)\r\n new_test.append(stk_test)\r\n\r\n\r\nnew_train = np.concatenate(new_train, axis=1)\r\nnew_test = np.concatenate(new_test, axis=1)\r\n\r\n\r\nmodel = LinearRegression()\r\nmodel.fit(new_train, y_train)\r\nmodel.predict(new_test)\r\nprint(\"MSE_LOSS on test dataset: {}\".format(mean_squared_error(y_test, model.predict(new_test))))\r\nmodel_list.append(model)\r\n\r\nplt.plot(range(len(x_test)), y_test, 'b-', label='y_true')\r\nplt.plot(range(len(x_test)), model.predict(new_test), 'r-', label='y_pred')\r\nplt.legend(labels=['y_true','y_pred'],loc='best')\r\nplt.xlabel('Sample NO.')\r\nplt.ylabel('S')\r\nplt.show()\r\n\r\nis_save = True\r\nif is_save:\r\n for i, model in enumerate(model_list):\r\n with open('./saved_model/S_model_{}.pickle'.format(i), 'wb') as f:\r\n pickle.dump(model, f)\r\n
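\r\n# Sketch (not executed): the saved ensemble can be restored later with pickle.load;\r\n# indices 0..2 correspond to [bst, rf, meta-model] in the order appended to model_list.\r\n# loaded = []\r\n# for i in range(len(model_list)):\r\n# with open('./saved_model/S_model_{}.pickle'.format(i), 'rb') as f:\r\n# loaded.append(pickle.load(f))\r\n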
\"+str(g_tax))\n ##softdrink\n global cocacola_price\n global redbull_price\n global nescafe_price\n global pepsi_price\n global tropicana_price\n global sprite_price\n global s_tax\n cocacola_price=cocacola.get() * 20\n redbull_price=redbull.get() * 300\n nescafe_price=nescafe.get() * 70\n pepsi_price=pepsi.get() * 80\n tropicana_price=tropicana.get() * 250\n sprite_price=sprite.get() * 100\n total_softdrink_price = (cocacola_price + redbull_price + nescafe_price + pepsi_price +\n tropicana_price + sprite_price )\n softdrink_price.set(\"Rs. \"+str(total_softdrink_price))\n s_tax=total_softdrink_price * 0.05\n softdrink_tax.set(\"Rs. \"+str(s_tax))\n\n ##household\n global lamps_price\n global tableware_price\n global bottles_price\n global glassware_price\n global stoves_price\n global flask_price\n global h_tax\n global Total_bill\n lamps_price=lamps.get() * 200\n tableware_price=tableware.get() * 350\n bottles_price=bottles.get() * 70\n glassware_price=glassware.get() * 180\n stoves_price=stoves.get() * 250\n flask_price=flask.get() * 100\n total_household_price = (lamps_price + tableware_price + bottles_price + glassware_price +\n stoves_price + flask_price)\n household_price.set(\"Rs. \"+str(total_household_price))\n h_tax=total_household_price * 0.05\n household_tax.set(\"Rs. \"+str(h_tax))\n Total_bill=(total_cosmetics_price + total_grocery_price + total_softdrink_price +\n total_household_price + c_tax + g_tax + s_tax + h_tax)\n\ndef welcome_bill():\n txtbox.delete('1.0',END)\n txtbox.insert(END,\"\\tWelcome in Bill Area \\n\")\n txtbox.insert(END,f\"\\n Bill Number: {customer_billno.get()}\")\n txtbox.insert(END, f\"\\n Customer Name: { customer_name.get()}\")\n txtbox.insert(END, f\"\\n Phone Number: {customer_phone.get()}\")\n txtbox.insert(END, f\"\\n ************************************\")\n txtbox.insert(END, f\"\\n Product\\t\\tQuantity\\t\\tPrice\")\n txtbox.insert(END,f\"\\n *************************************\")\ndef bill_area():\n welcome_bill()\n ##cosmetics\n if Soap!=0:\n txtbox.insert(END, f\"\\n Soap\\t\\t{Soap.get()}\\t\\t{soap_price}\")\n if Detergent != 0:\n txtbox.insert(END, f\"\\n Detergent\\t\\t{Detergent.get()}\\t\\t{detergent_price}\")\n if Deodrants != 0:\n txtbox.insert(END, f\"\\n Deodrants\\t\\t{Deodrants.get()}\\t\\t{deodrants_price}\")\n if Facewash != 0:\n txtbox.insert(END, f\"\\n Facewash\\t\\t{Facewash.get()}\\t\\t{facewash_price}\")\n if Hairgel != 0:\n txtbox.insert(END, f\"\\n Hairgel\\t\\t{Hairgel.get()}\\t\\t{hairgel_price}\")\n if Moisturecream != 0:\n txtbox.insert(END, f\"\\n Moisturecream\\t\\t{Moisturecream.get()}\\t\\t{moisturecream_price}\")\n ##grocery\n if Grainsbread!=0:\n txtbox.insert(END, f\"\\n Grainsbread\\t\\t{Grainsbread.get()}\\t\\t{grainsbread_price}\")\n if Oilfat != 0:\n txtbox.insert(END, f\"\\n Oilfat\\t\\t{Oilfat.get()}\\t\\t{oilfat_price}\")\n if Dairyeggs != 0:\n txtbox.insert(END, f\"\\n Dairyeggs\\t\\t{Dairyeggs.get()}\\t\\t{dairyeggs_price}\")\n if Salt != 0:\n txtbox.insert(END, f\"\\n Salt\\t\\t{Salt.get()}\\t\\t{salt_price}\")\n if Meatfish != 0:\n txtbox.insert(END, f\"\\n Meatfish\\t\\t{Meatfish.get()}\\t\\t{meatfish_price}\")\n if Driedproduce != 0:\n txtbox.insert(END, f\"\\n Driedproduce\\t\\t{Driedproduce.get()}\\t\\t{driedproduce_price}\")\n ##softdrink##\n if cocacola!=0:\n txtbox.insert(END, f\"\\n cocacola\\t\\t{cocacola.get()}\\t\\t{cocacola_price}\")\n if redbull != 0:\n txtbox.insert(END, f\"\\n redbull\\t\\t{redbull.get()}\\t\\t{redbull_price}\")\n if nescafe != 0:\n txtbox.insert(END, f\"\\n 
def bill_area():\n welcome_bill()\n ##cosmetics\n if Soap.get()!=0: # compare the IntVar's value; the widget variable object itself is never equal to 0\n txtbox.insert(END, f\"\\n Soap\\t\\t{Soap.get()}\\t\\t{soap_price}\")\n if Detergent.get() != 0:\n txtbox.insert(END, f\"\\n Detergent\\t\\t{Detergent.get()}\\t\\t{detergent_price}\")\n if Deodrants.get() != 0:\n txtbox.insert(END, f\"\\n Deodrants\\t\\t{Deodrants.get()}\\t\\t{deodrants_price}\")\n if Facewash.get() != 0:\n txtbox.insert(END, f\"\\n Facewash\\t\\t{Facewash.get()}\\t\\t{facewash_price}\")\n if Hairgel.get() != 0:\n txtbox.insert(END, f\"\\n Hairgel\\t\\t{Hairgel.get()}\\t\\t{hairgel_price}\")\n if Moisturecream.get() != 0:\n txtbox.insert(END, f\"\\n Moisturecream\\t\\t{Moisturecream.get()}\\t\\t{moisturecream_price}\")\n ##grocery\n if Grainsbread.get()!=0:\n txtbox.insert(END, f\"\\n Grainsbread\\t\\t{Grainsbread.get()}\\t\\t{grainsbread_price}\")\n if Oilfat.get() != 0:\n txtbox.insert(END, f\"\\n Oilfat\\t\\t{Oilfat.get()}\\t\\t{oilfat_price}\")\n if Dairyeggs.get() != 0:\n txtbox.insert(END, f\"\\n Dairyeggs\\t\\t{Dairyeggs.get()}\\t\\t{dairyeggs_price}\")\n if Salt.get() != 0:\n txtbox.insert(END, f\"\\n Salt\\t\\t{Salt.get()}\\t\\t{salt_price}\")\n if Meatfish.get() != 0:\n txtbox.insert(END, f\"\\n Meatfish\\t\\t{Meatfish.get()}\\t\\t{meatfish_price}\")\n if Driedproduce.get() != 0:\n txtbox.insert(END, f\"\\n Driedproduce\\t\\t{Driedproduce.get()}\\t\\t{driedproduce_price}\")\n ##softdrink##\n if cocacola.get()!=0:\n txtbox.insert(END, f\"\\n cocacola\\t\\t{cocacola.get()}\\t\\t{cocacola_price}\")\n if redbull.get() != 0:\n txtbox.insert(END, f\"\\n redbull\\t\\t{redbull.get()}\\t\\t{redbull_price}\")\n if nescafe.get() != 0:\n txtbox.insert(END, f\"\\n nescafe\\t\\t{nescafe.get()}\\t\\t{nescafe_price}\")\n if pepsi.get() != 0: # was glassware, a household item; pepsi belongs in the softdrink section\n txtbox.insert(END, f\"\\n pepsi\\t\\t{pepsi.get()}\\t\\t{pepsi_price}\")\n if tropicana.get() != 0:\n txtbox.insert(END, f\"\\n tropicana\\t\\t{tropicana.get()}\\t\\t{tropicana_price}\")\n if sprite.get() != 0:\n txtbox.insert(END, f\"\\n sprite\\t\\t{sprite.get()}\\t\\t{sprite_price}\")\n ##household\n if lamps.get()!=0:\n txtbox.insert(END, f\"\\n lamps\\t\\t{lamps.get()}\\t\\t{lamps_price}\")\n if tableware.get() != 0:\n txtbox.insert(END, f\"\\n tableware\\t\\t{tableware.get()}\\t\\t{tableware_price}\")\n if bottles.get() != 0:\n txtbox.insert(END, f\"\\n bottles\\t\\t{bottles.get()}\\t\\t{bottles_price}\")\n if glassware.get() != 0: # was pepsi, a softdrink; glassware belongs in the household section\n txtbox.insert(END, f\"\\n glassware\\t\\t{glassware.get()}\\t\\t{glassware_price}\")\n if stoves.get() != 0:\n txtbox.insert(END, f\"\\n stoves\\t\\t{stoves.get()}\\t\\t{stoves_price}\")\n if flask.get() != 0:\n txtbox.insert(END, f\"\\n flask\\t\\t{flask.get()}\\t\\t{flask_price}\")\n txtbox.insert(END, f\"\\n **************************************\")\n txtbox.insert(END, f\"\\n Total Bill: \\t\\t\\tRs. {Total_bill}\")\ndef clear_data():\n Soap.set(0)\n Detergent.set(0)\n Deodrants.set(0)\n Facewash.set(0)\n Hairgel.set(0)\n Moisturecream.set(0)\n ###grocery variable\n Grainsbread.set(0)\n Oilfat.set(0)\n Dairyeggs.set(0)\n Salt.set(0)\n Meatfish.set(0)\n Driedproduce.set(0)\n ##softdrink variable\n cocacola.set(0)\n redbull.set(0)\n nescafe.set(0)\n pepsi.set(0)\n tropicana.set(0)\n sprite.set(0)\n ###household###\n lamps.set(0)\n tableware.set(0)\n bottles.set(0)\n glassware.set(0)\n stoves.set(0)\n flask.set(0)\n ##prices\n grocery_price.set(\"\")\n cosmetics_price.set(\"\")\n softdrink_price.set(\"\")\n household_price.set(\"\")\n ##taxes##\n cosmetics_tax.set(\"\")\n grocery_tax.set(\"\")\n softdrink_tax.set(\"\")\n household_tax.set(\"\")\n ##customer\n customer_name.set(\"\")\n customer_phone.set(\"\")\n customer_billno.set(\"\")\n a = random.randint(1100, 9999)\n customer_billno.set(str(a))\n customer_search.set(\"\")\n welcome_bill()\ndef exit_bill():\n op=messagebox.askyesno(\"Exit\",\"Do you really want to exit?\")\n if op: # only close when the user confirms; the window was destroyed unconditionally before\n root.destroy()\ndef cosmo():\n top=Toplevel()\n top.title(\"Billing System\")\n # customer details\n global Soap\n global Detergent\n global Deodrants\n global Facewash\n global Hairgel\n global Moisturecream\n\n global Grainsbread\n global Oilfat\n global Dairyeggs\n global Salt\n global Meatfish\n global Driedproduce\n\n global cocacola\n global redbull\n global nescafe\n global pepsi\n global tropicana\n global sprite\n\n global lamps\n global tableware\n global bottles\n global glassware\n global stoves\n global flask\n global cosmetics_price\n global grocery_price\n global softdrink_price\n global household_price\n\n\n global cosmetics_tax\n global grocery_tax\n global softdrink_tax\n global household_tax\n\n\n global customer_billno\n global customer_phone\n global customer_name\n global customer_search\n ##cosmeticsvariable##\n Soap=IntVar()\n Detergent=IntVar()\n Deodrants=IntVar()\n Facewash=IntVar()\n Hairgel=IntVar()\n Moisturecream=IntVar()\n ###grocery variable\n Grainsbread=IntVar()\n Oilfat=IntVar()\n Dairyeggs=IntVar()\n Salt=IntVar()\n Meatfish=IntVar()\n Driedproduce=IntVar()\n ##softdrink variable\n cocacola=IntVar()\n redbull=IntVar()\n nescafe = IntVar()\n pepsi = IntVar()\n tropicana = IntVar()\n sprite = IntVar()\n ###household###\n lamps=IntVar()\n tableware=IntVar()\n bottles=IntVar()\n glassware=IntVar()\n stoves=IntVar()\n flask=IntVar()\n ##prices\n grocery_price=StringVar()\n 
cosmetics_price=StringVar()\n softdrink_price=StringVar()\n household_price=StringVar()\n ##taxes##\n cosmetics_tax=StringVar()\n grocery_tax=StringVar()\n softdrink_tax=StringVar()\n household_tax=StringVar()\n ##customer\n customer_name=StringVar()\n customer_phone=StringVar()\n customer_billno=StringVar()\n a=random.randint(1100,9999)\n customer_billno.set(str(a))\n customer_search=StringVar()\n f1 = LabelFrame(top, bd=10, text=\"Customer Details\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f1.pack(side=\"top\",fill=X)\n custom_name = Label(f1, text=\"Customer Name :\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n custom_name.grid(row=0, column=0, padx=20, pady=5)\n custom_text = Entry(f1, width=10, font=\"comicsansms 15\",textvariable=customer_name, bd=6, relief=SUNKEN)\n custom_text.grid(row=0, column=1)\n custom_phone = Label(f1, text=\"Customer Phone No :\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n custom_phone.grid(row=0, column=2, padx=20, pady=5)\n custom_text = Entry(f1, width=10, font=\"comicsansms 15 bold\",textvariable=customer_phone, bd=6, relief=SUNKEN)\n custom_text.grid(row=0, column=3)\n ##cosmetics products\n f3 = LabelFrame(top, bd=10, text=\"Cosmetics Products\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f3.place(x=4, y=105, width=350, height=305)\n c1_product1 = Label(f3, text=\"Soap\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c1_product1.grid(row=0, column=0, padx=20, pady=5)\n c1_product1 = Entry(f3, width=8, font=\"comicsansms 15 bold\",textvariable=Soap, bd=6, relief=SUNKEN)\n c1_product1.grid(row=0, column=1)\n c2_product2 = Label(f3, text=\"Detergent\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c2_product2.grid(row=1, column=0, padx=20, pady=5)\n c2_product2 = Entry(f3, width=8,textvariable=Detergent, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n c2_product2.grid(row=1, column=1)\n c3_product3 = Label(f3, text=\"Deodrants\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c3_product3.grid(row=2, column=0, padx=20, pady=5)\n c3_product3 = Entry(f3, width=8, font=\"comicsansms 15 bold\",textvariable=Deodrants, bd=6, relief=SUNKEN)\n c3_product3.grid(row=2, column=1)\n c4_product4 = Label(f3, text=\"Face Wash\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c4_product4.grid(row=3, column=0, padx=20, pady=5)\n c4_product4 = Entry(f3, width=8, font=\"comicsansms 15 bold\",textvariable=Facewash, bd=6, relief=SUNKEN)\n c4_product4.grid(row=3, column=1)\n c5_product5 = Label(f3, text=\"Hair Gel\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c5_product5.grid(row=4, column=0, padx=20, pady=5)\n c5_product5 = Entry(f3, width=8, font=\"comicsansms 15 bold\",textvariable=Hairgel, bd=6, relief=SUNKEN)\n c5_product5.grid(row=4, column=1)\n c6_product6 = Label(f3, text=\"Moisture Cream\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n c6_product6.grid(row=5, column=0, padx=20, pady=5)\n c6_product6 = Entry(f3, width=8, font=\"comicsansms 15 bold\", bd=6,textvariable=Moisturecream, relief=SUNKEN)\n c6_product6.grid(row=5, column=1)\n # Grocery products################\n f2 = LabelFrame(top, bd=10, text=\"Grocery Products\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f2.place(x=350, y=105, width=325, height=305)\n g1_product1 = 
Label(f2, text=\"Grains&bread\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g1_product1.grid(row=0, column=0, padx=20, pady=5)\n g1_product1 = Entry(f2, width=8,textvariable=Grainsbread, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g1_product1.grid(row=0, column=1)\n g2_product2 = Label(f2, text=\"Oil&fat\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g2_product2.grid(row=1, column=0, padx=20, pady=5)\n g2_product2 = Entry(f2, width=8,textvariable=Oilfat, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g2_product2.grid(row=1, column=1)\n g3_product3 = Label(f2, text=\"Dairy&Eggs\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g3_product3.grid(row=2, column=0, padx=20, pady=5)\n g3_product3 = Entry(f2, width=8,textvariable=Dairyeggs, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g3_product3.grid(row=2, column=1)\n g4_product4 = Label(f2, text=\"Salt\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g4_product4.grid(row=3, column=0, padx=20, pady=5)\n g4_product4 = Entry(f2, width=8,textvariable=Salt, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g4_product4.grid(row=3, column=1)\n g6_product6 = Label(f2, text=\"Meat&fish\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g6_product6.grid(row=5, column=0, padx=20, pady=5)\n g6_product6 = Entry(f2, width=8,textvariable=Meatfish, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g6_product6.grid(row=5, column=1)\n g7_product7 = Label(f2, text=\"Dried produce\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n g7_product7.grid(row=6, column=0, padx=20, pady=5)\n g7_product7 = Entry(f2, width=8,textvariable=Driedproduce, font=\"comicsansms 15 bold\", bd=6, relief=SUNKEN)\n g7_product7.grid(row=6, column=1)\n ###softdrink\n f4 = LabelFrame(top, bd=10, text=\"Softdrinks Products\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f4.place(x=680, y=105, width=300, height=305)\n co1_product1 = Label(f4, text=\"Coca-Cola\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co1_product1.grid(row=0, column=0, padx=20, pady=5)\n co1_product1 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=cocacola, bd=6, relief=SUNKEN)\n co1_product1.grid(row=0, column=1)\n co2_product2 = Label(f4, text=\"Red Bull\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co2_product2.grid(row=1, column=0, padx=20, pady=5)\n co2_product2 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=redbull, bd=6, relief=SUNKEN)\n co2_product2.grid(row=1, column=1)\n co3_product3 = Label(f4, text=\"Nescafe\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co3_product3.grid(row=2, column=0, padx=20, pady=5)\n co3_product3 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=nescafe, bd=6, relief=SUNKEN)\n co3_product3.grid(row=2, column=1)\n co4_product4 = Label(f4, text=\"Pepsi\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co4_product4.grid(row=3, column=0, padx=20, pady=5)\n co4_product4 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=pepsi, bd=6, relief=SUNKEN)\n co4_product4.grid(row=3, column=1)\n co5_product5 = Label(f4, text=\"Tropicana\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co5_product5.grid(row=4, column=0, padx=20, pady=5)\n co5_product5 = Entry(f4, width=8, font=\"comicsansms 15 
bold\",textvariable=tropicana, bd=6, relief=SUNKEN)\n co5_product5.grid(row=4, column=1)\n co6_product6 = Label(f4, text=\"Sprite\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co6_product6.grid(row=6, column=0, padx=20, pady=5)\n co6_product6 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=sprite, bd=6, relief=SUNKEN)\n co6_product6.grid(row=6, column=1)\n ###Household\n\n f4 = LabelFrame(top, bd=10, text=\"Household Products\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f4.place(x=985, y=105, width=290, height=305)\n co1_house1 = Label(f4, text=\"Lamps\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co1_house1.grid(row=0, column=0, padx=20, pady=5)\n co1_house1 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=lamps, bd=6, relief=SUNKEN)\n co1_house1.grid(row=0, column=1)\n co2_house2 = Label(f4, text=\"Tableware\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co2_house2.grid(row=1, column=0, padx=20, pady=5)\n co2_house2 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=tableware, bd=6, relief=SUNKEN)\n co2_house2.grid(row=1, column=1)\n co3_house3 = Label(f4, text=\"Bottles\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co3_house3.grid(row=2, column=0, padx=20, pady=5)\n co3_house3 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=bottles, bd=6, relief=SUNKEN)\n co3_house3.grid(row=2, column=1)\n co4_house4 = Label(f4, text=\"Glassware\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co4_house4.grid(row=3, column=0, padx=20, pady=5)\n co4_house4 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=glassware, bd=6, relief=SUNKEN)\n co4_house4.grid(row=3, column=1)\n co5_house5 = Label(f4, text=\"Stove\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co5_house5.grid(row=4, column=0, padx=20, pady=5)\n co5_house5 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=stoves, bd=6, relief=SUNKEN)\n co5_house5.grid(row=4, column=1)\n co6_house6 = Label(f4, text=\"Flask\", font=(\"times new roman\", 19, \"bold\"), bg=\"yellow\", fg=\"red\")\n co6_house6.grid(row=5, column=0, padx=20, pady=5)\n co6_house6 = Entry(f4, width=8, font=\"comicsansms 15 bold\",textvariable=flask, bd=6, relief=SUNKEN)\n co6_house6.grid(row=5, column=1)\n ##Bill Area\n global txtbox\n frame = Frame(top, bd=10, relief=SUNKEN)\n frame.place(x=5, y=410, width=340, height=325)\n label = Label(frame, text=\"Bill Area\", font=\"comicsansms 19 bold\", bd=7, relief=GROOVE)\n label.pack()\n xscrollbar = Scrollbar(frame, orient=HORIZONTAL)\n xscrollbar.pack(side=\"bottom\", fill=X)\n yscrollbar = Scrollbar(frame)\n yscrollbar.pack(side=\"right\", fill=Y)\n txtbox = Text(frame, wrap=NONE, xscrollcommand=xscrollbar.set, yscrollcommand=yscrollbar.set)\n txtbox.pack()\n xscrollbar.config(command=txtbox.xview())\n yscrollbar.config(command=txtbox.yview())\n ##button frame###################################\n f5 = LabelFrame(top, bd=10, text=\"Bill Menu\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\", relief=SUNKEN)\n f5.place(x=345, y=400, relwidth=1, height=250)\n l1=Label(f5,text=\"Total_Cosmetics_Price\",font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l1.grid(row=0,column=0,padx=20,pady=1)\n e1=Entry(f5,font=(\"times new roman\", 15, \"bold\"),width=15,textvariable=cosmetics_price,bd=10,relief=SUNKEN)\n 
e1.grid(row=0,column=1,padx=20,pady=5)\n l2 = Label(f5, text=\"Total_Grocery_Price\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l2.grid(row=1, column=0, padx=20, pady=1)\n e2 = Entry(f5, font=(\"times new roman\", 15, \"bold\"),textvariable=grocery_price, width=15, bd=10, relief=SUNKEN)\n e2.grid(row=1, column=1, padx=20, pady=5)\n l3=Label(f5,text=\"Total_Softdrink_Price\",font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l3.grid(row=2,column=0,padx=20,pady=1)\n e3=Entry(f5,font=(\"times new roman\", 15, \"bold\"),textvariable=softdrink_price,width=15,bd=10,relief=SUNKEN)\n e3.grid(row=2,column=1,padx=20,pady=5)\n l4 = Label(f5, text=\"Total_Household_Price\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l4.grid(row=3, column=0, padx=20, pady=1)\n e4 = Entry(f5, font=(\"times new roman\", 15, \"bold\"),textvariable=household_price, width=15, bd=10, relief=SUNKEN)\n e4.grid(row=3, column=1, padx=20, pady=5)\n l11 = Label(f5, text=\"Cosmetics_Tax\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l11.grid(row=0, column=2, padx=20, pady=1)\n e11 = Entry(f5, font=(\"times new roman\", 15, \"bold\"),textvariable=cosmetics_tax, width=15, bd=10, relief=SUNKEN)\n e11.grid(row=0, column=3, padx=20, pady=5)\n l22 = Label(f5, text=\"Grocery_Tax\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l22.grid(row=1, column=2, padx=20, pady=1)\n e22 = Entry(f5, font=(\"times new roman\", 15, \"bold\"),textvariable=grocery_tax, width=15, bd=10, relief=SUNKEN)\n e22.grid(row=1, column=3, padx=20, pady=5)\n l33 = Label(f5, text=\"Softdrink_Tax\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l33.grid(row=2, column=2, padx=20, pady=1)\n e33 = Entry(f5, font=(\"times new roman\", 15, \"bold\"),textvariable=softdrink_tax, width=15, bd=10, relief=SUNKEN)\n e33.grid(row=2, column=3, padx=20, pady=5)\n l44 = Label(f5, text=\"Household_Tax\", font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", fg=\"green\")\n l44.grid(row=3, column=2, padx=20, pady=1)\n e44 = Entry(f5, font=(\"times new roman\", 15, \"bold\"), width=15,textvariable=household_tax, bd=10, relief=SUNKEN)\n e44.grid(row=3, column=3, padx=20, pady=5)\n f7 = LabelFrame(top, bd=10, font=(\"times new roman\", 15, \"bold\"), bg=\"yellow\", relief=SUNKEN)\n f7.place(x=345, y=645, relwidth=1, height=250)\n btn=Button(f7,text=\"Total\",font=(\"times new roman\", 15, \"bold\"), width=15, bd=10,command=mymet)\n btn.grid(row=0,column=0,padx=20,pady=5)\n btn1 = Button(f7, text=\"Bill\",command=bill_area, font=(\"times new roman\", 15, \"bold\"), width=15, bd=10)\n btn1.grid(row=0, column=1, padx=20, pady=5)\n btn2=Button(f7,text=\"Clear\",font=(\"times new roman\", 15, \"bold\"), width=15, bd=10,command=clear_data)\n btn2.grid(row=0, column=2, padx=20, pady=5)\n btn3 = Button(f7, text=\"Quit\",command=exit_bill, font=(\"times new roman\", 15, \"bold\"), width=12, bd=10)\n btn3.grid(row=0, column=3, padx=20, pady=5)\n welcome_bill()\ndef billing():\n f3 = LabelFrame(top, text=\"Cosmetics Products\",fg=\"green\")\n f3.place(x=450, y=70, width=450, height=330)\n serialno = Label(f3, text=\"serialno\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n serialno.grid(row=0, column=0, padx=20, pady=5)\n Prod_Name = Label(f3, text=\"Prod_Name\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Prod_Name.grid(row=0, column=1, padx=20, pady=5)\n Rupees = Label(f3, 
text=\"Rupees\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Rupees.grid(row=0, column=2, padx=20, pady=5)\n b = pymysql.connect(user='root', password='Lucknow@123', host='localhost', database='cosmetics')\n mycursor = b.cursor()\n myquery = \"select * from cosmetics\"\n mycursor.execute(myquery)\n index = 1\n for rec in mycursor:\n Label(f3, text=rec[0], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=0)\n Label(f3, text=rec[1], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=1)\n Label(f3, text=rec[2], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=2)\n index = index + 1\n mycursor.close()\n b.close()\ndef submit1():\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='customer')\n str11 = text_rec.get()\n str22 = text_rec1.get()\n str33 = text_rec2.get()\n str44 = text_rec3.get()\n str55 = text_rec4.get()\n str66 = text_rec5.get()\n mycursor = b.cursor()\n myquery = \"insert into customer values(%s,%s,%s,%s,%s,%s)\"\n tuple = (str11, str22, str33, str44, str55, str66)\n mycursor.execute(myquery, tuple)\n b.commit()\n b.close()\n print(\"data inserted\")\n top=Toplevel()\n title = Label(top, text=\"Billing Software \", font=(\"times new roman\", 30, \"bold\"), bd=12,fg=\"green\",bg=\"yellow\",relief=SUNKEN)\n title.place(x=0, y=0, relwidth=1)\n ###########Customer details\n f3 = LabelFrame(top, text=\"Cosmetics Products\",font=(\"times new roman\", 15, \"bold\"),bg=\"yellow\",fg=\"black\",relief=SUNKEN,bd=12)\n f3.place(x=5, y=70, width=450, height=330)\n serialno = Label(f3, text=\"serialno\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n serialno.grid(row=0, column=0, padx=20, pady=5)\n Prod_Name = Label(f3, text=\"Prod_Name\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Prod_Name.grid(row=0, column=1, padx=20, pady=5)\n Rupees = Label(f3, text=\"Rupees\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Rupees.grid(row=0, column=2, padx=20, pady=5)\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='cosmetics')\n mycursor = b.cursor()\n myquery = \"select * from cosmetics\"\n mycursor.execute(myquery)\n index = 1\n for rec in mycursor:\n Label(f3, text=rec[0], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=0)\n Label(f3, text=rec[1], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=1)\n Label(f3, text=rec[2], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=2)\n index = index + 1\n mycursor.close()\n b.commit()\n b.close()\n f4 = LabelFrame(top, text=\"Grocery Products\", font=(\"times new roman\", 15, \"bold\"),fg=\"green\",bg=\"yellow\",relief=SUNKEN,bd=12)\n f4.place(x=850, y=70, width=450, height=330)\n serialno = Label(f4, text=\"serialno\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n serialno.grid(row=0, column=0, padx=20, pady=5)\n Prod_Name = Label(f4, text=\"Prod_Name\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Prod_Name.grid(row=0, column=1, padx=20, pady=5)\n Rupees = Label(f4, text=\"Rupees\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Rupees.grid(row=0, column=2, padx=20, pady=5)\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='grocery')\n 
mycursor = b.cursor()\n myquery = \"select * from grocery\"\n mycursor.execute(myquery)\n index = 1\n for rec in mycursor:\n Label(f4, text=rec[0], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=0)\n Label(f4, text=rec[1], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=1)\n Label(f4, text=rec[2], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=2)\n index = index + 1\n mycursor.close()\n b.commit()\n b.close()\n f5 = LabelFrame(top, text=\"Softdrinks Products\", font=(\"times new roman\", 15, \"bold\"),fg=\"green\",bg=\"yellow\",relief=SUNKEN,bd=12)\n f5.place(x=5, y=420, width=450, height=330)\n serialno = Label(f5, text=\"serialno\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n serialno.grid(row=0, column=0, padx=20, pady=5)\n Prod_Name = Label(f5, text=\"Prod_Name\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Prod_Name.grid(row=0, column=1, padx=20, pady=5)\n Rupees = Label(f5, text=\"Rupees\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Rupees.grid(row=0, column=2, padx=20, pady=5)\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='softdrinks')\n mycursor = b.cursor()\n myquery = \"select * from softdrinks\"\n mycursor.execute(myquery)\n index = 1\n for rec in mycursor:\n Label(f5, text=rec[0], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=0)\n Label(f5, text=rec[1], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=1)\n Label(f5, text=rec[2], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=2)\n index = index + 1\n mycursor.close()\n b.commit()\n b.close()\n f6 = LabelFrame(top, text=\"Household Products\", font=(\"times new roman\", 15, \"bold\"),fg=\"green\",bg=\"yellow\",bd=12,relief=SUNKEN)\n f6.place(x=850, y=420, width=450, height=330)\n serialno = Label(f6, text=\"serialno\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n serialno.grid(row=0, column=0, padx=20, pady=5)\n Prod_Name = Label(f6, text=\"Prod_Name\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Prod_Name.grid(row=0, column=1, padx=20, pady=5)\n Rupees = Label(f6, text=\"Rupees\", font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\")\n Rupees.grid(row=0, column=2, padx=20, pady=5)\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='household')\n mycursor = b.cursor()\n myquery = \"select * from household\"\n mycursor.execute(myquery)\n index = 1\n for rec in mycursor:\n Label(f6, text=rec[0], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=0)\n Label(f6, text=rec[1], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=1)\n Label(f6, text=rec[2], font=(\"times new roman\", 19, \"bold\"),bg=\"yellow\",fg=\"black\").grid(row=index, column=2)\n index = index + 1\n mycursor.close()\n b.commit()\n b.close()\n\n btn=Button(top, text=\"Next\",font=(\"times new roman\", 19, \"bold\"), bg=\"black\", fg=\"white\", command=cosmo)\n btn.place(x=600,y=640,width=165,height=50)\n\n\ndef showlog():\n b = py.connect(user='root', password='Lucknow@123', host='localhost', database='managementsystemdb')\n usertext=text10.get()\n usertext1=text11.get()\n x=0\n mycursor=b.cursor()\n 
myquery=\"select * from register\"\n mycursor.execute(myquery)\n allrows=mycursor.fetchall()\n for row in allrows:\n if row[0] == usertext and row[1] == usertext1:\n messagebox.showinfo(\"Login\",\"login successfully\")\n top=Toplevel()\n top.title(\"Billing Management System\")\n top.geometry(\"655x400\")\n title=Label(top,text=\"Customer Records\",font=(\"times new roman\", 30, \"bold\"),bg=\"Yellow\",fg=\"Green\",relief=SUNKEN,bd=12)\n title.place(x=0,y=0,relwidth=1)\n global text_rec\n global text_rec1\n global text_rec2\n global text_rec3\n global text_rec4\n global text_rec5\n f = Label(top, text=\"Customer_ID\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f.place(x=350, y=150)\n text_rec = Entry(top, width=15, font=\"comicsansms 19 bold\",relief=SUNKEN,bd=12)\n text_rec.place(x=350, y=180)\n f1 = Label(top, text=\"Customer_fName\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f1.place(x=350, y=240)\n text_rec1 = Entry(top, width=15, font=\"comicsansms 19 bold\",relief=SUNKEN,bd=12)\n text_rec1.place(x=350, y=270)\n f2 = Label(top, text=\"Customer_Contact\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f2.place(x=360, y=325)\n text_rec2 = Entry(top, width=15, font=\"comicsansms 19 bold\",relief=SUNKEN,bd=12)\n text_rec2.place(x=345, y=360)\n f3 = Label(top, text=\"Customer_Address\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f3.place(x=700, y=150)\n text_rec3 = Entry(top, width=15, font=\"comicsansms 19 bold\",relief=SUNKEN,bd=12)\n text_rec3.place(x=665, y=180)\n f4 = Label(top, text=\"Customer_LName\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f4.place(x=700, y=240)\n text_rec4 = Entry(top, width=15, font=(\"comicsansms 19 bold\"),relief=SUNKEN,bd=12)\n text_rec4.place(x=665, y=271)\n f5 = Label(top, text=\"Customer_email\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n f5.place(x=700, y=327)\n text_rec5 = Entry(top, width=15, font=(\"comicsansms 19 bold\"),relief=SUNKEN,bd=12)\n text_rec5.place(x=665, y=360)\n btn=Button(top, text=\"Submit\",font=(\"times new roman\", 19, \"bold\"),command=submit1,bg=\"Black\",fg=\"White\")\n btn.place(x=550,y=450,width=165,height=50)\n mycursor.close()\n b.commit()\n b.close()\ndef show():\n b=py.connect(user='root',password='Lucknow@123',host='localhost',database='managementsystemdb')\n str1=text.get()\n str2=text1.get()\n str3=text2.get()\n str4=text3.get()\n str5=text4.get()\n str6=text5.get()\n str7=text6.get()\n str8=text7.get()\n str9=cmb.get()\n str10=text9.get()\n\n mycursor=b.cursor()\n myquery=\"insert into register values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tuple=(str1,str2,str3,str4,str5,str6,str7,str8,str9,str10)\n mycursor.execute(myquery,tuple)\n b.commit()\n b.close()\n messagebox.showinfo(\"Registered\",\"You are Registered Successfully\")\ndef abt():\n top=Toplevel()\n top.title(\"About US\")\n top.geometry(\"655x500\")\n load=Image.open(\"C:\\\\Users\\\\PC\\\\Downloads\\\\WhatsApp Image 2020-05-18 at 11.35.40 PM.jpeg\")\n render=ImageTk.PhotoImage(load)\n load=load.resize((200,200))\n l1=Label(top,image=render)\n l1.place(x=5,y=4)\n l2=Label(top,text=\"Welcome in our Billing Software\\n This project is made by Shivam Saxena\\n.I am \"\n \"a native of Lucknow and a\\n student of Babu Banarsi Das\\n Engineering College Lucknow.\\n\\nI have made this \"\n \"project on Python\\n Language to 
increase my knowledge\\n in coding and traditional life. \",\n fg=\"black\",\n font=\"comicsansms 19 bold\")\n l2.place(x=790,y=12)\n top.mainloop()\n\ndef reg():\n top=Toplevel()\n top.geometry(\"655x400+0+0\")\n top.title(\"Registration Window\")\n global text\n global text1\n global text2\n global text3\n global text4\n global text5\n global text6\n global text7\n global cmb\n global text9\n title = Label(top, text=\"Register Screen\", font=(\"times new roman\", 30, \"bold\"),fg=\"green\", bd=12, relief=SUNKEN)\n title.place(x=0, y=0, relwidth=1)\n l1=Label(top,text=\"user_id\",width=15,padx=5,pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l1.place(x=350,y=150)\n text=Entry(top,width=15,font=\"comicsansms 19 bold\")\n text.place(x=350,y=180)\n l2 = Label(top, text=\"user_pass\", width=15, padx=5, pady=3,font=(\"times new roman\",15,\"bold\"),fg=\"grey\")\n l2.place(x=750, y=150)\n text1 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text1.place(x=750, y=185)\n l3 = Label(top, text=\"user_fname\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l3.place(x=365, y=250)\n text2 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text2.place(x=345, y=280)\n l4 = Label(top, text=\"user_lname\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l4.place(x=750, y=245)\n text3 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text3.place(x=750, y=280)\n l5 = Label(top, text=\"user_contact\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l5.place(x=370, y=350)\n text4 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text4.place(x=350, y=380)\n l6 = Label(top, text=\"user_email\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l6.place(x=750, y=350)\n text5 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text5.place(x=750, y=380)\n l7 = Label(top, text=\"user_address\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l7.place(x=375, y=445)\n text6 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text6.place(x=350, y=475)\n l8 = Label(top, text=\"user_city\", width=15, padx=5, pady=33,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l8.place(x=740, y=415)\n text7 = Entry(top, width=15,font=\"comicsansms 19 bold\")\n text7.place(x=753, y=475)\n l9 = Label(top, text=\"user_sec_ques\", width=15, padx=5, pady=33,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l9.place(x=380, y=510)\n cmb = ttk.Combobox(top, width=15,font=(\"comicsansms 19 bold\"),state='readonly',justify=CENTER)\n cmb['values']=(\"Select\",\"Your Fav Colour\",\"Your Birth Place\",\"Your Best Friend\")\n cmb.place(x=350, y=570)\n cmb.current(0)\n l10 = Label(top, text=\"user_sec_ans\", width=15, padx=5, pady=3,font=(\"times new roman\", 15, \"bold\"),fg=\"grey\")\n l10.place(x=750, y=540)\n text9=Entry(top, width=15,font=\"comicsansms 19 bold\")\n text9.place(x=750, y=575)\n btn = Button(top, padx=4, pady=5, text=\"Register Now\", font=\"comicsansms 19 bold\",command=show)\n btn.place(x=565,y=650)\n top.mainloop()\ndef log():\n top=Toplevel()\n top.geometry(\"655x400+0+0\")\n top.title(\"Login Window\")\n global text10;\n global text11\n title = Label(top, text=\"Login Screen\", font=(\"times new roman\", 30, \"bold\"), fg=\"green\", bd=12, relief=SUNKEN,bg=\"yellow\")\n title.place(x=0, y=0, relwidth=1)\n l1 = Label(top, text=\"user_id\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), 
fg=\"Black\")\n l1.place(x=550, y=150)\n text10 = Entry(top, width=15, font=(\"comicsansms 19 bold\"),bd=12,relief=SUNKEN)\n text10.place(x=550, y=190)\n l2 = Label(top, text=\"user_pass\", width=15, padx=5, pady=3, font=(\"times new roman\", 15, \"bold\"), fg=\"Black\")\n l2.place(x=550, y=260)\n text11 = Entry(top, width=15, font=(\"comicsansms 19 bold\"),bd=12,relief=SUNKEN)\n text11.place(x=550, y=300)\n btn = Button(top, padx=6, pady=3, text=\"Login\", font=\"comicsansms 19 bold\",command=showlog,bg=\"Black\",fg=\"White\")\n btn.place(x=610,y=400)\n top.mainloop()\nif __name__==\"__main__\":\n b1=Button(root,padx=3,pady=5,font=\"comicsansms 19 bold\",command=log)\n b1.place(relx=0.5,rely=0.3,anchor=\"center\")\n b2=Button(root,padx=4,pady=5,font=\"comicsansms 19 bold\",command=reg)\n b2.place(relx=0.5,rely=0.5,anchor=\"center\")\n b3=Button(root,padx=15,pady=5,font=\"comicsansms 19 bold\",command=abt)\n b3.place(relx=0.5,rely=0.7,anchor=\"center\")\n load = Image.open(\"C:\\\\Users\\\\PC\\\\Documents\\\\login3 image.jpg\")\n render = ImageTk.PhotoImage(load)\n b1.config(image=render)\n load1 = Image.open(\"C:\\\\Users\\\\PC\\\\Documents\\\\register image.jpg\")\n render1 = ImageTk.PhotoImage(load1)\n b2.config(image=render1)\n load2 = Image.open(\"C:\\\\Users\\\\PC\\Documents\\\\about us.jpg\")\n render2 = ImageTk.PhotoImage(load2)\n b3.config(image=render2)\n\n root.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Billing.py","file_name":"Billing.py","file_ext":"py","file_size_in_byte":39439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"430285643","text":"import httplib as http, urllib as url\n\nparams = url.urlencode({'age': '100'})\nheaders = {\"Content-type\": \"application/x-www-form-urlencoded\",\n\t \"Accept\": \"text/plain\"}\n\nconn = http.HTTPConnection(\"www.wsb.com\")\nconn.request(\"POST\", \"/Assignment2/case01.php\", params, headers)\nres = conn.getresponse()\ncontent = res.read()\nconn.close()\n\nimport os \nimport webbrowser as browser\npath = os.path.abspath('case01response.html') \nurl = 'file://' + path \nwith open(path, 'w') as f: \n\tf.write(content) \nbrowser.get('firefox').open_new_tab(url)\n\n","sub_path":"exploit12a-3.py","file_name":"exploit12a-3.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448881662","text":"#!/usr/bin/env python\n\nimport gym\nimport gym_csv\n\nimport numpy as np\nimport time\n\n# X points down (rows)(v), Y points right (columns)(>), Z would point outwards.\nRIGHT = 0 # > Increase Y (column)\nUP = 1 # ^ Decrease X (row)\nLEFT = 2 # < Decrease Y (column)\nDOWN = 3 # v Increase X (row)\n\nSIM_PERIOD_MS = 500.0\n\nenv = gym.make('csv-pygame-v0')\nstate = env.reset()\nprint(\"state: \" + str(state))\nenv.render()\ntime.sleep(0.5)\n\nfor i in range(5):\n new_state, reward, done, _ = env.step(RIGHT)\n env.render()\n print(\"new_state: \" + str(new_state) + \", reward: \" + str(reward) + \", done: \" + str(done))\n time.sleep(SIM_PERIOD_MS/1000.0)\n\n\nfor i in range(6):\n new_state, reward, done, _ = env.step(DOWN)\n env.render()\n print(\"new_state: \" + str(new_state) + \", reward: \" + str(reward) + \", done: \" + str(done))\n time.sleep(SIM_PERIOD_MS/1000.0)\n","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"585446468","text":"# 邮箱要求:1、@之前包含4~20字母数字;2、邮箱可以是163、126、qq;3、提取邮箱的前缀和邮箱类型\n\nimport re\n\n\ndef main():\n email_addr = input(\"请输入邮箱地址:\")\n # 注意:若在正则表达式中需要用到的普通字符在正则中被使用,如(.),可使用 \\ 进行转意\n ret = re.match(r'^([a-zA-Z0-9]{4,20})@(163|126|qq)\\.com$', email_addr) # ()的分组功能可以被单独取出\n if ret:\n print(F\"你输入的邮箱符合要求,邮箱前缀为:{ret.group(1)},邮箱类型为:{ret.group(2)}\")\n else:\n print(\"输入有误\")\n\n\nif __name__ == '__main__':\n main()","sub_path":"python/03.web服务器/01.正则表达式/03_判断邮箱是否符合要求.py","file_name":"03_判断邮箱是否符合要求.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464373950","text":"import logging\n\n\nclass Logger:\n def __new__(cls):\n _logger = logging.getLogger('internal')\n ch = logging.StreamHandler()\n formatter = logging.Formatter('[%(asctime)s] - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = Logger()\n","sub_path":"bolinette/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502078799","text":"#!/usr/bin/env python\nimport json\nimport os\nimport sys\nfrom cv2 import DualTVL1OpticalFlow\nfrom random import shuffle, randint\n\nimport cv2\nimport numpy as np\nfrom keras.utils import Sequence\n\nfrom utils import yield_frames\n\n# TVL1: DualTVL1OpticalFlow = cv2.DualTVL1OpticalFlow_create(warps=3)\nTVL1: DualTVL1OpticalFlow = cv2.DualTVL1OpticalFlow_create()\n\n\nclass BatchSeq(Sequence):\n\n def __init__(self, seq: Sequence, batch_size=16):\n self.batch_size = batch_size\n self.seq = seq\n self.len = int(len(seq) / batch_size)\n\n def __getitem__(self, index):\n s = index * self.batch_size\n e = s + self.batch_size\n\n x_batch = []\n y_batch = []\n for i in range(s, e):\n x, y = self.seq[i]\n x_batch.append(x)\n y_batch.append(y)\n\n return np.array(x_batch), np.array(y_batch)\n\n def __len__(self):\n return self.len\n\n\ndef calc_flow(gray, prev):\n curr_flow = cv2.calcOpticalFlowFarneback(prev, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n # curr_flow = TVL1.calc(prev, gray, None)\n\n # curr_flow = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n curr_flow[curr_flow >= 20] = 20\n curr_flow[curr_flow <= -20] = -20\n\n # scale to [-1, 1]\n max_val = max(curr_flow.max(), abs(curr_flow.min()))\n if max_val == 0:\n return curr_flow\n else:\n curr_flow = curr_flow / max_val\n\n return curr_flow\n\n\nclass SmokeGifSequence(Sequence):\n\n def __init__(self, data_dir: str, neg_txt: str, pos_txt: str, input_shape_hwc: tuple, batch_size=16,\n show_data=False, only_temporal=False, only_spacial=False):\n self.only_temporal = only_temporal\n self.only_spacial = only_spacial\n self.batch_size = batch_size\n\n self.show = show_data\n self.input_hwc = input_shape_hwc\n self.data_dir = data_dir\n\n with open(os.path.join(data_dir, neg_txt), 'r') as list_f:\n items = list(map(lambda l: [l.strip(), 0], list_f.readlines()))\n\n with open(os.path.join(data_dir, pos_txt), 'r') as list_f:\n p = list(map(lambda l: [l.strip(), 1], list_f.readlines()))\n items.extend(p)\n\n self.file_and_clsid = items\n shuffle(self.file_and_clsid)\n\n self.num_batches = int(np.ceil(len(self.file_and_clsid) / batch_size))\n\n def __getitem__(self, index):\n xx, y_batch = self.rgb_and_flows_batch(index)\n rgb = xx[0]\n flow = xx[1]\n\n if self.only_temporal:\n return flow, y_batch\n elif self.only_spacial:\n return rgb, y_batch\n else:\n 
return xx, y_batch\n\n def __len__(self):\n return self.num_batches\n\n @staticmethod\n def flow_to_hsv(dst_hsv: np.ndarray, mag_ang: tuple):\n mag, ang = mag_ang\n dst_hsv[..., 0] = ang * 180 / np.pi / 2\n dst_hsv[..., 1] = 255\n dst_hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n return dst_hsv\n\n def rgb_and_flows(self, video_file: str, flows_count: int = 10,\n drop_every_n_frame: int = 2,\n drop_first_n_frames: int = -1):\n old_gray = None\n rgb = None\n crop_x1 = randint(0, 32)\n crop_x2 = randint(0, 32)\n crop_y1 = randint(0, 32)\n crop_y2 = randint(0, 32)\n\n if drop_first_n_frames < 0:\n cap = cv2.VideoCapture(video_file)\n frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n if frame_count > 10 * drop_every_n_frame:\n drop_first_n_frames = randint(0, frame_count - 10 * drop_every_n_frame - 1)\n else:\n drop_first_n_frames = 0\n\n flows_mag_ang = np.zeros(shape=(self.input_hwc[0], self.input_hwc[1], flows_count * 2))\n for fn, bgr in yield_frames(video_file, start_frame=drop_first_n_frames):\n h, w, c = bgr.shape\n bgr = bgr[crop_y1:h - crop_y2, crop_x1:w - crop_x2]\n\n bgr = cv2.resize(bgr, dsize=(self.input_hwc[1], self.input_hwc[0]))\n gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n\n if old_gray is None:\n old_gray = gray\n\n if fn % drop_every_n_frame != 0:\n continue\n\n end_frame = drop_every_n_frame * flows_count + 1\n if fn < end_frame:\n flow_frame = 2 * int(fn / drop_every_n_frame) - 2\n\n curr_flow = calc_flow(gray, old_gray)\n\n flows_mag_ang[:, :, flow_frame] = curr_flow[:, :, 0]\n flows_mag_ang[:, :, flow_frame + 1] = curr_flow[:, :, 1]\n old_gray = gray\n else:\n break\n\n assert rgb is not None, \"zero frame for %s; drop_first_n_frames=%d\" % (video_file, drop_first_n_frames)\n\n return rgb, flows_mag_ang\n\n def best_frame_for_cls_id(self, cls_id: int, drop_every_n_frame, flows_count, json_lf):\n with open(json_lf) as f:\n predictions_by_frame = [json.loads(s.strip()) for s in f]\n\n total_frames = len(predictions_by_frame)\n class1_prob = np.array([x[1][cls_id] for x in predictions_by_frame])\n rand = np.random.uniform(0, 1, total_frames)\n # find center frame, probably max of the sequence\n center_frame_num = int(np.argmax(class1_prob * rand))\n # check that first frame is > 0\n drop_first_n_frames = max(1, center_frame_num - flows_count * drop_every_n_frame // 2)\n # check that last frame is <= n\n drop_first_n_frames += min(0, total_frames - center_frame_num - flows_count * drop_every_n_frame // 2)\n return drop_first_n_frames\n\n def rgb_and_flows_batch(self, index):\n files_count = len(self.file_and_clsid)\n start_index = min(files_count - self.batch_size, index * self.batch_size)\n\n flows_count: int = 10\n xrgb_batch = []\n xflow_batch = []\n y_batch = np.zeros(shape=(self.batch_size, 2))\n for i in range(0, self.batch_size):\n # print(\"dataset_batches=%d, data_items=%d batch_index=%d, file_index=%d\" % (\n # self.num_batches, files_count, index, start_index + i))\n\n gif, cls_id = self.file_and_clsid[start_index + i]\n video_path = os.path.join(self.data_dir, gif)\n\n drop_every_n_frame = 1\n drop_first_n_frames = -1\n\n human_y = os.path.join(self.data_dir, \"jsonl.byhuman\", gif, 'result.jsonl')\n machine_y = os.path.join(self.data_dir, \"jsonl\", gif, 'result.jsonl')\n\n if os.path.isfile(human_y):\n drop_first_n_frames = self.best_frame_for_cls_id(cls_id, drop_every_n_frame, flows_count, human_y)\n\n elif os.path.isfile(machine_y):\n # print(\"JSONL best frame is %d : %start_index\" % 
(drop_first_n_frames, gif))\n drop_first_n_frames = self.best_frame_for_cls_id(cls_id, drop_every_n_frame, flows_count, machine_y)\n else:\n raise Exception(\"Oooops. No JSONL\")\n\n # print(\"video_path=%s; start_frame=%d; total_frames=%d\" % (video_path, drop_first_n_frames, frame_count))\n x_rgb, x_flows = self.rgb_and_flows(video_path,\n drop_every_n_frame=drop_every_n_frame,\n drop_first_n_frames=drop_first_n_frames,\n flows_count=flows_count)\n\n x_rgb = x_rgb / 127.5 - 1 # norm\n xrgb_batch.append(x_rgb)\n xflow_batch.append(x_flows)\n y_batch[i][cls_id] = 1.\n\n return [np.array(xrgb_batch), np.array(xflow_batch)], y_batch\n\n\ndef test():\n # data_dir = \"/blender/storage/datasets/vg_smoke/\"\n # seq = SmokeGifSequence(data_dir, neg_txt='validate.txt', pos_txt='validate.txt',\n # input_shape_hwc=(300, 600, 3),\n # batch_size=2,\n # only_spacial=False, only_temporal=False)\n\n data_dir = \"/blender/storage/datasets/vg_smoke\"\n seq = SmokeGifSequence(data_dir, neg_txt='negatives.txt', pos_txt='positives.txt',\n input_shape_hwc=(300, 600, 3),\n only_spacial=False, only_temporal=False)\n\n for i in range(len(seq)):\n print(\"i=%d\" % i)\n # x_rgb_b, x_flows_b, y_b = seq[i]\n xx, y_b = seq[i]\n x_rgb_b, x_flows_b = xx\n\n for j in range(len(x_rgb_b)):\n x_rgb, y = x_rgb_b[j], y_b[j]\n x_rgb = (x_rgb + 1) * 127.5\n x_flows = x_flows_b[j]\n\n bgr = cv2.cvtColor(x_rgb.astype(np.uint8), cv2.COLOR_RGB2BGR)\n\n hsv = np.zeros_like(x_rgb)\n mag = x_flows[:, :, 18]\n ang = x_flows[:, :, 19]\n hsv = seq.flow_to_hsv(dst_hsv=hsv, mag_ang=(mag, ang))\n\n flow_mask = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)\n # flow_mask = 255 - cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)\n\n # cv2.imshow(\"bgr\", bgr)\n cls_id = np.argmax(y)\n cv2.imshow(\"frame %d\" % cls_id, bgr)\n cv2.imshow(\"flow %d\" % cls_id, flow_mask)\n\n c = cv2.waitKey(0)\n if c == 27:\n sys.exit(1)\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2448821","text":"#\n# @lc app=leetcode id=1091 lang=python3\n#\n# [1091] Shortest Path in Binary Matrix\n#\n\n\"\"\"\nIn an N by N square grid, each cell is either empty (0) or blocked (1).\n\nA clear path from top-left to bottom-right has length k if and only if it is composed of cells C_1, C_2, ..., C_k such that:\n\nAdjacent cells C_i and C_{i+1} are connected 8-directionally (ie., they are different and share an edge or corner)\nC_1 is at location (0, 0) (ie. has value grid[0][0])\nC_k is at location (N-1, N-1) (ie. has value grid[N-1][N-1])\nIf C_i is located at (r, c), then grid[r][c] is empty (ie. grid[r][c] == 0).\nReturn the length of the shortest such clear path from top-left to bottom-right. 
If such a path does not exist, return -1.\n\n\nExample 1:\n\nInput: [[0,1],[1,0]]\nOutput: 2\n\n\nExample 2:\n\nInput: [[0,0,0],[1,1,0],[1,1,0]]\nOutput: 4\n \n\nNote:\n\n1 <= grid.length == grid[0].length <= 100\ngrid[i][j] is 0 or 1\n\"\"\"\n\nfrom collections import deque\n\nclass Solution(object):\n EMPTY = 0\n BLOCK = 1\n \n DIRECTIONS = [[-1, -1], [-1, 1], [1, -1], [1, 1], [0, 1], [0, -1], [1, 0], [-1, 0]]\n \n def shortestPathBinaryMatrix(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid or len(grid) == 0 or len(grid[0]) == 0 or grid[0][0] != self.EMPTY or grid[len(grid) - 1][len(grid[0]) - 1] != self.EMPTY:\n return -1\n\n m, n = len(grid), len(grid[0])\n \n step = 1\n queue = deque([(0, 0)])\n visited = set([(0, 0)])\n while queue:\n step += 1\n size = len(queue)\n for _ in range(size):\n x, y = queue.popleft()\n for d in self.DIRECTIONS:\n x_, y_ = x + d[0], y + d[1]\n if self._inBound(x_, y_, m, n, grid, visited):\n if x_ == m - 1 and y_ == n - 1:\n return step\n queue.append((x_, y_))\n visited.add((x_, y_))\n \n return -1\n \n \n def _inBound(self, x, y, m, n, grid, visited):\n return 0 <= x < m and 0 <= y < n and grid[x][y] == self.EMPTY and (x, y) not in visited\n","sub_path":"1091.shortest-path-in-binary-matrix.py","file_name":"1091.shortest-path-in-binary-matrix.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39141353","text":"tauMuAllHists = ['mu/mu_h_eta',\n 'mu/mu_h_iso',\n 'mu/mu_h_iso_abs',\n 'mu/mu_h_iso_abs_dbeta',\n 'mu/mu_h_iso_ch',\n 'mu/mu_h_iso_dbeta',\n 'mu/mu_h_iso_nh',\n 'mu/mu_h_iso_ph',\n 'mu/mu_h_pt',\n 'tau/tau_h_eta',\n 'tau/tau_h_iso',\n 'tau/tau_h_iso_abs',\n 'tau/tau_h_iso_abs_dbeta',\n 'tau/tau_h_iso_ch',\n 'tau/tau_h_iso_dbeta',\n 'tau/tau_h_iso_nh',\n 'tau/tau_h_iso_ph',\n 'tau/tau_h_pt',\n 'tauMu/tauMu_h_mT',\n 'tauMu/tauMu_h_pzeta',\n 'tauMu/tauMu_h_svfitmass',\n 'tauMu/tauMu_h_vismass',\n 'vertex/vertex_h_nvertices' ]\n\ndef histogramSet( options, allHists=None):\n if allHists is None:\n allHists = tauMuAllHists\n hists = set()\n if options.histlist is not None:\n hists = set([ hist for hist in options.histlist.split(',') if hist is not '' ])\n elif options.histgroup is None:\n hists = set( allHists )\n else:\n histgroup = options.histgroup\n groups = [ group for group in histgroup.split(',') if group is not '']\n if 'TAUMU' in groups:\n hists.update([\n # 'mu/mu_h_iso_dbeta',\n # 'tau/tau_h_iso',\n # 'tau/tau_h_pt',\n 'tauMu/tauMu_h_mT',\n #'tauMu/tauMu_h_pzeta',\n 'tauMu/tauMu_h_svfitmass',\n 'tauMu/tauMu_h_vismass',\n # 'vertex/vertex_h_nvertices'\n ])\n if 'TAU' in groups:\n hists.update([hist for hist in allHists if 'tau_' in hist])\n if 'MU' in groups:\n hists.update([hist for hist in allHists if 'mu_' in hist]) \n \n return hists\n\n","sub_path":"CMGTools/H2TauTau/python/proto/HistogramSet.py","file_name":"HistogramSet.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351649020","text":"class Configuration:\n # hyper-parameters for vehicle detection\n def __init__(self):\n # flags\n self.is_training_png = True\n self.save_debug_samples = False\n\n # which features to include\n self.with_spatial_feature = True\n self.with_color_feature = True\n self.with_gradient_feature = True\n\n # HOG params\n self.orient = 10\n self.pix_per_cell = 8\n self.cell_per_block = 2\n\n # spatial and color params\n 
self.spatial_size = (32, 32)\n self.hist_bins = 32\n self.hog_channel = 'ALL' # 'ALL' # 0, 1, 2, or \"ALL\"\n self.hist_range = (0, 256)\n self.cspace = 'YCrCb' # RGB, HSV, LUV, HLS, YUV, YCrCb\n self.channels=3\n\n # sliding window search params\n self.window_size = 64 # as size of training images is 64x64\n self.cells_per_step = 2\n self.window_color = (0, 0, 255)\n self.window_thickness = 3\n self.scale = 1.7\n self.skip_frames=2\n\n # camera calibration and classifier pickles\n self.classifier = \"../classifier.p\"\n self.calibration_parameters = \"../calibration_parameters.p\"\n\n # ROI\n self.xy_start_stop_left = (0, 400), (370, 600)\n self.xy_start_stop_top = (400, 800), (380, 560)\n self.xy_start_stop_right = (800, 1270), (370, 600)\n\n # heatmap\n self.history_limit=8\n self.threshold = 2\n # training datasets\n self.training_not_cars = \"../training_datasets/non-vehicles/*/*.png\"\n self.training_cars = \"../training_datasets/vehicles/*/*.png\"\n self.training_not_cars_small = \"../training_datasets_small/non-vehicles_smallset/*/*.jpeg\"\n self.training_cars_small = \"../training_datasets_small/vehicles_smallset/*/*.jpeg\"\n\n # testing datasets\n self.testing_video = \"../test_video.mp4\"\n self.testing_video_2 = \"../test_video_2.avi\"\n self.project_video = \"../project_video.mp4\"\n","sub_path":"implementation/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"346114206","text":"#!/usr/bin/python3\n\nimport xml.etree.ElementTree as etree\n\nimport standard_format_constants as std\n\n\nclass IssueConverter:\n \"\"\"A class which provides an interface for converting data\n to the standard XML format.\n \"\"\"\n\n def __init__(self, data_extractor):\n self.extractor = data_extractor\n\n def convert_issue(self, issue_data_string):\n extractor = self.extractor\n preprocessed_data = extractor.parse_input_issue_data(issue_data_string)\n\n issue_tag = self.create_issue_tag(preprocessed_data)\n creation_tag = self.create_creation_tag(preprocessed_data)\n resolution_tag = self.create_resolution_tag(preprocessed_data)\n description_tag = self.create_description_tag(preprocessed_data)\n comments_tag = self.create_comments_tag(preprocessed_data)\n attachments_tag = self.create_attachments_tag(preprocessed_data)\n\n issue_tag.append(creation_tag)\n issue_tag.append(resolution_tag)\n issue_tag.append(description_tag)\n issue_tag.append(comments_tag)\n issue_tag.append(attachments_tag)\n\n return etree.tostring(issue_tag).decode(\"utf-8\")\n\n def create_issue_tag(self, issue_data):\n issue_number = self.extractor.get_issue_number(issue_data)\n if issue_number is None:\n return None\n\n general_type, literal_type = self.extractor.get_issue_type(issue_data)\n\n issue_tag = etree.Element(std.ISSUE_TAG)\n issue_tag.attrib[std.ISSUE_NUMBER_ATTR] = str(issue_number)\n issue_tag.attrib[std.ISSUE_GENERAL_TYPE_ATTR] = general_type\n issue_tag.attrib[std.ISSUE_TYPE_ATTR] = literal_type\n return issue_tag\n\n def create_creation_tag(self, issue_data):\n creation_tag = etree.Element(std.CREATION_TAG)\n\n creation_date = self.extractor.get_creation_date(issue_data)\n if creation_date is not None:\n date_str = creation_date.strftime(std.DATE_FORMAT)\n creation_tag.attrib[std.CREATION_CREATION_DATE_ATTR] = date_str\n\n creation_author = self.extractor.get_creation_author(issue_data)\n if creation_author is not None:\n 
creation_tag.attrib[std.CREATION_CREATED_BY_ATTR] = creation_author\n\n return creation_tag\n\n def create_resolution_tag(self, issue_data):\n resolution_tag = etree.Element(std.RESOLUTION_TAG)\n OPEN = std.RESOLUTION_RESOLVED_OPEN\n CLOSED = std.RESOLUTION_RESOLVED_CLOSED\n resolved_flag_attr = std.RESOLUTION_RESOLVED_ATTR\n resolved_date_attr = std.RESOLUTION_RESOLUTION_DATE_ATTR\n\n resolution_date = self.extractor.get_resolution_date(issue_data)\n if resolution_date is None:\n resolution_tag.attrib[resolved_flag_attr] = OPEN\n else:\n date_str = resolution_date.strftime(std.DATE_FORMAT)\n resolution_tag.attrib[resolved_flag_attr] = CLOSED\n resolution_tag.attrib[resolved_date_attr] = date_str\n\n return resolution_tag\n\n def create_description_tag(self, issue_data):\n description_tag = etree.Element(std.DESCRIPTION_TAG)\n\n description_text = self.extractor.get_description_text(issue_data)\n if description_text is not None:\n description_tag.text = description_text\n\n return description_tag\n\n def create_comments_tag(self, issue_data):\n comments_tag = etree.Element(std.COMMENTS_TAG)\n\n raw_comments_data = self.extractor.get_raw_comments_data(issue_data)\n if raw_comments_data is None:\n return comments_tag\n\n for comment in raw_comments_data:\n comment_tag = self.create_comment_tag(comment)\n comments_tag.append(comment_tag)\n\n return comments_tag\n\n def create_comment_tag(self, raw_comment_data):\n comment = etree.Element(std.COMMENT_TAG)\n\n author = self.extractor.get_comment_author(raw_comment_data)\n if author is not None:\n comment.attrib[std.COMMENT_AUTHOR_ATTR] = author\n\n created_date = self.extractor.get_comment_date(raw_comment_data)\n if created_date is not None:\n comment.attrib[std.COMMENT_DATE_ATTR] = created_date\n\n comment_text = self.extractor.get_comment_text(raw_comment_data)\n if comment_text is not None:\n comment.text = comment_text\n\n return comment\n\n def create_attachments_tag(self, issue_data):\n attachments_tag = etree.Element(std.ATTACHMENTS_TAG)\n\n attachments = self.extractor.get_raw_attachments_data(issue_data)\n if attachments is None:\n return attachments_tag\n\n for attachment in attachments:\n attachment_tag = self.create_attachment_tag(attachment)\n attachments_tag.append(attachment_tag)\n\n return attachments_tag\n\n def create_attachment_tag(self, raw_attachment):\n attachment_tag = etree.Element(std.ATTACHMENT_TAG)\n\n author = self.extractor.get_attachment_author(raw_attachment)\n if author is not None:\n attachment_tag.attrib[std.ATTACHMENT_UPLOADED_BY_ATTR] = author\n\n date = self.extractor.get_attachment_date(raw_attachment)\n if date is not None:\n attachment_tag.attrib[std.ATTACHMENT_UPLOAD_DATE_ATTR] = date\n\n filename = self.extractor.get_attachment_name(raw_attachment)\n if filename is not None:\n attachment_tag.text = filename\n\n return attachment_tag\n","sub_path":"lib/issue_converter.py","file_name":"issue_converter.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"390853707","text":"import tkinter as tk # note that module name has changed from Tkinter in Python 2 to tkinter in Python 3\n\nwindow = tk.Tk()\nwindow.title('Checkbutton')\n\nwindow.geometry('200x200')\n\nl = tk.Label(window, text='',bg='grey')\nl.pack()\n\ncounter = 0\ndef do_job():\n global counter\n l.config(text='do '+ str(counter))\n counter += 1\n\nmenubar = tk.Menu(window)\n\n#file menu\nfilemenu = tk.Menu(menubar,tearoff = 
0)\nmenubar.add_cascade(label='File',menu=filemenu)\nfilemenu.add_command(label='New',command = do_job)\nfilemenu.add_command(label='Open',command = do_job)\nfilemenu.add_command(label='Save',command = do_job)\nfilemenu.add_separator()\n\nsubmenu = tk.Menu(filemenu)\nfilemenu.add_cascade(label='Import', menu=submenu,underline=0)\nsubmenu.add_command(label=\"Submenu1\",command = do_job)\nfilemenu.add_separator()\n\nfilemenu.add_command(label='Exit',command = window.quit)\n\n#edit menu\neditmenu = tk.Menu(menubar,tearoff = 0)\nmenubar.add_cascade(label='Edit',menu=editmenu)\neditmenu.add_command(label='Cut',command = do_job)\neditmenu.add_command(label='Copy',command = do_job)\neditmenu.add_command(label='Pase',command = do_job)\n\nwindow.config(menu=menubar)\nwindow.mainloop()","sub_path":"Menubar.py","file_name":"Menubar.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459609456","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef lzy(_A, _b):\n # 变换为增广矩阵\n for i, j in enumerate(b):\n _A[i].append(j)\n # 列主元消去法\n for i in range(len(_A)):\n t = [a[i] for a in _A][i:]\n if len(t) == 1:\n continue\n t_abs = list(np.abs(t))\n if max(t_abs) == 0:\n return False\n n_max = t_abs.index(max(t_abs)) + i\n temp = _A[i]\n _A[i] = _A[n_max]\n _A[n_max] = temp\n for j in range(len(_A) - i - 1):\n _A[j + i + 1] = list(np.array(_A[j + i + 1]) - np.array([k / _A[i][i] * _A[j + i + 1][i] for k in _A[i]]))\n print(\"列主元消去法得到的上三角矩阵为:\")\n for _i in _A:\n print(_i)\n return _A\n\n\ndef qj(_A):\n n=len(_A)\n x = np.zeros(n)\n for _i in range(n):\n x[n-_i-1] = (_A[n - _i - 1][n] - sum(x * _A[n - _i - 1][:-1]))/_A[n-_i-1][n-_i-1]\n return x\n\nif __name__ == \"__main__\":\n A = eval(input(\"请输入方程组参数矩阵\\n例如:\\n[[3.01, 6.03, 1.99], \\n[1.27, 4.16, -1.23], \\n[0.987, -4.81, 9.34]]\\n\"))\n b = eval(input(\"请输入方程组结果矩阵\\n例如:\\n[1, 1, 1]\\n\"))\n A = lzy(A, b)\n if A==False:\n print(\"计算错误\")\n else:\n x = qj(A)\n for i,xi in enumerate(x):\n print(\"x{:d}=\".format(i),xi)\n\n","sub_path":"线性方程组-列主元高斯消去法.py","file_name":"线性方程组-列主元高斯消去法.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"390798959","text":"from django.test import TestCase\nfrom django.urls import resolve\nfrom django.utils.html import escape\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpRequest\n\nfrom unittest.mock import patch, call, Mock\nfrom unittest import skip\n\nfrom lists.views import home_page, new_list, HomePageView\nfrom ..models import Item, List\nfrom ..forms import ItemForm, NewListForm\n\nUser = get_user_model()\n\n# Create your tests here.\nclass HomePageTest(TestCase):\n @skip\n def test_root_url_resolves_to_homepage(self):\n # resolve handles url processing\n found = resolve('/')\n\n # check if the function used in resolve is\n # homepage\n self.assertEqual(found.func, home_page)\n\n def test_root_url_uses_home_template(self):\n response = self.client.get('/')\n\n self.assertTemplateUsed(response, 'lists/home.html')\n\n def test_home_page_returns_correct_html(self):\n # create request\n response = self.client.get('/')\n html = response.content.decode('utf-8')\n\n\n self.assertTrue(html.startswith(''))\n self.assertIn('To-Do lists', html)\n self.assertTrue(html.strip().endswith(''))\n\n def test_home_page_uses_item_form(self):\n response = self.client.get('/')\n 
self.assertIsInstance(response.context['form'], ItemForm)\n\n\n\nclass ListViewTest(TestCase):\n def test_uses_list_template(self):\n list_ = List.objects.create()\n response = self.client.get(f'/lists/{ list_.id }/')\n self.assertTemplateUsed(response, 'lists/list.html')\n\n def test_display_only_items_for_that_list(self):\n # create items\n correct_list = List.objects.create()\n Item.objects.create(text='item 1', list=correct_list)\n Item.objects.create(text='item 2', list=correct_list)\n\n\n # check against another list\n other_list = List.objects.create()\n Item.objects.create(text='other 1', list=other_list)\n Item.objects.create(text='other 2', list=other_list)\n\n response = self.client.get(f'/lists/{ correct_list.id }/')\n\n\n ## assertContains know how to process byte containing response\n ## as a shortcut to decoding the content\n self.assertContains(response, 'item 1')\n self.assertContains(response, 'item 2')\n self.assertNotContains(response, 'other 1')\n self.assertNotContains(response, 'other 2')\n\n\n\n ## assertContains know how to processed byte containing response\n ## as a shortcut to decoding the content\n self.assertContains(response, 'item 1')\n self.assertContains(response, 'item 2')\n\n def test_passes_correct_list_to_template(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n response = self.client.get(f'/lists/{ correct_list.id }/')\n\n # check if the list in the context in the response is same as the correct_list\n self.assertEqual(response.context['list'], correct_list)\n\n\n # test if you can add an item to the existing list\n def test_can_append_a_new_item_in_post_request(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n # issue a post request\n self.client.post(f'/lists/{ correct_list.id }/',\n data={'text': 'A new item on existing list'}\n )\n\n self.assertEqual(Item.objects.count(), 1)\n # check if the first item as been saved\n new_item = Item.objects.first()\n # check the text is correct\n self.assertEqual(new_item.text, 'A new item on existing list')\n # check if the item is in the right list\n self.assertEqual(new_item.list, correct_list)\n\n def test_redirects_to_list_view(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n\n response = self.client.post(f'/lists/{ correct_list.id }/',\n data={'text': 'A new item on existing list'}\n )\n\n self.assertRedirects(response, f'/lists/{ correct_list.id }/')\n\n\n def test_for_invalid_input_renders_home_template(self):\n response = self.client.post('/lists/new', data={'text': ''})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'lists/home.html')\n\n\n def test_validation_errors_are_shown_on_home_page(self):\n response = self.client.post('/lists/new', data={'text': ''})\n self.assertContains(response, escape(\"You can't have an empty list item\"))\n\n def test_for_invalid_input_passes_form_to_template(self):\n response = self.client.post('/lists/new', data={'text':''})\n self.assertIsInstance(response.context['form'], ItemForm)\n\n def test_display_item_form_on_get(self):\n list_ = List.objects.create()\n response = self.client.get(f'/lists/{ list_.id }/')\n\n print('Response is: ', response)\n self.assertIsInstance(response.context['form'], ItemForm)\n self.assertContains(response, 'name=\"text\"')\n\n\n\n # refactor below\n # def test_validation_errors_ended_on_lists_page(self):\n # list_ = List.objects.create()\n # response = self.client.post(f'/lists/{ list_.id }/', 
data = {'text': ''})\n #\n # # response should be 200\n # self.assertEqual(response.status_code, 200)\n # self.assertTemplateUsed(response, 'lists/list.html')\n # print(response.content.decode())\n # expected_error = escape(\"You can't have an empty list item\")\n # self.assertContains(response, expected_error)\n\n def post_invalid_input(self):\n list_ = List.objects.create()\n return self.client.post(f'/lists/{ list_.id }/', data={'text': ''})\n\n def test_for_invalid_input_nothing_saved_to_db(self):\n self.post_invalid_input()\n self.assertEqual(Item.objects.count(), 0)\n\n def test_for_invalid_input_renders_list_template(self):\n response = self.post_invalid_input()\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'lists/list.html')\n\n def test_for_invalid_input_passed_form_to_template(self):\n response = self.post_invalid_input()\n self.assertIsInstance(response.context['form'], ItemForm)\n\n def test_for_invalid_input_shows_error_on_page(self):\n response = self.post_invalid_input()\n self.assertContains(response, escape(\"You can't have an empty list item\"))\n\n\n def test_duplication_item_validation_errors_end_up_on_lists_page(self):\n list1 = List.objects.create()\n item = Item.objects.create(list=list1, text='item')\n\n response = self.client.post(\n f'/lists/{list1.id}/',\n data={'text': 'item'}\n )\n\n self.assertContains(response, 'Duplicate item')\n self.assertTemplateUsed(response, 'lists/list.html')\n self.assertEqual(Item.objects.all().count(), 1)\n\n\nclass MyListsTest(TestCase):\n\n def test_my_lists_url_renders_my_lists_template(self):\n User.objects.create(email='user@tdd.com')\n response = self.client.get('/lists/users/user@tdd.com/')\n self.assertTemplateUsed(response, 'lists/my_lists.html')\n\n\n def test_passes_correct_owner_to_template(self):\n User.objects.create(email='wrong@tdd.com')\n correct_user = User.objects.create(email='user@tdd.com')\n\n response = self.client.get('/lists/users/user@tdd.com/')\n\n\n self.assertEqual(response.context['owner'], correct_user)\n\n\n# integrated tests\nclass NewListViewIntegratedTest(TestCase):\n\n def test_can_save_a_post_request(self):\n # no trailing slash for action url\n response =self.client.post('/lists/new', data={'text': 'A new list item'})\n\n self.assertEqual(Item.objects.count(), 1)\n new_item = Item.objects.first()\n self.assertEqual(new_item.text, 'A new list item')\n\n # test after saving an item redirects\n def test_redirects_after_post(self):\n response =self.client.post('/lists/new', data={'text': 'A new list item'})\n\n # check for a redirect status\n # self.assertEqual(response.status_code, 302)\n # self.assertEqual(response['location'], '/lists/the-only-list-in-the-world/')\n\n # check for a redirect status in one go\n new_list = List.objects.first()\n self.assertRedirects(response, f'/lists/{ new_list.id }/')\n\n # Not applicable if response is a HttpResponseRedirect\n # self.assertIn('A new list item', response.content.decode())\n # self.assertTemplateUsed(response, 'lists/home.html')\n\n # parameters start with inner level on multiple patching\n # @patch('lists.views.List')\n # @patch('lists.views.ItemForm')\n def test_list_owner_is_saved_if_user_is_authenticated(self):\n user = User.objects.create(email='user@tdd.com')\n self.client.force_login(user)\n\n # return the instance MagicMock with name: `List`\n # since were mocking ListClass\n\n self.client.post('/lists/new', data={'text': 'new item'})\n list_ = List.objects.first()\n self.assertEqual(list_.owner, user)\n 
'''\n mock_list = mock_ListClass.return_value\n print('mock list return value is: ', mock_list) # \n\n ## why this?\n # To make sure list.save is called\n # after list.owner is set\n ##\n\n # create a function to be called later\n def check_owne34/r_assigned():\n self.assertEqual(mock_list.owner, user)\n\n # attached it to save method as a side_effect\n # to make sure that the assertion is called after\n # save method is called\n mock_list.save.side_effect = check_owner_assigned\n\n self.client.post('/lists/new', data={'text': 'new item'})\n\n # check if the function with side effect has been called\n mock_list.save.assert_called_once_with()\n '''\n\n\n### isolated tests\n\n# mock NewListForm at class level\n# to use it in all tests\n# inside the class\n@patch('lists.views.NewListForm')\nclass NewListViewUnitTest(TestCase):\n\n def setUp(self):\n # set up a request here manually\n # since we would like to test\n # the view function\n # calling it manually by its name\n # instead of doing a self.client.post\n # requires also setting up the urls\n # call it with this argument\n self.request = HttpRequest()\n self.request.POST['text'] = 'new list item'\n # mock the user too\n self.request.user = Mock()\n\n def test_passes_POST_data_to_NewListForm(self, mock_NewListForm):\n new_list(self.request)\n\n # lets check if NewListForm has been called\n # and called with the POST data we have set up\n print('mock call args: ', mock_NewListForm.call_args)\n print('mock return value: ', mock_NewListForm.return_value)\n\n # Check using assertEqual\n self.assertEqual(mock_NewListForm.call_args, call(data=self.request.POST))\n\n # Check using assert_called_once_with\n mock_NewListForm.assert_called_once_with(data=self.request.POST)\n\n\n def test_saves_form_if_form_is_valid(self, mock_NewListForm):\n # form = NewListForm()\n # mock the statement above with this\n mock_form = mock_NewListForm.return_value\n\n print('mock form return value: ', mock_form)\n print('mock form: ', mock_NewListForm)\n\n # initialize the is_valid to return true\n mock_form.is_valid.return_value = True\n\n # call the view\n new_list(self.request)\n\n # and check if the form.save() is called\n # using the mock_form\n mock_form.save.assert_called_once_with(owner=self.request.user)\n\n def test_does_not_save_when_form_is_invalid(self, mock_NewListForm):\n mock_form = mock_NewListForm.return_value\n mock_form.is_valid.return_value = False\n\n new_list(self.request)\n\n self.assertFalse(mock_form.save.called)\n\n @patch('lists.views.redirect')\n def test_redirects_to_form_return_object_if_form_is_valid(self, mock_redirect, mock_NewListForm):\n mock_form = mock_NewListForm.return_value\n mock_form.is_valid.return_value = True\n\n # In view, this return a redirect\n # which we have mock it out\n response = new_list(self.request)\n\n print('Mock redirect return value: ', mock_redirect.return_value)\n print('response: ', response)\n\n # so both response and mock_redirect.return_value\n # should be equal\n # of class MagicMock\n self.assertEqual(response, mock_redirect.return_value)\n\n # we also need to check it its called with the right `arguments`\n # if redirect is called with arguments from the return value form.save(), -redirect(list_)\n # with the mock_form.save.return_value\n\n # the return value is used in view\n # so it must be an object\n mock_redirect.assert_called_once_with(mock_form.save.return_value)\n\n\n @patch('lists.views.render')\n def test_renders_home_template_with_form_if_the_form_is_invalid(self, mock_render, 
mock_NewListForm):\n mock_form = mock_NewListForm.return_value\n mock_form.is_valid.return_value = False\n\n response = new_list(self.request)\n\n self.assertEqual(response, mock_render.return_value)\n\n # return the form and home template and request using render\n mock_render.assert_called_once_with(self.request, 'lists/home.html', {'form': mock_form})\n\n @patch('lists.forms.List.create_new')\n def test_save_returns_new_list_object(self, mock_List_create_new, mock_NewListForm):\n user = Mock(is_authenticated=True)\n form = NewListForm(data={'text': 'item1'})\n\n form.is_valid()\n\n list_obj = form.save(owner=user)\n\n self.assertEqual(mock_List_create_new.return_value, list_obj)\n\n###\n\n\nclass ShareListTest(TestCase):\n\n def test_post_redirects_to_lists_page(self):\n User.objects.create(email='user1@tdd.com')\n list_ = List.objects.create()\n\n response = self.client.post(f'/lists/share_lists/{ list_.id }/', data = {'sharee': 'user1@tdd.com'})\n self.assertRedirects(response, f'/lists/{ list_.id }/')\n\n\n def test_user_is_added_to_list_shared_with_all_with_POST(self):\n\n user = User.objects.create(email='user1@tdd.com')\n\n list_ = List.objects.create()\n self.client.post(f'/lists/share_lists/{list_.id}/', data={'sharee': 'user1@tdd.com'})\n\n self.assertIn(user, list_.shared_with.all())\n","sub_path":"superlists_TDD/lists/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":14611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98405460","text":"import numpy\nfrom Functions.cm_with_pbc import cm\nfrom Functions.pbc import pbc2d, pbc1d\nfrom cfunctions.functions import pbc2d, pbc, cm_cc\nfrom math import sqrt\n\ndef shells(center, pos, box, s = 300):\n '''\n Centeroid position is needed; 300 shells for default\n '''\n n, m = pos.shape # Ensures that pos is a 2D array\n result = numpy.zeros((n, s), dtype=numpy.int)\n L = box.min()/2\n delta = L/s\n pbc_pos = pbc2d(pos - center, box)\n for i in range(n):\n d = sum(pbc_pos[i]**2)**0.5\n if not d > L:\n result[i][int(d/delta)] += 1\n return(result)\n\ndef shells_c0(pos_c0, box, s = 300):\n '''\n Positions already centered at 0; 300 shells for default\n '''\n n, m = pos_c0.shape # Ensures that pos is a 2D array\n result = numpy.zeros((n, s), dtype=numpy.int) # N paritcles with s shells\n L = box.min()/2\n delta = L/s\n for i in range(n):\n d = sqrt(pos_c0[i].dot(pos_c0[i]))\n if not d > L:\n result[i][int(d/delta)] += 1\n return(result)\n","sub_path":"Functions/shellize.py","file_name":"shellize.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11844112","text":"'''\nGiven a rectangular matrix of characters, add a border of asterisks(*) to it.\n\nExample\n\nFor\n\npicture = [\"abc\",\n \"ded\"]\nthe output should be\n\naddBorder(picture) = [\"*****\",\n \"*abc*\",\n \"*ded*\",\n \"*****\"]\n'''\n\ndef addBorder(picture):\n temp=len(picture[0])\n picture.insert(0,'*'*(temp+2))\n picture.append('*'*(temp+2))\n for i in range(1,len(picture)-1):\n picture[i]='*'+picture[i]+'*'\n return(picture)\n","sub_path":"Arcade/Intro/15_addBorder.py","file_name":"15_addBorder.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583917727","text":"from socket import *\nimport sys\nimport time\n\n#Check input\nif len(sys.argv) != 3:\n print (\"Your command is not right. 
Please be in this format: UDPPingClient.py server_host server_port\")\n sys.exit(0)\n\n#Connect to server from input\nserverName = str(sys.argv[1])\nserverPort = int(sys.argv[2])\naddress = (serverName, serverPort)\nclientSocket = socket(AF_INET, SOCK_DGRAM)\n\n# Set a waiting time of one second for a response from the server\nclientSocket.settimeout(1)\n\n# Find max, min, average RTT, packet loss rate\nRTTmax = 0\nRTTmin = float('inf')\nRTTaverage = 0\npacket = 10\n\nfor i in range(1, 11):\n start = time.time() # Get the current time\n message = 'Ping ' + str(i) + ' ' + time.ctime(start)\n try:\n clientSocket.sendto(message.encode(), address)\n print('Sent: ' + message)\n modifiedMessage = clientSocket.recvfrom(1024)\n print('Receive: ' + modifiedMessage[0].decode()) # [0] is the message, [1] is the server's address\n end = time.time()\n elapsed = end - start\n RTTmax = max(RTTmax, elapsed)\n RTTmin = min(RTTmin, elapsed)\n RTTaverage = RTTaverage + elapsed\n print(\"RTT: \" + str(elapsed) + \" seconds\\n\")\n except timeout:\n print(\"#\" + str(i) + \" Request timed out\\n\")\n packet = packet - 1\n\nprint(\"RTTmin: \" + str(RTTmin) + \" seconds\")\nprint(\"RTTmax: \" + str(RTTmax) + \" seconds\")\nprint(\"RTTaverage: \" + str(RTTaverage / max(packet, 1)) + \" seconds\")\nprint(\"Packet loss rate: \" + str((10 - packet) / 10 * 100) + \"%\")\nclientSocket.close()","sub_path":"UDPPingClient.py","file_name":"UDPPingClient.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545383882","text":"'''\nIn this file, we restore the model trained before\n'''\nfrom six import string_types, iteritems\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n#define the model_fn, where our model is designed\ndef layers(op):\n def layer_decorated(self, *args, **kwargs):\n '''\n All layers are abstracted to the same operation:\n input -> process -> output\n So we abstract the process here and realize it in each layer\n '''\n #set the layer name\n name = kwargs.setdefault('name', self.get_unique_name(op.__name__))\n print('Name in layer_decorated: %r'%name)\n #set the layer's input\n if len(self.terminals) == 0:\n raise RuntimeError('Empty input for layer %s'%name)\n elif len(self.terminals) == 1:\n layer_inputs = self.terminals[0]\n else:\n layer_inputs = list(self.terminals)\n print(\"The args is %r\"%kwargs)\n layer_outputs = op(self, layer_inputs, *args, **kwargs)\n self.layers[name] = layer_outputs\n self.feed(layer_outputs)\n self.logits = layer_outputs\n return self\n return layer_decorated\nclass NeuralNetwork(object):\n def __init__(self, inputs, trainable=True):\n self.inputs = inputs\n #terminals store the intermediate result,\n # which is the output of the last layer and the input of the next layer\n self.terminals = []\n self.layers = dict(inputs)\n self.trainable = trainable\n self.logits = []\n self.setup()\n def setup(self):\n raise NotImplementedError('Must be implemented by sub class')\n def get_unique_name(self, prefix):\n ident = sum(t.startswith(prefix) for t,_ in self.layers.items()) + 1\n return '%s_%d'%(prefix, ident)\n def feed(self, *args):\n '''\n This function looks up the dictionary of layers by the layer name\n '''\n assert len(args)!=0\n self.terminals = []\n for arg in args:\n if isinstance(arg, string_types):\n try:\n arg = self.layers[arg]\n except KeyError:\n raise
KeyError('Unknown layer name %s'%arg)\n self.terminals.append(arg)\n return self\n def make_variable(self, name, shape):\n # tf.truncated_normal_initializer\n initialization = tf.truncated_normal(shape=shape, stddev=0.1, dtype=tf.float32)\n return tf.get_variable(name=name, initializer=initialization)\n # return tf.get_variable(name=name, shape=shape, initializer=tf.truncated_normal_initializer)\n @layers\n def conv(self, input_nn, k_h, k_w, s_h, s_w, channels, name, padding='VALID'):\n #get the input depth\n input_dim = int(input_nn.get_shape()[-1])\n #define the convolution operation\n convolve = lambda inp, kernel_nn: tf.nn.conv2d(inp, kernel_nn, [1, s_h, s_w, 1], padding=padding)\n with tf.variable_scope(name) as scope:\n #In tensorflow, the convolutional kernel is [h, w, in_channels, out_channels]\n kernel_nn = self.make_variable('weights', [k_h, k_w, input_dim, channels])\n output = convolve(input_nn, kernel_nn)\n #add the biases\n biases = self.make_variable('biases', [channels])\n output = tf.add(output, biases)\n return output\n\n @layers\n def activate(self, input_nn, name, atype_nn='PReLU'):\n with tf.variable_scope(name) as scope:\n if atype_nn.lower() == 'relu':\n output = tf.nn.relu(input_nn, name=scope.name)\n return output\n elif atype_nn.lower() == 'sigmoid':\n output = tf.nn.sigmoid(input_nn, name=scope.name)\n return output\n elif atype_nn.lower() == 'prelu':\n i = int(input_nn.get_shape()[-1])\n alpha = self.make_variable('alpha', shape=(i,))\n output = tf.nn.relu(input_nn) + tf.multiply(alpha, -tf.nn.relu(-input_nn))\n return output\n else:\n raise RuntimeError('Unknown activation: %s'%atype_nn)\n\n @layers\n def fc(self, input_nn, output_num, name):\n #get the input shape\n input_shape = input_nn.get_shape()\n input_num = 1\n for num in input_shape[1:].as_list():\n input_num = input_num*int(num)\n with tf.variable_scope(name) as scope:\n W = self.make_variable('weights', [input_num, output_num])\n biases = self.make_variable('biases', [output_num])\n input_flatten = tf.reshape(input_nn, [-1, input_num])\n output = tf.add(tf.matmul(input_flatten, W), biases)\n return output\n\n @layers\n def pool(self, input_nn, k_h, k_w, s_h, s_w, name, ptype_nn='MAX', padding='SAME'):\n with tf.variable_scope(name) as scope:\n if ptype_nn.lower() == 'max':\n output = tf.nn.max_pool(input_nn, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)\n return output\n elif ptype_nn.lower() == 'avg':\n output = tf.nn.avg_pool(input_nn, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)\n return output\n else:\n raise KeyError('Unknown pooling kernel %s'%ptype_nn)\n\n @layers\n def dropout(self, input_nn, keep_prob, name):\n with tf.variable_scope(name):\n output = tf.nn.dropout(input_nn, keep_prob=keep_prob)\n return output\n\nclass mnist_nn(NeuralNetwork):\n def setup(self):\n (self.feed('data')\n .conv(5, 5, 1, 1, 32, name='conv1', padding='SAME')\n .activate(name='relu1', atype_nn='relu')\n .pool(2, 2, 2, 2, name='pool1')\n .conv(5, 5, 1, 1, 64, name='conv2', padding='SAME')\n .activate(name='relu2', atype_nn='relu')\n .pool(2, 2, 2, 2, name='pool2')\n # .dropout(keep_prob=0.5, name='drop1')\n .fc(1024, name='fc1')\n .activate(name='relu3', atype_nn='relu')\n .dropout(keep_prob=0.5, name='drop2')\n .fc(10, name='fc2')\n )\n\n\n#Here we start to restore the model\n#define the loss\nprint('Let\\'s flying~')\nx = tf.placeholder(dtype=tf.float32, shape=[None, 784])\ny = tf.placeholder(dtype=tf.float32, shape=[None, 10])\nsess = tf.Session()\n# mnist_cnn =
create_cnn(sess)\n# logits = ('mnist/fc2/fc2:0')\ndata = tf.reshape(x, [-1, 28, 28, 1])\nlogits = mnist_nn({'data': data}).logits\n# logits = mnist_nn({'data': data}).layers['fc2']\n# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))\n# train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n# train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)\ninit = tf.global_variables_initializer()\nsess.run(init)\nsaver = tf.train.Saver()\n\ncorrections = tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1))\naccuracy = tf.reduce_mean(tf.cast(corrections, tf.float32))\nsaver.restore(sess, 'model/mnist_l4_800.ckpt')\nfor i in range(1, 10):\n\n print(\"TEST: The final accuracy on test is %r\" % sess.run(accuracy, feed_dict={\n x: mnist.test.images, y: mnist.test.labels}))","sub_path":"course_2/course2_4.py","file_name":"course2_4.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613446493","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: the given BST\n @param k: the given k\n @return: the kth smallest element in BST\n \"\"\"\n def kthSmallest(self, root, k):\n # write your code here\n dummy = TreeNode(0)\n dummy.right, stack = root, [dummy]\n\n for i in range(k):\n node = stack.pop()\n if node.right:\n node = node.right\n while node:\n stack.append(node)\n node = node.left\n if not stack:\n break\n\n return stack[-1].val\n","sub_path":"04 - Binary Tree - Divide Conquer & Traverse/M902.KthSmallestElementinaBST.py","file_name":"M902.KthSmallestElementinaBST.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115235570","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 23 13:52:59 2021\r\n\r\n@author: mttco\r\n\"\"\"\r\nimport numpy as np\r\nimport sklearn\r\nimport time\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import svm\r\nfrom scipy.io import loadmat\r\nfrom scipy import *\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nreduced_train_database_zone_project = loadmat('reduced_train_database_LBP.mat')\r\nreduced_train_database=reduced_train_database_zone_project.get('reduced_train_database')\r\nx1=reduced_train_database[0:10000]\r\nxtrain=np.array(x1)\r\ntrain_label=reduced_train_database_zone_project.get('train_label')\r\ny1=train_label[0:10000]\r\nytrain=np.reshape(np.array(y1),10000)\r\n# x2=reduced_train_database[40000:60000] #pour l entrainement du modèle\r\n# y2=train_label[40000:60000]\r\n\r\ndel reduced_train_database_zone_project\r\nreduced_test_database_zone_project = loadmat('reduced_test_database_LBP.mat')\r\nreduced_test_database=reduced_test_database_zone_project.get('test_database_reduced')\r\n\r\n\r\nx2=reduced_test_database\r\n\r\nxtest=np.array(x2)\r\n\r\ntest_label=reduced_test_database_zone_project.get('test_label')\r\n\r\ny2=test_label\r\nytest=np.reshape(np.array(y2),10000)\r\n\r\n# ytrain=np.reshape(np.array(y1),1000)\r\n# ytest=np.reshape(np.array(y2),10000)\r\n\r\n\r\nstart_time = time.time()\r\nclf = make_pipeline(StandardScaler(), SVC(C=100,gamma=0.001,kernel='rbf'))\r\nclf.fit(xtrain, ytrain)\r\nt1 = time.time()\r\ntemps_entrainement=t1-start_time\r\nstart_time = 
time.time()\r\nypred=clf.predict(xtest)\r\nresult=1-clf.score(xtest,ytest)\r\nt1 = time.time()\r\ntemps_test=t1-start_time\r\n\r\n\r\n##PARTIE CALCUL EN CHAINE\r\n#class SVC(*, C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=- 1, decision_function_shape='ovr', break_ties=False, random_state=None)\r\n\r\n# tab1=zeros((49, 4))\r\n\r\n# indice=0\r\n# indice1=0.001\r\n# indice2=0.001\r\n\r\n# i=1000\r\n# j=2\r\n\r\n# # for i in range(1,7):\r\n# for j in range(1,7):\r\n# a=indice2\r\n# b=indice1\r\n# start_time = time.time()\r\n# clf = make_pipeline(StandardScaler(), SVC(C= a ,gamma= b ,kernel='rbf'))\r\n# clf.fit(xtrain, ytrain)\r\n# ypred=clf.predict(xtest)\r\n# result=1-clf.score(xtest,ytest)\r\n# t1 = time.time()\r\n# tab1[indice][0]=a\r\n# tab1[indice][1]=b\r\n# tab1[indice][2]=result\r\n# tab1[indice][3]=t1-start_time\r\n# indice=indice+1\r\n# indice1=indice1*10\r\n# indice2=indice2*10\r\n \r\n","sub_path":"Lab1/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589174195","text":"######################################################################################################################\n## ##\n## Cargando las librerías necesarias... ##\n## ##\n###################################################################################################################### \n\nimport pandas as panda\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom datetime import datetime\n\n######################################################################################################################\n## ##\n## Listado de variables utilizadas... ##\n## ##\n## direccionFichero [Contiene la ruta donde se encuentra el fichero xls a leer]... ##\n## ##\n## config [Contiene la información de configuración del servidor de base de datos MySQL]... ##\n## nombreBd [Contiene el nombre de la base de datos]... ##\n## tabla [Contiene las instrucciones SQL necesarias para crear las tablas en la base de datos]... ##\n## datos [Contiene toda la información necesaria para insertar un nuevo registro en la base de datos]... ##\n## add [Contiene las instrucciones SQL para insertar un registro nuevo en la base de datos]... ##\n## cnx [Para manejar la conección al servidor MySQL]... ##\n## cursor [Para indicar las instrucciones al servidor MySQL]... ##\n## ##\n######################################################################################################################\n\n######################################################################################################################\n## ##\n## Estableciendo las variables de configuración necesarias... ##\n## Estableciendo la esctructura de las tablas en la base de datos... 
##\n## ##\n######################################################################################################################\n\nnombreBDLoc = 'datawarehouse' ## Configuración para la base de dato de localización gegráfica...\nconfig = {\n 'user': 'kike',\n 'password': 'kike123',\n 'host': '127.0.0.1',\n 'database': 'datawarehouse',\n 'raise_on_warnings': True,\n}\n\ntablaLoc = {} ## Definición de la tabla de localización...\ntablaLoc[nombreBDLoc] = ( \n \"CREATE TABLE `tiempo` (\"\n \" `IDTIEMPO` INT NOT NULL AUTO_INCREMENT,\"\n \" `DIA` DOUBLE NULL,\"\n \" `MES` DOUBLE NULL,\"\n \" `ANNO` DOUBLE NULL,\"\n \" `HORA` DOUBLE NULL,\"\n \" `MINUTO` DOUBLE NULL,\"\n \" `SEGUNDO` DOUBLE NULL,\"\n \" PRIMARY KEY (`IDTIEMPO`));\"\n \" ENGINE = InnoDB\"\n)\n\n######################################################################################################################\n## ##\n## Creando una clase para convertir los tipos de datos a tipos MySQL... ##\n## ##\n######################################################################################################################\n\nclass NumpyMySQLConverter(mysql.connector.conversion.MySQLConverter):\n \n \"\"\" A mysql.connector Converter que es capaz de manejar los tipos de datos de Numpy \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value) \n\n def _timestamp_to_mysql(self, value):\n return datetime.timestamp(value)\n\n######################################################################################################################\n## ##\n## Conectando al SGBD MySQL y creando las tablas definidas... ##\n## ##\n######################################################################################################################\n\ntry:\n print (\"Creando las variables de conexión...\")\n cnx = mysql.connector.connect(**config)\n cnx.set_converter_class(NumpyMySQLConverter)\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print (\" \")\n print(\"Su usuario o contraseña no son correctos, por favor verifique...\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print (\" \")\n print(\"No existe la base de datos, por favor verifique...\")\n else:\n print (\" \")\n print(err)\nelse:\n print (\" \")\n print (\"Conexión exitosa...\")\n cursor = cnx.cursor()\n\n######################################################################################################################\n## ##\n## Función para crear la base de datos en el formato correspondiente... ##\n## ##\n######################################################################################################################\n\ndef create_database(cursor): ## Función para la base de d...\n try:\n cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(nombreBDLoc))\n except mysql.connector.Error as err:\n print (\" \")\n print(\"Error al crear la base de datos señalada: {}\".format(err))\n exit(1)\n\n######################################################################################################################\n## ##\n## Se solicita crear las base de datos... 
##\n## ##\n######################################################################################################################\n\ntry: \n cnx.database = nombreBDLoc \nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = nombreBDLoc\n else:\n print (\" \")\n print(err)\n exit(1) \n\n######################################################################################################################\n## ##\n## Se solicita crear todas las tablas definidas... ##\n## ##\n######################################################################################################################\n\nfor name, ddl in tablaLoc.items():\n try:\n print (\" \")\n print(\"Creando la tabla {}: \".format(name), end='')\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print (\" \")\n print(\"Ya existe la base de datos...\")\n else:\n print (\" \")\n print(err.msg)\n else:\n print (\" \")\n print(\"Tablas creadas...\")\n\n######################################################################################################################\n## ##\n## Las variables corresponden a sus respectivos campos en la base de datos (Ver descripción anterior)... ##\n## datosEgreso [Contiene la información necesaria para hacer la inserción en la base de datos]... ##\n## addEgreso [Consulta SQL que inserta la información en la base de datos]... ##\n## ##\n######################################################################################################################\n\ndia = \"\"\nmes = \"\"\nanno = \"\"\nhora = \"\"\nminuto = \"\"\nsegundo = \"\"\n\ndatosTiempo = {\n 'datoDia' : dia,\n 'datoMes' : mes,\n 'datoAnno' : anno,\n 'datoHora' : hora,\n 'datoMinuto' : minuto,\n 'datoSegundo' : segundo,\n}\n\naddTiempo = (\"INSERT INTO tiempo\"\n \"(DIA, MES, ANNO, HORA, MINUTO, SEGUNDO)\"\n \"VALUES (%(datoDia)s, %(datoMes)s, %(datoAnno)s, %(datoHora)s, %(datoMinuto)s, %(datoSegundo)s)\"\n )\n\n######################################################################################################################\n## ##\n## Se leen los valores del json y se asignan a las variables... ##\n## Esto es necesario para poder insertar correctamente los valores en la base de datos... ##\n## ##\n######################################################################################################################\n\nvalorTiempo = int(1527811200)\ntiempo = datetime.fromtimestamp(valorTiempo)\n\nfor i in range(1, 308160):\n dia = tiempo.day\n mes = tiempo.month\n anno = tiempo.year\n hora = tiempo.hour\n minuto = tiempo.minute\n segundo = tiempo.second\n valorTiempo = valorTiempo + int(60)\n tiempo = datetime.fromtimestamp(valorTiempo) \n\n datosTiempo = {\n 'datoDia' : dia,\n 'datoMes' : mes,\n 'datoAnno' : anno,\n 'datoHora' : hora,\n 'datoMinuto' : minuto,\n 'datoSegundo' : segundo,\n }\n \n print (\"Insertando registro \" + str(i) + \" de \" + str(308160))\n cursor.execute(addTiempo, datosTiempo)\n cnx.commit()\n print (\"Registro \" + str(i) + \" insertado, completado el \" + str(int(i)*100/int(308160)) + \" porciento del total de datos\")\n\n######################################################################################################################\n## ##\n## Cerrando las conexiones a la base de datos... 
##\n## ##\n######################################################################################################################\n\nprint (\"Se insertaron adecuadamente el 100 porciento de los datos del xls en la base de datos.\")\ncursor.close()\ncnx.close()","sub_path":"tiempo.py","file_name":"tiempo.py","file_ext":"py","file_size_in_byte":12542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517759754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_utils\n----------------------------------\n\nTests for `teamsupport.utils` module.\n\"\"\"\nimport unittest\n\nfrom teamsupport.utils import to_xml\nfrom tests import XmlTestCase\n\n\nclass TestToXml(XmlTestCase):\n def setUp(self):\n super(TestToXml, self).setUp()\n\n def test_to_xml_success_with_data(self):\n params = {\n 'data': {'Field1': 'Test field'},\n 'root': 'OuterField',\n }\n result = to_xml(**params)\n\n self.assertEqualXml(result, self.xml_element)\n\n def tearDown(self):\n super(TestToXml, self).tearDown()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510130214","text":"from slackclient import SlackClient\n\nimport json\nimport time\nimport sys\n\n#The access token needed by Slack to connect. This access token can be created\n#by following the instructions at: https://api.slack.com/docs/oauth. For\n#security reasons, this shouldn't be stored in plaintext in a file but instead\n#it should be pulled from configuration, which isn't easily accessible. .\ntoken = \"INSERT_TOKEN_HERE\"\n\n#Enter the name of the channel you want the message to be posted in. It must \n#be authenticated using the access token entered. No hashtag is needed before\n#the channel name.\nchannel = \"pri_slack_dev\"\n\n#Initialise the slackClient. This opens a connection to Slack, and validates\n#the access token that's passed in. If the access token has been revoked or is\n#invalid, the slackClient will not connect, and therefore, we will not be able\n#to publish messages to any Slack channel(s).\nslack_client = SlackClient(token)\n\n#This checks if our connection to Slack was successfully established. We connect\n#to the RTM, which is an acronym for Slack's Realtime Messaging (RTM) API. More\n#details can be found at: https://api.slack.com/rtm.\n#It is desirable to use the RTM API as we will create a connection that'll stay\n#open indefinitely (ideally), and stream messages to the channel as they come\n#in.\nif slack_client.rtm_connect():\n #Ideally, we are going to pipe messages into this script _forever_.\n #Therefore, we have a while loop that continuously runs, and pushes messages\n #to the Slack channel, provided there are messages to send.\n #It's worth noting that the flood of messages expected is relatively low.\n #Hence, there are no attempts to conflate messages (i.e. send messages in\n #batches. 
Additionally, as this information isn't critical, we can afford to\n #sleep for a minute (60 seconds) between iterations.\n while True:\n item = sys.stdin.readline()\n alert = json.loads(item)\n slack_client.rtm_send_message(channel, alert[\"message\"])\n time.sleep(60)\nelse:\n print(\"Connection Failed, invalid token?\")\n\n","sub_path":"WikiEdits/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"132257611","text":"from time import sleep\nimport sys\nimport RPi.GPIO as GPIO\n\nGPIO.setwarnings(False)\nDIR = -1\nSTEP = -1\n\nif sys.argv[1] == '4':\n DIR = 20\n STEP = 21\nelif sys.argv[1] == '3':\n DIR = 16\n STEP = 12\nelif sys.argv[1] == '2':\n DIR = 26\n STEP = 19\nelif sys.argv[1] == '1':\n DIR = 13\n STEP = 6\n\nCW = 1\nCCW = 0\nT = 0.003 # tempo entre passos\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(DIR, GPIO.OUT)\nGPIO.setup(STEP, GPIO.OUT)\nGPIO.output(DIR, CW)\n\nfor x in range(int(sys.argv[2])):\n GPIO.output(STEP, GPIO.HIGH)\n sleep(T)\n GPIO.output(STEP, GPIO.LOW)\n sleep(T)\n","sub_path":"nema.py","file_name":"nema.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30723382","text":"# This file is only for development. Do NOT look in this file... unless you want the solution. :)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport socket\n\ndef get_open_ports(target, port_range):\n open_ports = []\n\n if target[-1:].isalpha():\n target = socket.gethostbyname(target)\n\n print(\"Checking for open ports on:\", target)\n\n for port in range(port_range[0], port_range[1]):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n result = sock.connect_ex((target, port))\n if result == 0:\n open_ports.append(port)\n sock.close()\n\n return(open_ports)","sub_path":"Port Scanner/port_scanner_SOLUTION.py","file_name":"port_scanner_SOLUTION.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535517335","text":"import read\n'''import pandas as pd\n\nimport numpy as np\n'''\nimport matplotlib.pyplot as plt\n\n'''\n#### read excel sheet\n\nsheet1= pd.read_excel('Placementsois.xlsx',sheetname='placementstats')\n\n\nsheet2= pd.read_excel('Placementsois.xlsx',sheetname='enrollment')\n\n\nsheet1_reindexing=sheet1.reset_index()\n'''\n##################################\n###Visualization\n####\n\n##1.Show the Visualization of Percantage of placement of all branches in 2016\n\n\nsample1=read.sheet1[read.sheet1.Year==2016]\nsamp1=sample1.Percentage\n#sam=samp1.Percentage\n#samp1=filter(None,samp1)\n\n#print(samp1)\n####\nsample2=read.sheet1[read.sheet1.Year==2016]\nsamp2=sample2.MscTechProgram\n#print(samp2)\n\nforsize1=read.sheet1[read.sheet1.Year==2016]\nforsz1=forsize1.Percentage\n#print(forsz1)\n##\n\ndef Percentage_placement_2016():\n\tlabels=samp1\t\n\n\n\tsizes=forsz1\n\n\t#colors = ['gold', 'yellowgreen', 'lightcoral','lightskyblue','pink','darkblue','white','yellow','red','lightgreen']\n\t\n\texplode = (0.02, 0.02, 0.02, 0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02) # exploding all slice\n\t# Plot\n\t#plt.pie(sizes, explode=explode,labels=labels,autopct='%1.1f%%', startangle=140)\n\tplt.pie(sizes, explode=explode,labels=labels,startangle=140)\t\n\tleg=samp2\n\t#plt.legend(leg,loc=\"upper 
right\",bbox_to_anchor=(0.5,0.5,0.5,0.5))\n\tplt.legend(leg,loc=\"upper right\",ncol=1,fancybox=True)\n #plt.sizeslables(100,100,90.48,90,100,100,0.0,100,100)\n\tplt.axis('equal')\n\tplt.show()\n\nif __name__=='__main__':\n\tPercentage_placement_2016()","sub_path":"Project_code/visualization/vis1.py","file_name":"vis1.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507596616","text":"#making febonacci series with generators\ndef feb():\n a=0;b=1;c=25\n while True:\n yield a;\n a,b=b,a+b\nfor f in feb():\n print(f)\n if f>1000:\n break","sub_path":"ptut/src/jay/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552767130","text":"\r\n\r\nclass Place2(object):\r\n def __init__(self, y=None, x=None):\r\n assert type(y) == int or y is None, \"illegal data type for y\"\r\n assert type(x) == int or x is None, \"illegal data type for x\"\r\n self.y = y\r\n self.x = x\r\n\r\n def __add__(self, other_Place2):\r\n assert type(other_Place2) == Place2, \"Place2 can add only with Place2\"\r\n return Place2(self.y + other_Place2.y, self.x + other_Place2.x)\r\n\r\n def __sub__(self, other_Place2):\r\n assert type(other_Place2) == Place2, \"Place2 can sub only with Place2\"\r\n return Place2(self.y - other_Place2.y, self.x - other_Place2.x)\r\n\r\n def get_as_list(self):\r\n return [self.y, self.x]\r\n\r\n\r\nclass Direction(object):\r\n def __init__(self, direction=None):\r\n assert direction == 0 or direction == 1 or direction == None, \"illegal direction\"\r\n self._direction = direction\r\n\r\n def show_direction_info(self):\r\n if self._direction == 0:\r\n return \"parallel to y-axis\"\r\n elif self._direction == 1:\r\n return \"parallel to x-axis\"\r\n elif self._direction == None:\r\n return \"set direction\"\r\n else:\r\n assert False, \"illegal access has occured on direction\"\r\n return None\r\n\r\n def get_as_int(self):\r\n if type(self._direction) == int:\r\n return self._direction\r\n else:\r\n assert False, \"set direction\"\r\n return None\r\n\r\n def reverse(self):\r\n if self._direction == 0:\r\n self._direction = 1\r\n elif self._direction == 1:\r\n self._direction = 0\r\n else:\r\n assert False, \"direction is not set\"\r\n\r\n\r\nclass Place3(object):\r\n def __init__(self, direction=None, y=None, x=None):\r\n assert type(y) == int or y is None, \"illegal data type for y\"\r\n assert type(x) == int or x is None, \"illegal data type for x\"\r\n assert type(\r\n direction) == Direction or direction is None, \"illegal data type for direction\"\r\n\r\n self.direction = direction\r\n self.y = y\r\n self.x = x\r\n\r\n def get_as_list(self):\r\n return [self.direction, self.y, self.x]\r\n\r\n def get_as_Place2(self):\r\n return Place2(self.y, self.x)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n place0 = Place2()\r\n print(place0.y)\r\n","sub_path":"python/quoridor/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4528057","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 20 22:44:10 2018\n\n@author: Merlin The Sorcerer\n\"\"\"\n\n\n\n\"Bubble sort\"\ndef bubbleSort(a):\n \"a = b.copy()\"\n n = len(a)\n for i in range(n):\n for j in range(0, n-i-1):\n if a[j] > a[j + 1]:\n a[j], a[j+1] = a[j+1], a[j]\n \"return 
a\"\n\n\"Insertion Sort\"\ndef insertionSort(a):\n \"a = b.copy()\"\n for i in range(len(a)):\n minIndex = i\n for j in range(i+1, len(a)):\n if a[minIndex] > a[j]:\n minIndex = j\n a[i], a[minIndex] = a[minIndex], a[i]\n \"return a\"\n\ndef isSortedAsc(a):\n return all(a[i] <= a[i+1] for i in range(len(a) - 1))\n\ndef isSortedDesc(a):\n return all(a[i] >= a[i+1] for i in range(len(a) - 1))","sub_path":"Ordenacion/ordenaciones.py","file_name":"ordenaciones.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529551589","text":"from __future__ import print_function\nimport sys\nimport datetime\nimport numpy as np\n\n\ndef trace(text, out_file=sys.stderr):\n print(datetime.datetime.now(), '...', text, file=out_file)\n\n\ndef load_bin_vec(fname, vocab):\n \"\"\"\n Loads word vecs from Google (Mikolov) word2vec\n \"\"\"\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n i = 0\n for line in xrange(vocab_size):\n i += 1\n sys.stderr.write(\"\\r%d\" % i)\n sys.stderr.flush()\n word = []\n while True:\n ch = f.read(1)\n if ch == b\" \":\n word = b\"\".join(word)\n break\n if ch != b\"\\n\":\n word.append(ch)\n if word in vocab:\n word_vecs[word] = np.fromstring(\n f.read(binary_len), dtype='float32')\n else:\n f.read(binary_len)\n sys.stderr.write(\"\\n\")\n return word_vecs\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"222727052","text":"import sys\nimport getopt\nimport getpass\nimport json\n\n\ndef PrintOutput(sqlQuery, QueryName):\n \"\"\" text_file = open(\"Output/Output.txt\", \"w\")\n text_file.write(sqlQuery)\n text_file.close() \"\"\"\n\n sqlFile = open(\"C:/Users/\" + getpass.getuser() +\n \"/Documents/MSSQLDatabase/FormulaOne/{}.sql\".format(QueryName), \"w\")\n sqlFile.write(sqlQuery)\n sqlFile.close()\n\n\ndef SQLtable(TableName, JsonFile):\n queryContent = \"\"\n print(\"Table name:\", TableName, \"| JSON file:\", JsonFile)\n with open('../FormulaOneScrapy/{}.json'.format(JsonFile)) as data_file:\n data = json.load(data_file)\n # print(\"Data:\\n\",json.dumps(data, indent=4))\n\n print(\"ROWS: \", len(data))\n print(\"COLUMNS: \", len(data[0]))\n\n for drivers in data:\n sqlQuery = \"INSERT INTO [dbo].[{}](\\n\".format(TableName)\n for num, items in enumerate(drivers, start=1):\n if num == len(data[0]):\n sqlQuery += \"\\t{}\".format(items)\n else:\n sqlQuery += \"\\t{},\\n\".format(items)\n break\n\n for rows, drivers in enumerate(data, start=1):\n queryContent += \"(\"\n for num, items in enumerate(drivers, start=1):\n # Check if exist single quotation mark\n if drivers[items].find(\"'\") != -1:\n drivers[items] = drivers[items].replace(\"'\", \"''\")\n if num == len(data[0]):\n queryContent += \"'{}'\".format(drivers[items])\n else:\n queryContent += \"'{}',\".format(drivers[items])\n queryContent += \");\" if rows == len(data) else \"),\\n\"\n\n sqlQuery += \"\\n)\\nVALUES\\n{}\".format(queryContent)\n PrintOutput(sqlQuery, TableName)\n print(\"---\\nFile generato correttamente\\n---\")\n # print(sqlQuery)\n\n#\n# ──────────────────────────────────────────────── I ──────────\n# :::::: M A I N : : : : : : : :\n# ──────────────────────────────────────────────────────────\n#\n\n\ndef main(argv):\n inputfile = 
''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv, \"t:o:\", [\"iTable=\", \"ofile=\"])\n # print(\"OPTS: \", opts, \"ARGS \", args)\n except getopt.GetoptError:\n # print('main.py -t -o ')\n print('main.py -t ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-help':\n print('main.py -t ')\n sys.exit()\n elif opt in (\"-t\", \"--iTable\"):\n # print(\"OPT \", opt, \"ARG \", arg)\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n # print(\"OPT \", opt, \"ARG \", arg)\n outputfile = arg\n\n SQLtable(inputfile, inputfile.casefold())\n # print('Input file is \"', inputfile)\n # print('Output file is \"', outputfile)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"FormulaOneSQLCreator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450935940","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport gevent\nfrom user import User\nfrom chatroom import ChatRoom\nfrom gevent.server import StreamServer\n\n\nclass ChatServer:\n def __init__(self, port):\n self.rooms = {}\n self.roomid = 1\n self.port = port\n\n def run(self):\n self.server = StreamServer(('0.0.0.0', self.port), self.accept_handler)\n print('To join, nc (your ip here) %d' % self.port)\n self.server.serve_forever()\n\n def room_selector(self, user):\n while True:\n user.queue.put('List of active rooms:\\n')\n for room in self.rooms.values():\n user.queue.put(room.status() + '\\n')\n user.queue.put('Enter room id to enter(or $ to create new one):\\n')\n\n gevent.sleep(0)\n selection = user.file.readline().strip()\n\n if selection:\n if selection == '$':\n self.create_room(user)\n continue\n\n if self.rooms.get(selection):\n return self.rooms[selection]\n\n def route(self, user):\n while True:\n self.room_selector(user).register_user(user)\n\n def create_room(self, user):\n while True:\n user.queue.put('Enter name of the new room: ')\n\n gevent.sleep(0)\n name = user.file.readline().strip()\n\n if name:\n new_room = ChatRoom(str(self.roomid), name)\n self.rooms[str(self.roomid)] = new_room\n self.roomid += 1\n return\n\n def accept_handler(self, sock, addr):\n new_user = User(sock, addr)\n print('User %s has connected!' 
% new_user.name)\n self.route(new_user)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"234375851","text":"import socket\n\nclient = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\nwhile True:\n    content = input('input what you say:')\n    client.sendto(content.encode('utf-8'),(\"169.254.179.36\",8024))\n    print('Sent successfully!')\n    data = client.recv(1024).decode('utf-8')\n    print(\"Server:\", data)","sub_path":"test/Internet/udpClient.py","file_name":"udpClient.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646343891","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 29 15:17:48 2016\n\n@author: zp4wo\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndf_chusage_cl10_0 = pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0329//df_data_csv ch usage and new fields clster 10-0 0329.csv')\ndf_with_racing_interst = pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//df_full_groupon_pri_V8_0323.csv')\ndf_chsuage_new_fields = pd.read_csv('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//data_csv//df_final_new_fields 0331.csv.csv')\n\n\n# do some cleaning about \n\ndf_chsuage_new_fields.columns = ['KeyID', 'FB_trascation_counts', 'Day_time_come_counts',\n 'night_time_come_counts', 'Ratio_Day_night',\n 'receation_no_charge_count', 'receation_with_charge_count',\n 'weekday_counts', 'weekend_counts', 'Ratio_Weekends_weekdays',\n 'cluster_no_new']\n \ndf_chusage_new_cl_merge = df_chsuage_new_fields[['KeyID','cluster_no_new']]\ndf_with_racing_interst = df_with_racing_interst[['KeyID',\n 'Racing_interest_HVRC_pri_spendS1415',\n 'Racing_interest_STRC_pri_spendS1415',\n 'Racing_interest_HVRC_sup_spendS1415',\n 'Racing_interest_STRC_sup_spendS1415',\n 'Racing_interest_HVRC_pri_transcation_countS1415',\n 'Racing_interest_STRC_pri_transcation_countS1415',\n 'Racing_interest_HVRC_sup_transcation_countS1415',\n 'Racing_interest_STRC_sup_transcation_countS1415',\n 'Racing_interest_HVRC_pri_spendS1415_ave_transcation',\n 'Racing_interest_STRC_pri_spendS1415_ave_transcation',\n 'Racing_interest_HVRC_sup_spendS1415_ave_transcation',\n 'Racing_interest_STRC_sup_spendS1415_ave_transcation',\n ]]\ndfs =[df_chusage_cl10_0, df_chusage_new_cl_merge, df_with_racing_interst]\n\n\nimport functools as ft\ndf_chusage_cl10_0_merge = ft.reduce(lambda left,right: pd.merge(left,right,on='KeyID', how='left'),dfs)\n# X_pre1 = df_chusage_cl10_0_merge.drop('cluster_no', axis=1)\nX_pre1 = df_chusage_cl10_0_merge.drop('cluster_no_new', axis=1)\n\n\n#%%\ndef profile(X_pre1,date_str): \n import copy\n X_pre1.describe().T.to_excel('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//'+ date_str +'//CH_usage_overall_profile.xlsx')\n #add clustering\n X_pre = copy.deepcopy(X_pre1)\n X_pre_mean = X_pre.drop('KeyID', axis=1).groupby('cluster_no').mean()\n X_pre_cluster_count = X_pre[['KeyID','cluster_no']].groupby('cluster_no').count()\n X_pre_mean_table = pd.concat([X_pre_cluster_count, X_pre_mean],axis=1)\n X_pre_mean_table.columns = np.insert(X_pre_mean_table.columns[1:].values,0,'cluster_count') \n nan_count = X_pre.replace(0,np.nan).drop(['cluster_no','KeyID'], axis=1).isnull()\n nan_count['cluster_no'] = X_pre['cluster_no']\n Nan_count_table_cl = nan_count.groupby('cluster_no').sum().T\n\n
X_pre_mean_table_T = X_pre_mean_table.T\n X_pre_mean_table_T.columns = 'mean_'+ X_pre_mean_table_T.columns.astype(str)\n Nan_count_table_cl.columns = 'sum_nan_zero_' + Nan_count_table_cl.columns.astype(str)\n \n meantable_nan_table = pd.concat([X_pre_mean_table_T, Nan_count_table_cl], axis=1)\n meantable_nan_table.to_excel('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//'+ date_str +'//CH_usage_mean_table_with_nan_profile_old_cluster.xlsx')\n\ndate_str = '0330'\n\nprofile(X_pre1,date_str)\n\ndef cross(df):\n df_cross = df[['KeyID','cluster_no_new', 'cluster_no']]\n pivot_cross = pd.pivot_table(df_cross, index='cluster_no', columns='cluster_no_new', values='KeyID', aggfunc=np.count_nonzero)\n pivot_cross.to_excel('C://Users//zp4wo//Documents//HKJC_edit//memberprofile2015//output//0330//pivot cross.xlsx')\n \n# cross(df_chusage_cl10_0_merge)\n ","sub_path":"diging the cl10-0 profiling.py","file_name":"diging the cl10-0 profiling.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185884714","text":"\"\"\"\nMIT License\nCopyright (c) 2019 WebKide [d.id @323578534763298816]\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport discord\nimport asyncio\nimport aiohttp\nimport datetime\nimport time\nimport random\nimport json\nimport sys\nimport os\nimport re\nimport traceback\nimport textwrap\n\nfrom datetime import timedelta\nfrom discord.ext import commands\nfrom pathlib import Path\n\ndev_list = [('WebKide', 323578534763298816)]\nbot_channel = 375179500604096512\nguilds_spam = [('Mds', 540072370527010841),\n ('spam_log', 540600062217158666),\n ('invites', 541059392951418880),\n ('msg_log', 541061070907899905),\n ('default', 540072370979864577),\n ('ignored_chan', 540582467648749569),\n ('ssevana', 328341202103435264)]\n# +------------------------------------------------------------+\n# | Dictionary of blacklisted words for test |\n# +------------------------------------------------------------+\nurl_list = ['discord.gg/', 'discord.io/', 'discord.me/', 'discordapp.com/invite/',\n 'discordlisting.me', 'discordlist.me', 'invite/']\n\n\nclass AutoDelete:\n \"\"\"\n Automatic message deletion, blaclisted words, server invites\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.user_color = discord.Colour(0xed791d)\n\n # +------------------------------------------------------------+\n # | Blacklist words command group |\n # | it shows usage if no word str is given |\n # +------------------------------------------------------------+\n @commands.group(invoke_without_command=True)\n async def blw(self, ctx):\n \"\"\" Blacklist words cmd \"\"\"\n if 'include' not in ctx.message.content:\n usage = f'```css\\n{ctx.prefix}{ctx.invoked_with} [pacman]\\n```'\n await ctx.send(usage)\n \n @blw.command(no_pm=True)\n async def include(self, ctx, *, _include: str = None):\n \"\"\" Blacklist word add \"\"\"\n if _include is None:\n await ctx.message.add_reaction('\\N{WAVING HAND SIGN}')\n \n if _include is not None:\n with open('data/blacklist_words.json', 'a') as f:\n json_words = f.write(_include)\n await ctx.send(f'added {json_words}')\n await ctx.message.add_reaction('\\N{CHERRIES}')\n\n # Monitoring guild events and member messages\n async def on_message(self, message):\n if message.guild.id not in (x[1] for x in guilds_spam):\n return\n\n userinfo = f'**{message.author.display_name}** | `{message.author.id}`'\n msgLower = message.content.lower()\n msgs = []\n with open('data/blacklist_words.json') as f: # json file that contains blacklisted words\n blacklisted_words = json.load(f)\n\n # +------------------------------------------------------------+\n # | Needs more work for blacklist words to work |\n # | per guild, role, and channel in db |\n # +------------------------------------------------------------+\n if any(x in message.content.lower() for x in blacklisted_words):\n try:\n await message.delete()\n await message.add_reaction('\\N{WARNING SIGN}')\n\n except discord.Forbidden:\n pass\n\n finally:\n if random.randint(1, 3) != 1:\n warn = f\"{userinfo}\\n\" \\\n f\"\\N{WARNING SIGN} Watch your language!\"\n await message.channel.send(warn, delete_after=60)\n\n # +------------------------------------------------------------+\n # | Server invite monitor and auto-deleter |\n # | it warns user and logs invite with author ID |\n # +------------------------------------------------------------+\n if message.guild.id in (x[1] for x in 
guilds_spam):\n            if message.channel.id in (x[1] for x in guilds_spam):\n                return\n\n            else:\n                if '/HDJZnEj' in message.content:  # Modbot development support official invite\n                    return\n                if '/dvAq6Hm' in message.content:  # Ssevana official invite\n                    return\n\n                if any(x in msgLower for x in url_list):\n                    warns = f\"{userinfo}\\n\" \\\n                            f\"\\N{WARNING SIGN} Invites aren't allowed in this text channel!\"\n\n                    try:\n                        # Invites are logged in deleted-invites channel\n                        await message.delete()\n\n                    except discord.Forbidden:  # FORBIDDEN (status code: 403): Missing Permissions\n                        await message.channel.send(warns, delete_after=69)\n\n                    try:\n                        await message.add_reaction('\\N{AUBERGINE}')\n\n                    except discord.Forbidden:  # FORBIDDEN (status code: 403): Missing Permissions\n                        pass\n\n                    finally:\n                        # invite links and their message are logged if there's a channel for them\n                        log_content = f'{userinfo} *spammed in:*\\n' \\\n                                      f'```css\\ntChan: {message.channel.name} | {message.channel.id}\\n' \\\n                                      f'Guild: {message.guild.name} | {message.guild.id}```' \\\n                                      f'{message.content}'\n                        return await self.bot.get_channel(541059392951418880).send(log_content)\n\n\ndef setup(bot):\n    bot.add_cog(AutoDelete(bot))\n","sub_path":"cogs/autodelete.py","file_name":"autodelete.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143381313","text":"#-*- coding:utf-8 -*-\nimport sys,pygame\n\nclass MyBallClass(pygame.sprite.Sprite):\n    def __init__(self,image_file,speed,location): # define the Ball class\n        pygame.sprite.Sprite.__init__(self)\n        self.image=pygame.image.load(image_file)\n        self.rect=self.image.get_rect()\n        self.rect.left,self.rect.top=location\n        self.speed=speed\n    def move(self): # define the move() method\n        global score,score_surf,score_font\n        self.rect=self.rect.move(self.speed)\n        if self.rect.left<0 or self.rect.right>screen.get_width():\n            self.speed[0]=-self.speed[0]\n        if self.rect.top<=0: # when the ball hits the top of the screen, increment score by 1\n            self.speed[1]=-self.speed[1]\n            score=score+1\n            score_surf=score_font.render(str(score),1,(0,0,0))\n\nclass MyPaddleClass(pygame.sprite.Sprite): # define the paddle class\n    def __init__(self,location=[0,0]):\n        pygame.sprite.Sprite.__init__(self)\n        image_surface=pygame.surface.Surface([100,20])\n        image_surface.fill([0,0,0])\n        self.image=image_surface.convert()\n        self.rect=self.image.get_rect()\n        self.rect.left,self.rect.top=location\n\npygame.init()\nscreen=pygame.display.set_mode([640,480])\nclock=pygame.time.Clock()\nmyBall=MyBallClass(r\"wackyball.bmp\",[10,5],[50,50])\nballGroup=pygame.sprite.Group(myBall)\npaddle=MyPaddleClass([270,400])\nlives=3\nscore=0\nscore_font=pygame.font.Font(None,50) # create a font object of size 50\nscore_surf=score_font.render(str(score),1,(0,0,0))\nscore_pos=[10,10]\ndone=False # define the done variable to keep the ball from reappearing\nrunning=True\nwhile running:\n    clock.tick(30)\n    screen.fill([255,255,255])\n    for event in pygame.event.get():\n        if event.type==pygame.QUIT:\n            running=False\n        elif event.type==pygame.MOUSEMOTION:\n            paddle.rect.centerx=event.pos[0]\n    if pygame.sprite.spritecollide(paddle,ballGroup,False): # detect collisions between the ball and the paddle\n        myBall.speed[1]=-myBall.speed[1]\n    myBall.move()\n    if not done:\n        screen.blit(myBall.image,myBall.rect)\n        screen.blit(paddle.image,paddle.rect)\n        screen.blit(score_surf,score_pos)\n        for i in range(lives): # draw the balls in the top-right corner showing the remaining lives\n            width=screen.get_width()\n            screen.blit(myBall.image,[width-40*i,20])\n        pygame.display.flip()\n    if myBall.rect.top>=screen.get_rect().bottom: # lose a life when the ball hits the bottom edge\n        lives=lives-1\n        if lives==0: # create and draw the final score text\n            final_text1=\"GameOver\"\n            final_text2=\"Your final score
is:\"+str(score)\n ft1_font=pygame.font.Font(None,70)\n ft1_surf=ft1_font.render(final_text1,1,(0,0,0))\n ft2_font=pygame.font.Font(None,50)\n ft2_surf=ft2_font.render(final_text2,1,(0,0,0))\n screen.blit(ft1_surf,[screen.get_width()/2-ft1_surf.get_width()/2,100])\n screen.blit(ft2_surf,[screen.get_width()/2-ft2_surf.get_width()/2,200])\n pygame.display.flip()\n done=True\n else:\n pygame.time.delay(2000)\n myBall.rect.topleft=[50,50]\npygame.quit() ","sub_path":"python/chenxiao/pypong.py","file_name":"pypong.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66013122","text":"#!/bin/env python3\n\"\"\"\nREF:\n https://www.hackerrank.com/challenges/py-set-discard-remove-pop\n\n .remove(x)\n\n operation removes element x from the set.\n returns: None.\n raises KeyError if element x does not exist\n\n .discard(x)\n\n liked .remove(x) without KeyError\n\n .pop(x)\n operation removes and returns an arbitrary element from the set.\n raises KeyError if no elements to remove.\n\nTask:\n You have a non-empty set s, and you have to execute N commands\n given in N lines.\n The commands will be pop, remove, discard.\n\nINPUT:\n lines:\n 1 - integer n, number of elements in set s\n 2 n space separated elements of set s\n all elements non-negative, <= 9\n 3 - integer N, the number of commands\n 4...4+N commands to execute\n pop, remove and/or discard commands followed by their associated\n value.\nConstraints:\n 0 < n < 20\n 0 < N < 20\nOUTPUT:\n sum of elements of set s on a single line\n\"\"\"\nif __name__ == '__main__':\n n = int(input().strip())\n s = set(map(int, input().strip().split()))\n for i in range(N = int(input())):\n line = input().strip().split()\n cmd = line[0]\n if cmd == \"pop\":\n s.pop()\n elif cmd == \"remove\":\n s.remove(int(line[1]))\n elif cmd == \"discard\":\n s.discard(int(line[1]))\n print(sum(s))\n","sub_path":"pySetDiscardRemovePop-1.py","file_name":"pySetDiscardRemovePop-1.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283208217","text":"import os\nimport sys\nlib_path = os.path.abspath('../..')\nsys.path.insert(0, lib_path)\n\nfrom TwitterStreamer import TwitterStreamer\n\nclass RunStreamingClient:\n def __init__(self, auth, ds_tweets, ds_logs):\n self.auth = auth\n self.ds_tweets = ds_tweets\n self.ds_logs = ds_logs\n\n def start(self, keywords):\n self.keywords = keywords\n stream = TwitterStreamer(self.ds_tweets, self.ds_logs, self.auth['APP_KEY'], self.auth['APP_SECRET'], self.auth['OAUTH_TOKEN'], self.auth['OAUTH_TOKEN_SECRET'])\n stream.statuses.filter(track=self.keywords)\n\n\n","sub_path":"src/streams/RunStreamingClient.py","file_name":"RunStreamingClient.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165738195","text":"from model.figure import Figure\n\nclass Bishop(Figure):\n \n def __init__(self, board, x, y, color, player_type):\n Figure.__init__(self, board, x, y, color, player_type)\n \n def get_valid_moves(self) -> set:\n valid_moves = set()\n for i in range(-1, 2, 2):\n for j in range(-1, 2, 2):\n for k in range(1, 8):\n if not (self.posx + k * i < 8 and self.posx + k * i >= 0 and self.posy + k * j < 8 and self.posy + k * j >= 0):\n break\n if not self.board.is_empty_field(self.posx + k * i, self.posy + k * j):\n valid_moves.add((self.posx + k * i, self.posy + 
k * j))\n break\n valid_moves.add((self.posx + k * i, self.posy + k * j))\n return valid_moves","sub_path":"model/figures/bishop.py","file_name":"bishop.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303111947","text":"# coding=utf-8 #默认编码格式为utf-8\nfrom 自定义代码.爬小说测试.RequestUtils import Tool, header\nimport 自定义代码.爬小说测试.IoUtils as io\n\nhost = 'http://www.xxshu5.com'\nurl = 'http://www.xxshu5.com/xiuzhenliaotianqun/'\n\nlist_xpath = '//*[@id=\"list\"]/dl/dd[last()]/a'\ntitle_xpath = '//*[@id=\"wrapper\"]/div[5]/div/div[2]/h1/text()'\ncontent_xpath = '//*[@id=\"content\"]/text()'\n\ndef main():\n tool = Tool(url, header)\n for titleAndHref in tool.xpath(list_xpath):\n href = '%s%s' % (host, titleAndHref.get('href'))\n content_tool = Tool(href, header)\n title = content_tool.xpathOne(title_xpath)\n content = content_tool.xpathOne(content_xpath)\n\n # io.printConsole(titleAndHref.get('href'), title)\n io.printConsole(title, content)\n # io.writeFile(title, content)\n\nmain()","sub_path":"自定义代码/爬小说测试/爬小说-修真聊天群.py","file_name":"爬小说-修真聊天群.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480949478","text":"import os\nimport json\n\n\ndef load_schemas(model=None):\n vwpydir = os.path.dirname(os.path.dirname(__file__))\n schemadir = os.path.join(vwpydir, 'modelschema')\n modelschemas = {}\n for f in os.listdir(schemadir):\n with open(os.path.join(schemadir, f)) as schema_file:\n if f.endswith('.json'):\n data = json.load(schema_file)\n modelschemas[data['model']] = data\n if model:\n if model in modelschemas:\n return modelschemas[model]\n else:\n return {}\n return modelschemas\n","sub_path":"vwpy/modelschema/modelschema.py","file_name":"modelschema.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621072032","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#import plotly.express as px\nfrom os import path\n# from wordcloud import WordCloud, STOPWORDS\nimport re\n# from nltk.corpus import stopwords\n# from nltk.tokenize import word_tokenize\n# from nltk.stem.porter import PorterStemmer\n# from nltk.stem import WordNetLemmatizer\nfrom pickle import dump, load\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# import nltk\n# nltk.download('stopwords')\n# nltk.download('wordnet')\n\n\ndataset_loc = \"SMSSpamCollection\"\nimage_loc = \"spam_img.png\"\n\n# Data\ndef load_data(dataset_loc):\n df=pd.read_csv(dataset_loc,sep='\\t',names=['target','message'])\n df['length'] = df['message'].apply(len)\n return df\n\ndef load_description(df):\n\n st.header(\"EDA (Exploratory Data Analysis) \")\n # Preview of the dataset\n preview = st.radio(\"Choose one\", (\"Top\", \"Bottom\"))\n if(preview == \"Top\"):\n st.write(df.head())\n if(preview == \"Bottom\"):\n st.write(df.tail())\n\n # display the whole dataset\n if(st.checkbox(\"Show complete Dataset\")):\n st.write(df)\n\n # Show shape\n if(st.checkbox(\"Display the shape\")):\n st.write(df.shape)\n dim = st.radio(\"Rows/Columns?\", (\"Rows\", \"Columns\"))\n if(dim == \"Rows\"):\n st.write(\"Number of Rows\", df.shape[0])\n if(dim == \"Columns\"):\n st.write(\"Number of Columns\", df.shape[1])\n\n # show columns\n 
if(st.checkbox(\"Show the Columns\")):\n st.write(df.columns)\n\n if(st.checkbox('Counts of Unique values')):\n st.write(df.label.value_counts())\n\n # show info \n if(st.checkbox(\"Show the Data Description\")):\n st.write(df.describe(include='all'))\n \n if(st.checkbox('Describe using Group Labels')):\n st.write(df.groupby('label').describe())\n\ndef graph(df):\n st.subheader(\"Graphs of Target column :\")\n dim = st.radio(\"Bar Graph/Pie Chart?\", (\"Bar Graph\", \"Pie Chart\"))\n if(dim == \"Bar Graph\"):\n sns.countplot(x=\"target\", data=df)\n plt.xlabel(\"TARGET\")\n plt.ylabel(\"FREQUENCY\")\n plt.title(\" BAR PLOT OF : TARGET\")\n st.pyplot()\n if(dim == \"Pie Chart\"):\n plt.title(\" PIE CHART OF : TARGET\")\n df[\"target\"].value_counts().plot(kind = 'pie', explode = [0, 0.1], autopct = '%1.1f%%', shadow = True)\n plt.ylabel(\"Spam vs Ham\")\n plt.legend([\"Ham\", \"Spam\"])\n st.pyplot()\n if(st.checkbox(\"OBSERVATION 1.1 :\")):\n st.write('''\n 1. Tagret column have 2 unique vaules (ham and spam).\n 2. There are 4825 (86.6%) of ham messages.\n 3. There are 747 (13.4%) of spam messages.\n ''') \n\n st.subheader(\"Histogram of 'HAM' and 'SPAM' with respect to Length :\")\n df.hist(column='length',by='target', bins=50)\n st.pyplot()\n if(st.checkbox(\"OBSERVATION 1.2 :\")):\n st.write('''\n 1. Looks like spam messages are generally longer than ham messages.\n 2. Bulk of ham has length below 100, for spam it is above 100.\n 3. We will check if this feature is useful for the classification task.\n ''') \n\n\ndef cleaned(df_sms):\n\n# Join all messages to make one paragraph. \n words_ = ' '.join(df_sms['message'])\n \n# change all data into lower case.\n word_=words_.lower()\n\n# removes all word like(https,www.)\n c_word = \" \".join([word for word in word_.split()\n if 'http' not in word\n and 'www.' not in word\n ])\n \n# removes all special characters and digits.\n word_sms=''\n letters_only_sms = re.sub(\"[^a-zA-Z]\", \" \",c_word)\n \n# removes all stopwords like (the,we,are,it,if......)\n words = letters_only_sms.split()\n words = [w for w in words if not w in stopwords.words(\"english\")]\n \n# removes all words which have length less than 2.\n for a in words:\n if len(a)<3:\n words.remove(a)\n\n# again make all words into paragraph.\n for i in words:\n word_sms=word_sms+\" \"+i\n\n# return that paragraph.\n return word_sms\n\ndef word_cloud(df):\n st.subheader(\"Treating 'SPAM / HAM' messages\")\n dim = st.radio(\"Spam/Ham?\", (\"Spam\", \"Ham\"))\n if(dim == \"Ham\"):\n df_ham = df.loc[df['target']=='ham', :]\n st.write(df_ham.head())\n if (path.exists(\"wc_ham.png\")):\n pass\n if(dim == \"Spam\"):\n df_spam = df.loc[df['target']=='spam', :]\n st.write(df_spam.head())\n if (path.exists(\"wc_spam.png\")):\n st.image(\"wc_spam.png\", use_column_width = True)\n\ndef preprocess(raw_msg):\n\n stemmer = PorterStemmer()\n # Removing words like (http,www.)\n cleaned = \" \".join([word for word in raw_msg.split()\n if 'http' not in word\n and 'www.' 
\ndef word_cloud(df):\n st.subheader(\"Treating 'SPAM / HAM' messages\")\n dim = st.radio(\"Spam/Ham?\", (\"Spam\", \"Ham\"))\n if(dim == \"Ham\"):\n df_ham = df.loc[df['target']=='ham', :]\n st.write(df_ham.head())\n if (path.exists(\"wc_ham.png\")):\n pass\n if(dim == \"Spam\"):\n df_spam = df.loc[df['target']=='spam', :]\n st.write(df_spam.head())\n if (path.exists(\"wc_spam.png\")):\n st.image(\"wc_spam.png\", use_column_width = True)\n\ndef preprocess(raw_msg):\n\n stemmer = PorterStemmer()\n # Removing words like (http,www.)\n cleaned = \" \".join([word for word in raw_msg.split()\n if 'http' not in word\n and 'www.' not in word\n ])\n\n # Removing special characters and digits\n letters_only = re.sub(\"[^a-zA-Z]\", \" \",cleaned)\n\n # change sentence to lower case\n letters_only = letters_only.lower()\n\n # tokenize into words\n words = letters_only.split()\n \n # remove stop words \n words = [w for w in words if not w in stopwords.words(\"english\")]\n\n # Stemming\n words = [stemmer.stem(word) for word in words]\n\n clean_sent = \" \".join(words)\n \n return clean_sent\n \ndef predict(msg):\n \n # Loading pretrained CountVectorizer from pickle file\n vectorizer = load(open('countvectorizer.pkl', 'rb'))\n \n # Loading pretrained logistic classifier from pickle file\n classifier = load(open('logit_model.pkl', 'rb'))\n \n # Preprocessing the message\n clean_msg = preprocess(msg)\n \n # Converting text to numerical vector\n clean_msg_encoded = vectorizer.transform([clean_msg])\n \n # Converting sparse matrix to dense matrix\n msg_input = clean_msg_encoded.toarray()\n \n # Prediction\n prediction = classifier.predict(msg_input)\n \n return prediction\n\ndef test():\n st.header(\"Prediction\")\n st.sidebar.subheader(\"Spam ham Predictor\")\n msg = st.text_input('Enter your Message : ')\n\n prediction = predict(msg)\n\n if(msg):\n st.subheader(\"Prediction:\")\n if(prediction == 0):\n st.image(\"spam.jpg\", use_column_width = True)\n else:\n st.image(\"ham.jpg\", use_column_width = True)\n\n\n\n# Main\ndef main():\n\n # sidebar\n #load_sidebar()\n\n # Title/ text\n st.title('SMS Spam Collection Data Set')\n st.image(image_loc, use_column_width = True)\n st.text(\"Predict the Message 'SPAM' or 'HAM'.\")\n\n # loading the data\n df = load_data(dataset_loc)\n\n # display description\n load_description(df)\n\n graph(df)\n\n word_cloud(df)\n \n # test()\n\nif(__name__ == '__main__'):\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556229136","text":"class Movie():\n \"\"\"Class that represents a Movie\"\"\"\n def __init__(self, movie_title, storyline, poster_image,\n trailer_youtube, duration):\n \"\"\"Inits all data of a movie\n Args:\n movie_title(str): Movie title\n storyline(str): Movie storyline\n poster_image(str): Movie poster image\n trailer_youtube(str): Movie youtube url\n duration(str): Duration of the movie\n \"\"\"\n self.title = movie_title\n self.storyline = storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n self.duration = duration\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
 +{"seq_id":"564074823","text":"import datetime\nfrom django.contrib.auth.models import User\n\nfrom apio.api_exceptions import *\nfrom .models import Ledger\n\n\nclass LedgerList:\n \"\"\"\n This provides all ledgers belonging to a user. It also has query string params\n for filtering on from and to dates. You must provide both fields to properly query.\n Specifying just one field is not currently supported.\n \"\"\"\n def query_set_all(self):\n user = User.objects.filter(id=self.kwargs.get('id')).first()\n start = self.request.GET.get('from')\n stop = self.request.GET.get('to')\n if start or stop:\n try:\n start = datetime.datetime.strptime(start, \"%d-%m-%Y\").date()\n stop = datetime.datetime.strptime(stop, \"%d-%m-%Y\").date()\n except (TypeError, ValueError) as e:\n # handling for invalid format or missing field\n raise InvalidDateException\n except Exception as e:\n # handling for any rare error that may come in any special circumstance\n raise NotFoundException\n # if both dates specified and valid, return the filtered result\n return Ledger.objects.filter(customer=user, transaction_date__range=[start, stop])
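\n # e.g. (illustrative) a request carrying ?from=01-01-2021&to=31-01-2021 returns this\n # user's entries for January 2021; both values must match the %d-%m-%Y format parsed above.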
\n # if no query string provided, return all\n return Ledger.objects.filter(customer=user)\n","sub_path":"dehaat/financialaccount/ledger.py","file_name":"ledger.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468342375","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import CheckFailure\n\n\nclass ErrorHandler:\n def __init__(self, bot):\n self.bot = bot\n\n async def on_command_error(self, error, ctx):\n channel = ctx.message.channel\n if isinstance(error, commands.MissingRequiredArgument):\n await self.send_cmd_help(ctx, \"Error: Missing Required Argument: \" + \"{}\".format(' '.join(error.args)))\n elif isinstance(error, commands.BadArgument):\n await self.send_cmd_help(ctx, \"Error: Bad Argument\")\n elif isinstance(error, commands.CommandOnCooldown):\n await self.bot.send_message(channel, f\"_This command is currently on cooldown. Try again in `{error.retry_after:.0f}` seconds._\")\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, CheckFailure):\n await self.bot.send_message(channel, \"`Error: You don't have the required permissions to use this command.`\")\n else:\n print(error)\n\n async def send_cmd_help(self, ctx, error_msg):\n if ctx.invoked_subcommand:\n command = ctx.invoked_subcommand\n else:\n command = ctx.command\n pages = self.bot.formatter.format_help_for(ctx, command)\n for page in pages:\n em = discord.Embed(description=page.strip(\"```\").replace('<', '[').replace('>', ']'),\n colour=discord.Colour(0x5e51a8))\n em.set_footer(text=error_msg)\n await self.bot.send_message(ctx.message.channel, embed=em)\n\n\ndef setup(bot):\n bot.add_cog(ErrorHandler(bot))\n","sub_path":"error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6955836","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = 'Andy'\n\ndef export_apis(file, locals):\n import types,os,inspect\n route = {}\n _tmp = os.path.basename(file).lower()\n module = _tmp[:_tmp.rfind('.py')]\n for [def_name,def_obj] in locals.items():\n if type( def_obj ) == types.FunctionType and def_name[0] != '_':\n orig_func = getattr(def_obj, '_orig_func_', def_obj)\n argspec = inspect.getargspec(orig_func)\n if argspec[3] is None:\n _len = len(argspec[0])\n else:\n _len = len(argspec[0]) - len(argspec[3])\n args = [[x,1] for x in argspec[0][:_len]] + [[x,0] for x in argspec[0][_len:]]\n route_key = module+'.'+def_name\n route_val = { 'func':def_obj, 'args':args,\n 'log_level': getattr(orig_func, '_log_level_','ERROR') }\n # logging.info('export:: %s => %s' % ( route_key, route_val ))\n route[ route_key ] = route_val\n return route","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195325267","text":"\n\n# class header\nclass _HAVERSACK():\n\tdef __init__(self,): \n\t\tself.name = \"HAVERSACK\"\n\t\tself.definitions = [u'a bag, often made from strong, rough cloth, with one or two shoulder straps']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_haversack.py","file_name":"_haversack.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376242501","text":"import csv\n\nwords = []\nwith open(\"termlist\") as tsv:\n for line in csv.reader(tsv, delimiter=\"\\t\"):\n words.append(line[1])\n\nfor i in range(len(words)):\n\tfilename = words[i] + \".txt\"\n\tf = open(filename, 'w')\n\tf.write(words[i])\n\tf.close()","sub_path":"package_testing/temp/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291110446","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def printTree(self, root: TreeNode) -> List[List[str]]:\n def getMaxDepth(node):\n if not node: return 0\n \n return max(getMaxDepth(node.left), getMaxDepth(node.right)) + 1\n \n max_d = getMaxDepth(root)\n n = 2**max_d - 1\n res = [[''] * n for _ in range(max_d)]\n \n def fill(node, l, r, lv):\n if not node: return\n mid = (r + l) // 2\n res[lv][mid] = str(node.val)\n fill(node.left, l, mid - 1, lv + 1)\n fill(node.right, mid + 1, r, lv + 1)\n \n fill(root, 0, n - 1, 0)
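\n # e.g. for root 1 with a single left child 2: max_d = 2, n = 3, and the result\n # is [['', '1', ''], ['2', '', '']]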
\n return res\n \n \n \n \n # q = collections.deque([(root, 0)])\n # while q:\n # node, lv = q.popleft()\n # if len(res) == lv:\n # res.append([\"\"] * n)\n # res[lv]\n ","sub_path":"Python/655_Print Binary Tree.py","file_name":"655_Print Binary Tree.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"640414061","text":"'''\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nConsider null strs([]), or the contents are null([\"\",\"\"])\nConsider len(strs)>1, if len(strs)==0: return strs[0]\n\n'''\n\n\nclass Solution:\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return \"\"\n res = \"\"\n limit = min(map(lambda x: len(x), strs))\n for i in range(limit):\n cur = strs[0][i]\n for ele in strs[1:]:\n if ele[i] != cur:\n return res\n res += cur\n return res\n\n\ndef main():\n\n print(Solution().longestCommonPrefix([\"hello\", \"heabc\", \"hell\"]))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/Easy/14. Longest Common Prefix.py","file_name":"14. Longest Common Prefix.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261321022","text":"from unittest.mock import MagicMock\nfrom senza.aws import resolve_topic_arn\n\n\ndef test_create(monkeypatch):\n sns = MagicMock()\n topic = {'TopicArn': 'arn:123:mytopic'}\n sns.get_all_topics.return_value = {'ListTopicsResponse': {'ListTopicsResult': {'Topics': [topic]}}}\n monkeypatch.setattr('boto.sns.connect_to_region', MagicMock(return_value=sns))\n\n assert 'arn:123:mytopic' == resolve_topic_arn('myregion', 'mytopic')\n","sub_path":"tests/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148955513","text":"# read an N-row, multi-column table of strings as a matrix\r\ntable = [input().split() for l in range(3)]\r\nN = int(input())\r\narray = [input() for i in range(N)]\r\nbingo = \"\"\r\n\r\nfor k in range(3):\r\n for j in range(3):\r\n if table[k][j] in array:\r\n table[k][j] = 0\r\n\r\n table[k][j] = int(table[k][j])\r\n\r\nfor l in range(3):\r\n if table[l][0] + table[l][1] + table[l][2] == 0:\r\n bingo = \"Yes\"\r\n\r\nfor l in range(3):\r\n if table[0][l] + table[1][l] + table[2][l] == 0:\r\n bingo = \"Yes\"\r\n\r\nif table[0][0] + table[1][1] + table[2][2] == 0 or table[2][0] + table[1][1] + table[0][2] == 0:\r\n bingo = \"Yes\"\r\n\r\nif bingo == \"\":\r\n bingo = \"No\"\r\n\r\nprint(bingo)","sub_path":"過去問/ABC/ABC157/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"576734013","text":"# Question 9 - Write a Python program to calculate number of days between two dates.\n\nfrom datetime import date\n\n\ndef diff_date(d1, d2):\n date11 = date(int(d1.split(\"/\")[0]), int(d1.split(\"/\")[1]), int(d1.split(\"/\")[2]))\n date22 = date(int(d2.split(\"/\")[0]), int(d2.split(\"/\")[1]), int(d2.split(\"/\")[2]))\n return abs(date11-date22)
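\n# e.g. diff_date(\"2020/01/01\", \"2020/01/31\") -> datetime.timedelta(days=30)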
\n\n\nprint(\"NOTE: Please input the value in [yyyy/mm/dd] format only\")\ndate1 = input(\"Please input date 1 : \")\ndate2 = input(\"Please input date 2 : \")\n\n\nprint(\"The Difference in dates =>\", diff_date(date1, date2))\n\n\n","sub_path":"Python/Assignment1/question9.py","file_name":"question9.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497470737","text":"from PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QHBoxLayout, QPushButton\nfrom PyQt5.QtCore import Qt, pyqtSignal\n\nfrom app.data.database import DB\n\nfrom app.extensions.custom_gui import QHLine\nfrom app.editor.multi_combo_box_list import MultiComboBoxListWithCheckbox\nfrom app.editor.item_editor.item_model import get_pixmap\n\nclass ItemListWidget(QWidget):\n items_updated = pyqtSignal()\n\n def __init__(self, title, parent=None):\n super().__init__(parent)\n self.window = parent\n \n self.item_list = MultiComboBoxListWithCheckbox(DB.items, get_pixmap, self)\n self.item_list.item_changed.connect(self.activate)\n\n self.layout = QGridLayout(self)\n self.layout.setSpacing(0)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.addWidget(self.item_list, 3, 0, 1, 2)\n self.setLayout(self.layout)\n\n label = QLabel(title)\n label.setAlignment(Qt.AlignBottom)\n self.layout.addWidget(label, 0, 0)\n\n header1 = QLabel(\"Item ID\")\n header1.setAlignment(Qt.AlignBottom | Qt.AlignLeft)\n self.layout.addWidget(header1, 2, 0)\n\n header2 = QLabel(\"Droppable\")\n header2.setAlignment(Qt.AlignBottom | Qt.AlignRight)\n self.layout.addWidget(header2, 2, 1)\n\n hline = QHLine()\n self.layout.addWidget(hline, 1, 0, 1, 2)\n\n hbox = QHBoxLayout()\n hbox.setSpacing(0)\n hbox.setContentsMargins(0, 0, 0, 0)\n\n add_button = QPushButton(\"+\")\n add_button.setMaximumWidth(30)\n add_button.clicked.connect(self.add_new_item)\n\n remove_button = QPushButton(\"-\")\n remove_button.setMaximumWidth(30)\n remove_button.clicked.connect(self.remove_last_item)\n\n hbox.addWidget(remove_button, alignment=Qt.AlignRight)\n hbox.addWidget(add_button, alignment=Qt.AlignRight)\n\n self.layout.addLayout(hbox, 0, 1, alignment=Qt.AlignRight)\n\n def set_current(self, items):\n self.item_list.set_current(items)\n\n def add_new_item(self):\n if DB.items:\n new_item = DB.items[0].nid\n self.item_list.add_item(new_item)\n self.activate()\n\n def remove_last_item(self):\n if self.item_list.length() > 0:\n self.item_list.remove_item_at_index(self.item_list.length() - 1)\n self.activate()\n\n def activate(self):\n self.items_updated.emit()\n\n def get_items(self):\n return self.item_list.index_list[:]\n\n def set_color(self, color_list):\n self.item_list.set_color(color_list)\n","sub_path":"app/editor/item_list_widget.py","file_name":"item_list_widget.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445072749","text":"N = int(input())\ndp = [list(map(int, input().split())) for _ in range(N)]\nfor i in range(1, len(dp)):\n dp[i][0] += min(dp[i-1][1], dp[i-1][2])\n dp[i][1] += min(dp[i-1][0], dp[i-1][2])\n dp[i][2] += min(dp[i-1][0], dp[i-1][1])\nprint(min(dp[N-1]))\n\n\"\"\"\nDP problems really humble you -\nhard to believe the code can be this simple.\nSweep the array in place, computing for each house\nthe minimum cost of painting it in each color,\nand carry those minimums forward as you go.\n\"\"\"","sub_path":"알고리즘/온라인저지/2022/09/0911/RGB거리.py","file_name":"RGB거리.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283606576","text":"import pygame\nimport random\nimport math\nfrom pygame import mixer\n\npygame.init()\n\nscreen = pygame.display.set_mode((800, 600))\nbackground = pygame.image.load('background.png')\n\nmixer.music.load('music.wav')\nmixer.music.play(-1)\n\npygame.display.set_caption(\"vrinda's space force\")\nicon = pygame.image.load('alien.png')\npygame.display.set_icon(icon)\n\nplayerImg = pygame.image.load('spaceship.png')\nplayerX = 370\nplayerY = 480\nplayerX_change = 0\n\nenemyImg = []\nenemyX = []\nenemyY = []\nenemyX_change = []\nenemyY_change = []\nnumber_of_enemies = 6\n\nfor i in range(number_of_enemies):\n enemyImg.append(pygame.image.load('enemy.png'))\n enemyX.append(random.randint(0, 735))\n enemyY.append(random.randint(50, 150))\n enemyX_change.append(4)\n enemyY_change.append(40)\n\nmissileImg = pygame.image.load('nuclear.png')\nmissileX = 0\nmissileY = 480\nmissileX_change = 0\nmissileY_change = 10\nmissile_state = \"ready\"\n\nscore_value = 0\nfont = pygame.font.Font('freesansbold.ttf', 32)\n\nover_font = pygame.font.Font('freesansbold.ttf', 68)\n\ntextX = 10\ntestY = 10\n\n\ndef show_score(x, y):\n score = font.render(\"Vrinda's Score : \" + str(score_value), True, (255, 255, 255))\n screen.blit(score, (x, y))\n\n\ndef game_over_text():\n over_text = over_font.render(\"GAME OVER\", True, (255, 255, 255))\n screen.blit(over_text, (200, 250))\n\n\ndef player(x, y):\n screen.blit(playerImg, (x, y))\n\n\ndef enemy(x, y, i):\n screen.blit(enemyImg[i], (x, y))\n\n\ndef fire_missile(x, y):\n global missile_state\n missile_state = \"Fire\"\n screen.blit(missileImg, (x + 16, y + 10))\n\n\ndef isCollision(enemyX, enemyY, missileX, missileY):\n distance = math.sqrt((math.pow(enemyX - missileX, 2)) + (math.pow(enemyY - missileY, 2)))\n if distance < 27:\n return True\n else:\n return False\n
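\n# e.g. enemy at (100, 100) and missile at (110, 120): sqrt(10**2 + 20**2) ~= 22.4 < 27, so it counts as a hit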
\n\nrunning = True\n\nwhile running:\n\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n print('incorrect key pressed')\n if event.key == pygame.K_LEFT:\n playerX_change = -4\n print('left arrow is pressed')\n if event.key == pygame.K_RIGHT:\n playerX_change = 4\n print('right arrow is pressed')\n if event.key == pygame.K_SPACE:\n if missile_state == \"ready\":\n missile_sound = mixer.Sound('shoot.wav')\n missile_sound.play()\n missileX = playerX\n fire_missile(missileX, missileY)\n print('spacebar is pressed')\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n print('key released')\n\n playerX += playerX_change\n\n if playerX <= 0:\n playerX = 0\n if playerX >= 736:\n playerX = 736\n\n for i in range(number_of_enemies):\n\n if enemyY[i] > 440:\n for j in range(number_of_enemies):\n enemyY[j] = 2000\n game_over_text()\n break\n\n enemyX[i] += enemyX_change[i]\n\n if enemyX[i] <= 0:\n enemyX_change[i] = 4\n enemyY[i] += enemyY_change[i]\n elif enemyX[i] >= 736:\n enemyX_change[i] = -4\n enemyY[i] += enemyY_change[i]\n\n collision = isCollision(enemyX[i], enemyY[i], missileX, missileY)\n if collision:\n collision_sound = mixer.Sound('collision.wav')\n collision_sound.play()\n missileY = 480\n missile_state = \"ready\"\n score_value += 1\n enemyX[i] = random.randint(0, 735)\n enemyY[i] = random.randint(50, 150)\n\n enemy(enemyX[i], enemyY[i], i)\n\n if missileY <= 0:\n missileY = 480\n missile_state = \"ready\"\n if missile_state == \"Fire\":\n fire_missile(missileX, missileY)\n missileY -= missileY_change\n\n player(playerX, playerY)\n show_score(textX, testY)\n pygame.display.update()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479313149","text":"import numpy as np\nimport tensorflow as tf\n\nimport RegressBoxes\n\n# dimension of this array is\n# (batch size, num boxes, 4)\nboxes = np.array(\n # Batch Size\n [\n # boxes\n [\n # 4 coords\n [0, 0, 1, 1],\n [0.5, 0.5, 0.6, 0.6],\n ]\n ],\n).astype(\"float32\")\n\n# deltas is the same shape as boxes\n# and during operation contains the predicted\n# deltas from anchors\ndeltas = np.array(\n # batch size\n [\n # boxes\n [\n # coords\n [0.1, 0.1, 0.1, 0.1],\n [-0.2, -0.2, 0.2, 0.2],\n ]\n ]\n).astype(\"float32\")\n\noutcome = np.array([[[0.02, 0.02, 1.02, 1.02], [0.496, 0.496, 0.604, 0.604]]])\n\n\ndef test_apply_bbox_deltas():\n \"\"\"Test code that computes deltas from anchor boxes.\"\"\"\n\n with tf.compat.v1.Session().as_default():\n pred_boxes = RegressBoxes.apply_bbox_deltas(boxes, deltas)\n\n res = pred_boxes.eval()\n\n np.testing.assert_array_almost_equal(res, outcome)\n\n\ndef test_regress_boxes_layer():\n mean = np.array([0.5, 0.5, 0.5, 0.5])\n std = np.array([0.1, 0.1, 0.1, 0.1])\n layer = RegressBoxes.RegressBoxes(mean=mean, std=std)\n\n np.testing.assert_array_equal(layer.mean, mean)\n np.testing.assert_array_equal(layer.std, std)\n\n\ndef test_regress_boxes_layer_set_anchors():\n \"\"\"\n Make sure RegressBoxes works when anchors are baked-in as weights.\n \"\"\"\n\n with tf.compat.v1.Session().as_default():\n layer_baked = RegressBoxes.RegressBoxes(anchor_shape=boxes.shape)\n layer_baked.set_anchors(boxes)\n\n out_baked = layer_baked([deltas])\n\n np.testing.assert_array_almost_equal(out_baked.eval(), outcome)\n\n\ndef test_regress_boxes_layer_input_anchors():\n \"\"\"\n Make sure RegressBoxes works when anchors is set at runtime\n as an input.\n \"\"\"\n\n with tf.compat.v1.Session().as_default():\n layer = RegressBoxes.RegressBoxes()\n\n out = layer([boxes, deltas])\n\n np.testing.assert_array_almost_equal(out.eval(), outcome)\n\n\ndef test_regress_boxes_layer_compute_output_shape():\n\n layer = RegressBoxes.RegressBoxes()\n shape = layer.compute_output_shape([[10], [10]])\n\n assert shape == [10]\n\n","sub_path":"test_RegressBoxes.py","file_name":"test_RegressBoxes.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155368567","text":"\"\"\"\nMerge two sorted linked lists and return it as a new list.\nThe new list should be made by splicing together the nodes of the first two lists.\n\"\"\"\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution1(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n fakeHead = ListNode(0)\n curr = fakeHead\n\n while l1 and l2:\n if l1.val < l2.val:\n curr.next = l1\n l1 = l1.next\n else:\n curr.next = l2\n l2 = l2.next\n curr = curr.next\n\n curr.next = l1 if l1 else l2\n return fakeHead.next\n\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: 
ListNode\n \"\"\"\n if not l1: return l2\n if not l2: return l1\n\n if l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n\n","sub_path":"easy/MergeTwoSortedLists.py","file_name":"MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"304377694","text":"from db import db\nfrom datetime import datetime\nfrom models.category import CategoryModel\n\nfrom config import DEBUG\n\n\nclass AchievementModel(db.Model):\n __tablename__ = \"achievements\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n desc = db.Column(db.String(128), nullable=False)\n goal = db.Column(db.Integer, nullable=False)\n below = db.Column(db.Boolean, nullable=False)\n\n created = db.Column(db.DateTime, server_default=db.func.now())\n updated = db.Column(db.DateTime, server_default=db.func.now())\n deleted = db.Column(db.DateTime, server_default=None)\n\n category_id = db.Column(db.Integer,\n db.ForeignKey(\"categories.id\"),\n nullable=False)\n category = db.relationship(\"CategoryModel\")\n\n __table_args__ = (db.CheckConstraint(goal > 0, name=\"positive_goal\"),)\n\n def __init__(self, name, desc, goal, below, category):\n self.name = name\n self.desc = desc\n self.goal = goal\n self.below = below\n self.category_id = getattr(\n CategoryModel.find_existing_by_name(category), \"id\", None)\n\n def json(self):\n if DEBUG:\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"desc\": self.desc,\n \"goal\": self.goal,\n \"below\": self.below,\n \"category\": getattr(self.category, \"name\", None),\n \"image\": getattr(self.category, \"image_name\", None),\n \"created\": self.created.timestamp(),\n \"updated\": self.updated.timestamp(),\n \"deleted\": None if self.deleted is None else self.deleted.timestamp()\n }\n return {\n \"name\": self.name,\n \"desc\": self.desc,\n \"goal\": self.goal,\n \"below\": self.below,\n \"category\": getattr(self.category, \"name\", None),\n \"image\": getattr(self.category, \"image_name\", None)\n }\n\n @classmethod\n def find_by_name(cls, name):\n return cls.query.filter_by(name=name).first()\n\n @classmethod\n def find_existing_by_name(cls, name):\n return cls.query.filter_by(name=name).filter_by(deleted=None).first()\n\n @classmethod\n def find_all(cls):\n return cls.query.all()\n\n @classmethod\n def find_new(cls, last_fetch):\n return cls.query.filter(\n cls.created > datetime.fromtimestamp(last_fetch),\n cls.deleted == None\n )\n\n @classmethod\n def find_deleted(cls, last_fetch):\n return cls.query.filter(\n cls.created <= datetime.fromtimestamp(last_fetch),\n cls.deleted > datetime.fromtimestamp(last_fetch)\n )\n\n @classmethod\n def find_updated(cls, last_fetch):\n return cls.query.filter(\n cls.created <= datetime.fromtimestamp(last_fetch),\n cls.deleted == None,\n cls.updated > datetime.fromtimestamp(last_fetch)\n )\n\n def update(self, data):\n for k in data:\n if k == \"category\":\n setattr(self, \"category_id\",\n getattr(CategoryModel.find_existing_by_name(data[k]), \"id\", None))\n else:\n setattr(self, k, data[k])\n setattr(self, \"updated\", datetime.now())\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n def delete_from_db(self):\n self.deleted = datetime.now()\n db.session.add(self)\n 
db.session.commit()\n","sub_path":"models/achievement.py","file_name":"achievement.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513631222","text":"import NDensity as nd\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# Set the limits to the colorbar and number of color bins\nvmin = -4\nvmax = 0\nncolor = 17\ncmap_name = 'jet'\n\n# Grab all of the files we're going to read in\ndc_files = glob.glob(\"*dc.dat\")\ndc_files.sort()\n\n# Initialize the plot\nfig, axes = plt.subplots(2, 2, figsize=(16, 16))\n\n# Generate the color data for colorbar\ncmap = plt.get_cmap(cmap_name)\ncNorm = matplotlib.colors.Normalize(vmin=vmin,\n vmax=vmax)\nscalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)\nlevels = matplotlib.ticker.MaxNLocator(nbins=ncolor).\\\n tick_values(vmin, vmax)\n\nz = [[-100 for i in range(100)] for j in range(100)]\nim = plt.contourf(z, levels=levels, cmap=cmap)\n\n# Move to the first subplot\nplt.subplot(221)\n\n# Limit files to UCXBs\nstart_sys = 7\nend_sys = 8\nfiles_to_plot = dc_files[start_sys:end_sys]\n\n# Plot the UCXBs\nfor num in range(len(files_to_plot)):\n system = files_to_plot[num]\n print(system)\n dp = nd.density_profile(file_name=system)\n dp.plot_density(solo_fig=False, colorbar=False,\n min_color=vmin, max_color=vmax,\n color_map=cmap_name)\n\n# Set the x and y limits\nplt.ylim(0.925, 4)\nplt.xlim(-0.7, 1)\n\n# Remove the labels and add a title\nplt.tick_params(labelbottom='off', labelsize=15)\nplt.title(\"4U 1636-53 Observed Ratio\", fontsize=16)\n\n# Move to second subplot\nplt.subplot(222)\n\n# Limit files to short period systems\nstart_sys = 8\nend_sys = 9\nfiles_to_plot = dc_files[start_sys:end_sys]\n\n# Plot the short period systems\nfor num in range(len(files_to_plot)):\n system = files_to_plot[num]\n print(system)\n dp = nd.density_profile(file_name=system)\n dp.plot_density(solo_fig=False, colorbar=False,\n min_color=vmin, max_color=vmax,\n color_map=cmap_name)\n\n# Set the x and y limits\nplt.ylim(0.925, 4)\nplt.xlim(-0.7, 1)\n\n# Remove the labels and add a title\nplt.tick_params(labelbottom='off', labelsize=15)\nplt.title(r\"GX 9+9 Observed Ratio\", fontsize=16)\n\n# Move to the third subplot\nplt.subplot(223)\n\n# Limit files to medium period systems\nstart_sys = 9\nend_sys = 10\nfiles_to_plot = dc_files[start_sys:end_sys]\n\n# Plot the medium period systems\nfor num in range(len(files_to_plot)):\n system = files_to_plot[num]\n print(system)\n dp = nd.density_profile(file_name=system)\n dp.plot_density(solo_fig=False, colorbar=False,\n min_color=vmin, max_color=vmax,\n color_map=cmap_name)\n\n# Set the x and y limits\nplt.ylim(0.925, 4)\nplt.xlim(-0.7, 1)\n\n# Remove labels and add a title\nplt.tick_params(labelbottom='off', labelsize=15)\nplt.title(r\"4U 1735-444 Observed Ratio\", fontsize=16)\n\n# Move to fourth subplot\nplt.subplot(224)\n\n# Limit files to long period system\nstart_sys = 10\nend_sys = 11\nfiles_to_plot = dc_files[start_sys:end_sys]\n\n# Plot the long period system\nfor num in range(len(files_to_plot)):\n system = files_to_plot[num]\n print(system)\n dp = nd.density_profile(file_name=system)\n dp.plot_density(solo_fig=False, colorbar=False,\n min_color=vmin, max_color=vmax,\n color_map=cmap_name)\n\n# Set the x and y limits\n# plt.ylim(1.8, 5)\n# plt.xlim(-0.2, 1.5)\nplt.ylim(0.925, 4)\nplt.xlim(-0.7, 1)\n\n# Remove labels and add a title\nplt.tick_params(labelleft='off', labelsize=15)\nplt.title(r\"2A 
1822-371 Observed Ratio\", fontsize=16)\n\n# Adjust the spacing of the figure\n# fig.tight_layout(w_pad=0.6, h_pad=0.6)\n\n# Create space on right hand side for color bar\nplt.subplots_adjust(bottom=0.08, left=0.08, right=0.84, top=0.92)\n\n# Create the colorbar\nposition = fig.add_axes([0.86, 0.08, 0.02, 0.84])\ncb = fig.colorbar(im, cax=position)\ncb.set_label(r'$\\log_{10}(f_{ \\mathrm{obs}, i})$', fontsize=22)\ncb.ax.tick_params(labelsize=15)\n\n# Create an empty plot to place labels\nfig.add_subplot(111, frameon=False)\nplt.tick_params(labelcolor='None', top='off', bottom='off',\n left='off', right='off')\n\n# Create labels for entire group of plots\nplt.xlabel(r'$\\log_{10}$ (Period/days)', fontsize=22)\nplt.ylabel(r'Mass ($M_\\odot$)', fontsize=22)\nplt.savefig(\"Short_Grid.pdf\")\n","sub_path":"plot_short_NDensity.py","file_name":"plot_short_NDensity.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194306132","text":"class CourseSchedule:\n def canFinish(self, numCourses, prerequisites):\n if numCourses==0:\n return True\n in_degree=[0 for _ in range(numCourses)]\n adj=[set() for _ in range(numCourses)]\n \n for second, first in prerequisites:\n in_degree[second]+=1\n adj[first].add(second)\n\n queue=[]\n count=0\n for second in range(len(in_degree)):\n if in_degree[second]==0:\n queue.append(second)\n \n while queue:\n top=queue.pop(0)\n count+=1\n for t in adj[top]:\n in_degree[t]-=1\n if in_degree[t]==0:\n queue.append(t)\n return count==numCourses","sub_path":"SolutionsPY/CourseSchedule.py","file_name":"CourseSchedule.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338763860","text":"import os, sys\nfrom itertools import combinations\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..')) # 'rules' module\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\nfrom typing import List, Dict, Union, Tuple\nfrom collections import defaultdict\nimport numpy as np\nfrom SequenceGenerator import SequenceGenerator\nfrom pprint import pprint\nfrom PramRulesArributes import PramRulesArributes, rules_attr_to_json\n\n\nclass RuleGenBase:\n\n def __init__(self, precursor: Union[List, Tuple, str], successor: Union[List, Tuple, str],\n only_precursor_count: int, both_precursor_successor_count: int, only_successor_count: int,\n total_data_count: int, precursor_attribute=None, successor_attribute=None):\n\n # precursor, successor, only_precursor_count, both_precursor_successor_count, only_successor_count, total_data_count, precursor_attribute\n\n self.total_data_count = total_data_count\n self.only_successor_count = only_successor_count\n self.both_precursor_successor_count = both_precursor_successor_count\n self.only_precursor_count = only_precursor_count\n self.precursor = precursor\n self.successor = successor\n self.successor_attribute = successor_attribute\n if self.successor_attribute is None:\n self.successor_attribute = ['attribute_' + str(i) for i in range(len(precursor))]\n self.precursor_attribute = precursor_attribute\n if self.precursor_attribute is None:\n self.precursor_attribute = ['attribute_' + str(i) for i in range(len(precursor))]\n self.precursor_str = ' '.join(precursor).strip()\n self.successor_str = ' '.join(successor).strip()\n self.probability = self.both_precursor_successor_count / self.only_precursor_count\n self.g_score = 
self._get_g_score()\n\n def _get_g_score(self):\n n1_x_y = self.both_precursor_successor_count\n n2_x_not_y = self.only_precursor_count - self.both_precursor_successor_count if self.only_precursor_count - self.both_precursor_successor_count > 0 else 0\n n3_not_x_y = self.only_successor_count - self.both_precursor_successor_count if self.only_successor_count - self.both_precursor_successor_count > 0 else 0\n n4_not_x_not_y = self.total_data_count + self.both_precursor_successor_count - (\n self.only_successor_count + self.only_precursor_count)\n g_score = self._compute_g_score(n1_x_y, n2_x_not_y, n3_not_x_y, n4_not_x_not_y)\n return g_score\n\n def _compute_g_score(self, n1_x_y, n2_x_not_y, n3_not_x_y, n4_not_x_not_y):\n total = n1_x_y + n2_x_not_y + n3_not_x_y + n4_not_x_not_y\n if total == 0:\n return 0\n n1_hat = (n1_x_y + n2_x_not_y) * (n1_x_y + n3_not_x_y) / total\n n2_hat = (n1_x_y + n2_x_not_y) * (n2_x_not_y + n4_not_x_not_y) / total\n n3_hat = (n3_not_x_y + n4_not_x_not_y) * (n1_x_y + n3_not_x_y) / total\n n4_hat = (n3_not_x_y + n4_not_x_not_y) * (n2_x_not_y + n4_not_x_not_y) / total\n g_score = 2 * (n1_x_y * np.log(n1_x_y / n1_hat) +\n (n2_x_not_y * np.log(n2_x_not_y / n2_hat)) +\n (n3_not_x_y * np.log(n3_not_x_y / n3_hat))\n + (n4_not_x_not_y * np.log(n4_not_x_not_y / n4_hat)))\n return g_score\n\n def __repr__(self):\n all_val = str(self.__dict__)\n return all_val\n\n\ndef test_RuleGenBase():\n r = RuleGenBase(successor='s', precursor='p', only_precursor_count=10, only_successor_count=12,\n both_precursor_successor_count=6, total_data_count=100)\n print(r)\n\n\ndef get_sample_seq():\n transition = {'S': {'S': 0.7, 'I': 0.3, 'R': 0}, 'I': {'S': 0, 'I': 0.5, 'R': 0.5},\n 'R': {'S': 0.3, 'I': 0.0, 'R': 0.7}}\n SIR = SequenceGenerator(transition_prob=transition)\n sequence_data = SIR.generate_states(current_state='S', nt=2000)\n return sequence_data\n\n\ndef compute_g_score(n1_x_y, n2_x_not_y, n3_not_x_y, n4_not_x_not_y):\n total = n1_x_y + n2_x_not_y + n3_not_x_y + n4_not_x_not_y\n if total == 0:\n return 0\n n1_hat = (n1_x_y + n2_x_not_y) * (n1_x_y + n3_not_x_y) / total\n n2_hat = (n1_x_y + n2_x_not_y) * (n2_x_not_y + n4_not_x_not_y) / total\n n3_hat = (n3_not_x_y + n4_not_x_not_y) * (n1_x_y + n3_not_x_y) / total\n n4_hat = (n3_not_x_y + n4_not_x_not_y) * (n2_x_not_y + n4_not_x_not_y) / total\n g_score = 2 * (n1_x_y * np.log(n1_x_y / n1_hat) +\n (n2_x_not_y * np.log(n2_x_not_y / n2_hat)) +\n (n3_not_x_y * np.log(n3_not_x_y / n3_hat))\n + (n4_not_x_not_y * np.log(n4_not_x_not_y / n4_hat)))\n return g_score\n\n\ndef generate_n_tokens(seq: List, width):\n n_token = list(zip(*[seq[i:] for i in range(width)]))\n return list((set(n_token)))\n\n\ndef read_clean_data(file_path='data/sir_sex.txt'):\n with open(file_path, 'r') as file:\n data = file.readlines()\n clean_data = []\n\n for i in range(len(data)):\n data[i] = data[i].rstrip()\n if len((data[i])) < 3:\n continue\n item = data[i]\n prec, succ = item.split(':')\n prec = tuple(prec.split(','))\n succ = tuple(succ.split(','))\n clean_data.append([prec, succ])\n # pprint(clean_data)\n p_s_token_dict = defaultdict(int)\n for prec, succ in clean_data:\n p_s_token_dict[(prec, succ, 1)] += 1\n print(p_s_token_dict)\n return clean_data, p_s_token_dict\n\n\ndef get_clean_data(sequence_data=None, precursor_width=None, successor_width=None, lagtime_between=None):\n '''\n :param sequence_data:\n :param precursor_width:\n :param successor_width:\n :param lagtime_between:\n :return: List of List [[precursor comma separated][successor comma 
separated],.....]\n '''\n rule_gen_list: List[RuleGenBase] = []\n\n if sequence_data is not None:\n seq = sequence_data\n else:\n # successor_width = 2\n # precursor_width = 3\n # time_lag_between = 6\n successor_width = 1\n precursor_width = 1\n lagtime_between = 1\n seq = ['S', 'S', 'R', 'I', 'I', 'I', 'R', 'S', 'S', 'S', 'S', 'I', 'R', 'S', 'I', 'I', 'I', 'I', 'R', 'I', 'I',\n 'I', 'I', 'R', 'R', 'R', 'R', 'R', 'S', 'I', 'R', 'R', 'R', 'I', 'I', 'I', 'I', 'I', 'R', 'R', 'R', 'R',\n 'R', 'R', 'R', 'R', 'R', 'I', ]\n seq = get_sample_seq()\n num_rules = None\n\n # Rule generation Code Begins\n token_width = lagtime_between if lagtime_between > (\n successor_width + precursor_width) else successor_width + precursor_width\n\n # Extracting tokens from sequence\n # p_s_token_dict: key is tuple of precursor , successor, width and value is count of the combination\n p_s_token_dict = defaultdict(int)\n p_s_token_list = []\n for i in range(len(seq)):\n if i + token_width >= len(seq):\n break\n p_tok = tuple(seq[i:(i + precursor_width)])\n s_tok = tuple(seq[i + token_width - successor_width: i + token_width])\n\n # Extra for downstream tasks like writing tokens to file etc\n temp_p_tok = p_tok\n temp_s_tok = s_tok\n if len(p_tok) == 1:\n temp_p_tok = p_tok[0]\n else:\n temp_p_tok = ','.join(p_tok).strip()\n if len(s_tok) == 1:\n temp_s_tok = s_tok[0]\n else:\n temp_s_tok = ','.join(s_tok).strip()\n p_s_token_list.append(''.join([temp_p_tok, ' : ', temp_s_tok]))\n p_s_token_dict[(p_tok, s_tok, token_width)] += 1\n total_token_count: int = 0\n for cnt in p_s_token_dict.values():\n total_token_count += cnt\n print(total_token_count)\n write_to_file: bool = False\n if write_to_file:\n pprint(p_s_token_list)\n with open('data/token_data.txt', 'w') as file:\n for item in p_s_token_list:\n file.write(item)\n file.write('\\n')\n return p_s_token_list, p_s_token_dict\n\n\ndef msdd_algorithm_simple(sequence_data=None, precursor_width=None, successor_width=None, lagtime_between=None,\n dependency_evaluator_fn=None, num_rules=None, data_type_vertical=False):\n '''\n :param sequence_data: Stream of data (sequence of categorical features) of format A,B,...(precursor):B,D...(successor)\n :param precursor_width: width of the Rule head\n :param successor_width: width of the rule effect\n :param lagtime_between: time between rule and effect\n :param dependency_evaluator_fn: function to evaluate the best rule\n :param num_rules: total Number of expected rules\n :return: rules list as dictionary {}\n '''\n\n rule_gen_list: List[RuleGenBase] = []\n\n if data_type_vertical:\n p_s_token_list, p_s_token_dict = read_clean_data()\n else:\n p_s_token_list, p_s_token_dict = get_clean_data(sequence_data, precursor_width, successor_width,\n lagtime_between)\n\n total_token_count: int = 0\n # Contains all the keys from all the combinations in the precursors\n all_precursor_combination_set = set()\n\n # Keys are tuples of (frozenset of all precursor combinations, successor); values are counts of the combination\n all_combination_p_s_dict: Dict[Tuple, int] = {}\n\n # contains keys as tuple of 1: individual keys from the set of all the keys 2: Successor and value as count\n ind_combination_precursors_successor_count_dict: Dict = defaultdict(int)\n\n # contains key : Precursor tuple, value as count of tuple\n ind_precursor_count_dict: Dict = defaultdict(int)\n\n # contains key : Successor tuple, value as count of tuple\n ind_successor_count_dict: Dict = defaultdict(int)\n\n for p_s_key, count_val in p_s_token_dict.items():\n 
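# e.g. a precursor ('A', 'B') expands below into the combinations ('A',), ('B',) and ('A', 'B')\n 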
precursor_l, successor_l, width = p_s_key\n local_precursor_set = set()\n for i in range(1, len(precursor_l) + 1):\n items = combinations(precursor_l, i)\n items = list(items)\n for item in items:\n all_precursor_combination_set.add(item)\n local_precursor_set.add(item)\n\n local_precursor_set = frozenset(local_precursor_set)\n new_key_p_s = tuple([local_precursor_set, successor_l])\n all_combination_p_s_dict[new_key_p_s] = count_val\n # pprint(all_precursor_combination_set)\n # pprint(all_combination_p_s_dict)\n\n token_combination_total_count = 0\n total_successor_count = 0\n total_precursor_count = 0\n\n for unique_prec_key in all_precursor_combination_set:\n for combined_p_s_key, count_val in all_combination_p_s_dict.items():\n precursor_m, successor_m = combined_p_s_key\n if unique_prec_key in precursor_m:\n new_ind_p_s_key = tuple([unique_prec_key, successor_m])\n ind_combination_precursors_successor_count_dict[new_ind_p_s_key] += count_val\n ind_precursor_count_dict[unique_prec_key] += count_val\n token_combination_total_count += count_val\n ind_successor_count_dict[successor_m] += count_val\n\n print('token_combination_total_count = ', token_combination_total_count)\n\n for _, count_val in ind_successor_count_dict.items():\n total_successor_count += count_val\n print('total_successor_count : ', total_successor_count)\n\n for _, count_val in ind_precursor_count_dict.items():\n total_precursor_count += count_val\n print('total_precursor_count : ', total_precursor_count)\n\n for p_s_key, combined_count_val in ind_combination_precursors_successor_count_dict.items():\n prec, succ = p_s_key\n total_prec_count = ind_precursor_count_dict[prec]\n total_succ_count = ind_successor_count_dict[succ]\n rule_gen_list.append(RuleGenBase(precursor=prec, successor=succ, only_precursor_count=total_prec_count,\n both_precursor_successor_count=combined_count_val,\n only_successor_count=total_succ_count,\n total_data_count=token_combination_total_count))\n\n rule_gen_list.sort(key=lambda x: x.g_score, reverse=True)\n rule_gen_list = rule_gen_list[:num_rules]\n # for r in rule_gen_list:\n # pprint(r)\n\n final_rule_gen_list: Dict[Tuple[str], List[RuleGenBase]] = defaultdict(list)\n pram_rule_attr_list: List[PramRulesArributes] = []\n\n for r in rule_gen_list:\n final_rule_gen_list[r.precursor].append(r)\n for r_prec, base_rule_list in final_rule_gen_list.items():\n pram_rule_attr: PramRulesArributes = PramRulesArributes(attribute=base_rule_list[0].precursor_attribute,\n precursor=base_rule_list[0].precursor)\n for base_rule in base_rule_list:\n pram_rule_attr.add_new_successor(value=base_rule.successor,\n probability=base_rule.probability,\n successorAttribute=base_rule.successor_attribute)\n pram_rule_attr_list.append(pram_rule_attr)\n rule_json_value = rules_attr_to_json(pram_rule_attr_list)\n print(rule_json_value)\n return rule_json_value\n\n\n# def convert_to_vertical():\n\n\ndef test_msdd_simple():\n rule_dict = msdd_algorithm_simple(data_type_vertical=True, num_rules=4)\n print(rule_dict)\n with open('jsonData/new_rule.json', 'w') as file:\n file.write(rule_dict)\n # generate_class_from_rule_dict(rule_dict)\n\n\ndef test_msdd_simple2():\n transition = {'S': {'S': 0.7, 'I': 0.3, 'R': 0}, 'I': {'S': 0, 'I': 0.5, 'R': 0.5},\n 'R': {'S': 0.3, 'I': 0.0, 'R': 0.7}}\n SIR = SequenceGenerator(transition_prob=transition)\n sequence_data = SIR.generate_states(current_state='S', nt=2000)\n rule_dict = msdd_algorithm_simple(sequence_data, precursor_width=1, successor_width=1, lagtime_between=1,\n 
dependency_evaluator_fn=None, num_rules=None)\n pprint(rule_dict)\n # generate_class_from_rule_dict_large(rule_dict)\n\n\ndef clean_data(l: List):\n return [x for x in l if x not in ['', ' ', ',', '\\n']]\n\n\ndef test_read_clean_data():\n clean_data, p_s_token_dict = read_clean_data()\n pprint(clean_data)\n pprint(p_s_token_dict)\n\n\nif __name__ == '__main__':\n print('Running Code')\n\n test_msdd_simple()\n # test_read_clean_data\n # test_msdd_simple2()\n # algorithm_multi_sequence_data()\n # test_algorithm_multi_seq_data()\n # create_income_sir_data()\n # read_data_lower_middle_SIR()\n\n\ndef get_sample_seq2():\n transition = {'S': {'S': 0.9, 'I': 0.1, 'R': 0}, 'I': {'S': 0, 'I': 0.8, 'R': 0.2},\n 'R': {'S': 0.1, 'I': 0.0, 'R': 0.9}}\n SIR = SequenceGenerator(transition_prob=transition)\n sequence_data = SIR.generate_states(current_state='S', nt=2000)\n return sequence_data\n\n\ndef generate_class_from_rule_dict(rule_dict):\n import_str = \"from pram.data import GroupSizeProbe, ProbeMsgMode\\nfrom pram.entity import Group, GroupQry, GroupSplitSpec, Site\\nfrom pram.rule import GoToRule, DiscreteInvMarkovChain, TimeInt, Rule\\nfrom pram.sim import Simulation\\n\\n\\n\"\n rule_string = \"class Autogenerated(Rule):\\n\\tdef apply(self, pop, group, iter, t):\\n\\t\\t\"\n rule_string = import_str + rule_string\n condition_string = \"\\t\\tif group.has_attr({'flu': '\"\n condition_string_2 = \"'}): return [\"\n g_spec_str_1 = \"GroupSplitSpec(p= \"\n g_spec_str_2 = \", attr_set={ 'flu': '\"\n g_spec_str_3 = \"' }),\"\n condition_string_3 = \"]\\n\"\n\n for upper_index, (key, val) in enumerate(rule_dict.items()):\n rs = condition_string + key[0] + condition_string_2\n for i, (child_key, child_val) in enumerate(val.items()):\n p_minus = 0\n if i > len(val) - 1:\n p = p_minus\n else:\n p = child_val\n p_minus += p\n rss = g_spec_str_1 + str(p) + g_spec_str_2 + child_key[0] + g_spec_str_3\n rs += rss\n rs += condition_string_3\n rule_string = rule_string + '\\n' + rs\n\n print(rule_string)\n with open('Autogenerated.py', 'w') as file:\n file.writelines(rule_string)\n\n\ndef generate_class_from_rule_dict_large(rule_dict, class_index=1, has_attr='flu', set_attr='flu'):\n import_str = \"from pram.data import GroupSizeProbe, ProbeMsgMode\\nfrom pram.entity import Group, GroupQry, GroupSplitSpec, Site\\nfrom pram.rule import GoToRule, DiscreteInvMarkovChain, TimeInt, Rule\\nfrom pram.sim import Simulation\\n\\n\\n\"\n rule_string = \"class Autogenerated{}(Rule):\\n\\tdef apply(self, pop, group, iter, t):\\n\\t\\t\".format(class_index)\n rule_string = import_str + rule_string\n condition_string = \"\\t\\tif group.has_attr({'\" + has_attr + \"': \"\n condition_string_2 = \"}): return [\"\n g_spec_str_1 = \"GroupSplitSpec(p= \"\n g_spec_str_2 = \", attr_set={ '\" + set_attr + \"': \"\n g_spec_str_3 = \" }),\"\n condition_string_3 = \"]\\n\"\n\n for upper_index, (key, val) in enumerate(rule_dict.items()):\n if len(key) < 2:\n rs = condition_string + \"'\" + key[0] + \"'\" + condition_string_2\n else:\n rs = condition_string + str(key) + condition_string_2\n\n for i, (child_key, child_val) in enumerate(val.items()):\n p_minus = 0\n if i > len(val) - 1:\n p = p_minus\n else:\n p = child_val\n p_minus += p\n print(type(child_key))\n if len(child_key) < 2:\n rss = g_spec_str_1 + str(p) + g_spec_str_2 + \"'\" + child_key[0] + \"'\" + g_spec_str_3\n elif isinstance(child_key, str):\n # isinstance also matches numpy str_ subclasses; comparing type() against a string literal never matched\n rss = g_spec_str_1 + str(p) + g_spec_str_2 + \"'\" + child_key + \"'\" + g_spec_str_3\n else:\n 
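# fallback: non-string keys (e.g. tuples) are emitted via str() without quoting\n 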
rss = g_spec_str_1 + str(p) + g_spec_str_2 + str(child_key) + g_spec_str_3\n rs += rss\n rs += condition_string_3\n rule_string = rule_string + '\\n' + rs\n\n print(rule_string)\n new_file_name = 'Autogenerated{}.py'.format(class_index)\n with open(new_file_name, 'w') as file:\n file.writelines(rule_string)\n\n\ndef test_msdd_simple():\n rule_dict = msdd_algorithm_simple()\n generate_class_from_rule_dict(rule_dict)\n\n\ndef get_pram_attribute_from_rule(rules_dict: Dict):\n pram_rule_attr = PramRulesArributes()\n\n # with open('data/sir_sex.txt','r') as file:\n # data = file.readlines()\n # clean_data = []\n #\n # for i in range(len(data)):\n # data[i] = data[i].rstrip()\n # if len((data[i])) < 3:\n # continue\n # item = data[i]\n # prec, succ = item.split(':')\n # prec = tuple(prec.split(','))\n # succ = tuple(succ.split(','))\n # clean_data.append([prec, succ])\n # # pprint(clean_data)\n # p_s_token_dict = defaultdict(int)\n # for prec, succ in clean_data:\n # p_s_token_dict[(prec,succ,1)] += 1\n # print(p_s_token_dict)\n # return clean_data, p_s_token_dict\n","sub_path":"msdd_algorithm/utils/AlgorithmBak1.py","file_name":"AlgorithmBak1.py","file_ext":"py","file_size_in_byte":18986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338117120","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\n\nimport base64\nfrom odoo.http import request,content_disposition\nfrom odoo.addons.web.controllers.main import serialize_exception\n\nclass DownloadFile(http.Controller):\n \n @http.route('/web/binary/savefile_custom', type='http', auth=\"public\")\n @serialize_exception\n def saveas(self, model, field, id=None, filename_field=None,file_name=None, **kw):\n \"\"\" Download link for files stored as binary fields.\n\n If the ``id`` parameter is omitted, fetches the default value for the\n binary field (via ``default_get``), otherwise fetches the field for\n that precise record.\n\n :param str model: name of the model to fetch the binary from\n :param str field: binary field\n :param str id: id of the record from which to fetch the binary\n :param str filename_field: field holding the file's name, if any\n :returns: :class:`werkzeug.wrappers.Response`\n \"\"\"\n Model = request.env[model]\n fields = [field]\n if filename_field:\n fields.append(filename_field)\n if id:\n id = int(id)\n res = Model.browse(id).read(fields)[0]\n else:\n res = Model.default_get(fields)\n filecontent = base64.b64decode(res.get(field) or '')\n content_type = kw.get('content_type', 'application/octet-stream')\n if not filecontent:\n return request.not_found()\n else:\n filename = '%s_%s' % (model.replace('.', '_'), id)\n if file_name:\n filename = file_name \n elif filename_field:\n filename = res.get(filename_field, '') or filename\n \n if id and kw.get(\"delete_document\",False):\n Model.sudo().browse(id).write({field:False})\n return request.make_response(filecontent,\n [('Content-Type', content_type),\n ('Content-Disposition', content_disposition(filename))])\n ","sub_path":"import_product_inventory/controller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536858620","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport pandas as pd\n\n\nclass BoxPlot:\n\n def __init__(self, values=None, objects=None, title='', position=''):\n self.values = values\n self.objects = objects\n self.title = title\n self.position = position\n 
self.basic_position = np.arange(len(self.objects))\n self.data = np.array(self.values)\n\n def box_plot(self, bar_name=''):\n plt.title(self.title)\n if self.position == 'vertical':\n bplot = plt.boxplot(self.data, 0, 'rs', vert=True, positions=self.basic_position, patch_artist=True)\n plt.xticks(self.basic_position, self.objects)\n plt.ylabel(bar_name)\n for patch in bplot['boxes']:\n patch.set(color='green')\n for median in bplot['medians']:\n median.set(color='black')\n elif self.position == 'horizontal':\n bplot = plt.boxplot(self.data, 0, 'rs', vert=False, positions=self.basic_position, patch_artist=True)\n plt.yticks(self.basic_position, self.objects)\n plt.xlabel(bar_name)\n for patch in bplot['boxes']:\n patch.set(color='green')\n for median in bplot['medians']:\n median.set(color='black')\n","sub_path":"boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17338506","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 15:55:52 2016\n\n@author: liuzheng\n\"\"\"\n\nimport os\n#import random\nimport numpy as np\nimport glob\n\ndef makeTrainList_distractedDrivers(datapath, prefix):\n\n countList = []\n count = 0\n labelList = []\n label = -1\n nameList = []\n imageList = os.listdir(datapath)\n # imageList.sort()\n for imgName in imageList:\n print('reading data %s'%(imgName))\n label = int(imgName[4])  # np.int was removed from modern NumPy; the builtin behaves the same here\n count += 1\n nameList.append(imgName)\n labelList.append(label)\n countList.append(count)\n \n \n print('randperm')\n index = np.arange(len(countList))  # a plain range object cannot be shuffled in place on Python 3\n np.random.shuffle(index)\n countList = np.array(countList)\n countList = countList[index]\n labelList = np.array(labelList)\n labelList = labelList[index]\n nameList = np.array(nameList)\n nameList = nameList[index]\n \n print('writing')\n f = open(prefix+'trainDatalist_distractedDrivers.lst', 'w')\n for i in range(len(index)):\n f.writelines(\"%d \\t %d \\t %s\\n\"%(countList[i], labelList[i], nameList[i]))\n \n f.close()\n","sub_path":"CNN/MakeDataList.py","file_name":"MakeDataList.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568855798","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef countingValleys(n, s):\n y = 0\n y_list = [0]\n\n for i in list(s):\n if i == \"D\":\n y -= 1\n y_list.append(y)\n else:\n y += 1\n y_list.append(y)\n \n return sum(y_list[i:i+2] == [-1, 0] for i in range(n))\n\n\ndef countingBalleys(n, s):\n s_list = list(s)\n x = np.arange(0, n, 1.0)\n y = []\n y_plot = 0\n\n for i in s_list:\n if i == \"D\":\n y_plot -= 1\n y.append(y_plot)\n elif i == \"U\":\n y_plot += 1\n y.append(y_plot)\n return x, y\n\nprint(countingValleys(8,'UDDDUDUU'))\n\n'''\ncountingBalleys(12,'DDUUDDUDUUUD')\n\nplt.plot(x, y)\nplt.title(\"Result\")\n\nplt.show()\n'''","sub_path":"HackerRank/Python/CountingValleys.py","file_name":"CountingValleys.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266709267","text":"from mechanics.events import Trigger\nfrom mechanics.events import DamageEvent, HealingEvent\n\n\ndef mana_drain_callback(amount, percentage_mana_drained, percentage_healed):\n\n def _drain_cb(t,e:DamageEvent):\n\n mana_initially = e.target.mana\n\n e.target.mana -= ( e.target.mana * percentage_mana_drained + amount)\n\n drained = mana_initially - e.target.mana\n healed = 
drained * percentage_healed\n\n HealingEvent(healed, target=e.source)\n\n return _drain_cb\n\ndef build_mana_drain_trigger(unit, amount, percentage_drain, percentage_heal):\n\n\n cb = mana_drain_callback(amount, percentage_drain, percentage_heal)\n\n trig = Trigger(DamageEvent,\n platform=unit.game.events_platform,\n conditions={lambda t,e : e.source.uid == unit.uid,\n lambda t,e: e.amount > 0},\n callbacks=[cb])\n return trig\n\n\n","sub_path":"cntent/abilities/mana_drain/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"279672717","text":"def eh_primo (x):\n if x == 1:\n return False\n if x % 2 == 0 and x!=2:\n return False\n\n i=3\n while i list: \n \"\"\" retorna todas as permutacoes possiveis \"\"\" \n\n ls_permutacao = list() \n \n if len(palavra) <= 1: \n return [palavra]\n \n for _ in palavra: \n letras_restantes = \"\".join([rest for rest in palavra if rest != _ ])\n ls_permutacao.append(_)\n ls_permutacao.append(letras_restantes)\n for i in self.permutacoes(letras_restantes):\n ls_permutacao.append(_ + i) \n \n return set(ls_permutacao)\n\n def numero_permutacoes(self, num:int) -> int: \n \"\"\" retorna a quantidade possivel de permutacoes \"\"\" \n\n if num == 1: \n return 1 \n\n return num * self.numero_permutacoes(num - 1) ","sub_path":"permutation/mimic.py","file_name":"mimic.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281926314","text":"__author__ = 'shivroychowdhury'\n\nfrom geom import *\nfrom pygame import draw, mouse, key, font\nimport pygame\nfrom math import atan2, degrees, sqrt\nfrom random import randint\n\nSCREEN_BOUNDS = []\nTANKS = {}\nBULLETS = {}\nNEXT_BULLET_ID = 9000\nNEXT_TANK_ID = 1000\n\ndef stepTankland():\n moveMainTank()\n setMainTankFireDirection()\n for bullet in BULLETS.values():\n moveBullet(bullet)\n delids = []\n for tank in TANKS.values():\n tank['tick'] += 1\n tank['tick'] = tank['tick'] % tank['fire delay']\n if tank['health'] <= 0:\n delids.append(tank['id'])\n if 'ai' in tank:\n tank['health'] += 0.5\n else:\n tank['health'] += 0.1\n if tank['health'] >= 100: tank['health'] = 100\n doTankAI(tank)\n for id in delids:\n del TANKS[id]\n for tank in TANKS.values():\n drawTank(tank)\n for bullet in BULLETS.values():\n drawBullet(bullet)\n if len(TANKS) == 1 and MAIN_TANK['id'] in TANKS:\n return 'GAME OVER: YOU WON. ENDING IN FIVE SECONDS'\n if MAIN_TANK['id'] not in TANKS:\n return 'GAME OVER: YOU LOST. ENDING IN FIVE SECONDS'\n return None\n\n#Fire the main tank. 
Used for mouse button down input\ndef fireMainTank():\n fireTank(MAIN_TANK, MAIN_TANK['fire direction'])\n\n#Set the main fire direction based on the mouse location\ndef setMainTankFireDirection():\n x, y = mouse.get_pos()\n x -= XOFFS\n y -= YOFFS\n y *= -1\n lx, ly = MAIN_TANK['x'], MAIN_TANK['y']\n x -= lx\n y -= ly\n MAIN_TANK['fire direction'] = degrees(atan2(x, y))\n\n#Get the direction in which the tank is moving\ndef getMainTankMoveDirection():\n x = 0\n y = 0\n keys = key.get_pressed()\n if keys[pygame.K_a]: x -= 1\n if keys[pygame.K_s]: y -= 1\n if keys[pygame.K_d]: x += 1\n if keys[pygame.K_w]: y += 1\n if x == 0 and y == 0: return None\n return degrees(atan2(x, y))\n\n#Move the main tank\ndef moveMainTank():\n direction = getMainTankMoveDirection()\n if direction != None:\n moveTank(MAIN_TANK, direction)\n\n#Set the Pygame surface for Tankland\ndef setTanklandScreen(s, w, h):\n global SCREEN_BOUNDS\n global SCREEN\n global XOFFS\n global YOFFS\n SCREEN = s\n XOFFS = w / 2\n YOFFS = h / 2\n SCREEN_BOUNDS = [[-XOFFS, -YOFFS],\n [-XOFFS, YOFFS],\n [XOFFS, YOFFS],\n [XOFFS, -YOFFS]]\n\n#Set the main tank. Change colors and behavior.\ndef setTanklandMainTank(tank):\n global MAIN_TANK\n tank['shapes']['body color'] = 'red'\n tank['shapes']['turner color'] = 'white'\n tank['shapes']['barrel color'] = 'blue'\n tank['damage'] = 20\n tank['fire delay'] = 1\n tank['speed'] = 5\n tank['fire error'] = 5\n MAIN_TANK = tank\n\n#Add tank and give it an ID\ndef addTank(tank):\n global NEXT_TANK_ID\n tank['id'] = NEXT_TANK_ID\n TANKS[NEXT_TANK_ID] = tank\n NEXT_TANK_ID += 1\n\n#Base tank\ndef baseTank():\n return {'id': 0,\n 'health': 100,\n 'x': 0,\n 'y': 0,\n 'move direction': 0,\n 'speed': 2.5,\n 'bullet speed': 10,\n 'damage': 10,\n 'fire direction': 0,\n 'fire error': 0,\n 'fire delay': 25,\n 'tick': 0,\n 'shapes': {'body': [[-1, -1.5],\n [-1, 1.5],\n [1, 1.5],\n [1, -1.5]],\n 'body color': 'yellow green',\n 'turner': [[-0.70, -0.70],\n [-0.70, 0.70],\n [0.70, 0.70],\n [0.70, -0.70]],\n 'turner color': 'green',\n 'barrel': [[0.3, 0.7],\n [0.3, 2.5],\n [-0.3, 2.5],\n [-0.3, 0.7]],\n 'barrel color': 'dark green'},\n 'bounds': [[-1, -1.5],\n [-1, 1.5],\n [1, 1.5],\n [1, -1.5]], }\n\n#Add a tank and designate it as an AI\ndef addTankAI(num):\n for i in range(num):\n tank = baseTank()\n tank['ai'] = True\n tank['x'] = randint(-400, 400)\n tank['y'] = randint(-300, 300)\n tank['fire delay'] = 20\n tank['fire error'] = 15\n addTank(tank)\n\n#Do the tank AI if it is an AI\ndef doTankAI(tank):\n if 'ai' not in tank: return\n ax, ay = tank['x'], tank['y']\n lx, ly = MAIN_TANK['x'], MAIN_TANK['y']\n lx -= ax\n ly -= ay\n fd = degrees(atan2(lx, ly)) #Get the distance from the AI to the MAIN_TANK\n distance = sqrt(lx ** 2 + ly ** 2)\n MAX = sqrt((2*XOFFS)**2+(2*YOFFS)**2)\n tank['fire error'] = int(15 * (1- distance/MAX)) #Fire error decreases with distance from 15 to 0\n tank['bullet speed'] = 10 * (1 + distance/MAX) #Bullet speed increases with distance from 10 to 20\n fireTank(tank, fd)\n\n#Get the polygons of the tank\ndef tankPolygons(tank):\n lx, ly = tank['x'], tank['y']\n body = scalePolygon(tank['shapes']['body'], 15)\n body = rotatePolygon(body, tank['move direction'])\n body = translatePolygon(body, lx, ly)\n bturner = scalePolygon(tank['shapes']['turner'], 15)\n bturner = rotatePolygon(bturner, tank['fire direction'])\n bturner = translatePolygon(bturner, lx, ly)\n gturner = scalePolygon(tank['shapes']['turner'], .9 * 15)\n gturner = rotatePolygon(gturner, tank['fire direction'])\n gturner = 
translatePolygon(gturner, lx, ly)\n barrel = scalePolygon(tank['shapes']['barrel'], 15)\n barrel = rotatePolygon(barrel, tank['fire direction'])\n barrel = translatePolygon(barrel, lx, ly)\n return [[body, tank['shapes']['body color']],\n [bturner, 'black'],\n [gturner, tank['shapes']['turner color']],\n [barrel, tank['shapes']['barrel color']]]\n\n#Draw the tank with its health at the center\ndef drawTank(tank):\n global myfont\n tPolygons = tankPolygons(tank)\n for polygon, color in tPolygons:\n cPolygon = cameraTransformPolygon(polygon)\n draw.polygon(SCREEN, pygame.Color(color), translatePolygon(cPolygon, XOFFS, YOFFS))\n #Draw the health of the tank on its center\n myfont = font.Font(None, 20)\n label = myfont.render(\"%d\" % tank['health'], 1, (0, 0, 0))\n x, y = tank['x'], tank['y']\n SCREEN.blit(label, (XOFFS + x - 8, YOFFS - y - 8))\n\n#Get the bounds of the tank for checking intersections\ndef tankBounds(tank):\n lx, ly = tank['x'], tank['y']\n bounds = tank['bounds']\n bounds = scalePolygon(bounds, 15)\n bounds = rotatePolygon(bounds, tank['move direction'])\n bounds = translatePolygon(bounds, lx, ly)\n return bounds\n\n#Move the tank\ndef moveTank(tank, direction):\n olddir = tank['move direction']\n oldx = tank['x']\n oldy = tank['y']\n tank['move direction'] = direction\n speed = tank['speed']\n dx = speed * sin(radians(direction))\n dy = speed * cos(radians(direction))\n tank['x'] += dx\n tank['y'] += dy\n #Check if the tank is still within bounds, else reset\n if not isPointInPolygon([tank['x'], tank['y']], SCREEN_BOUNDS):\n tank['move direction'] = olddir\n tank['x'] = oldx\n tank['y'] = oldy\n return\n #Check if the tank is not intersecting another tank, otherwise reset\n for t in TANKS.values():\n if t['id'] != tank['id'] and interstects(tankBounds(t), tankBounds(tank)):\n tank['move direction'] = olddir\n tank['x'] = oldx\n tank['y'] = oldy\n break\n\n#FIRE IN THE HOLE\ndef fireTank(tank, direction):\n if tank['tick'] != 0: return\n global NEXT_BULLET_ID\n tank['fire direction'] = direction\n p = [0, 2.95]\n p = rotatePoint(p, direction)\n x, y = p\n x += tank['x']\n y += tank['y']\n #Make a bullet\n bullet = {'firing tank id': tank['id'],\n 'id': NEXT_BULLET_ID,\n 'damage': tank['damage'],\n 'x': x,\n 'y': y,\n 'direction': tank['fire direction'] + randint(-tank['fire error'], tank['fire error']),\n 'speed': tank['bullet speed'],\n 'shape': [[-0.3, -0.3],\n [-0.3, 0.3],\n [0.3, 0.3],\n [0.3, -0.3]],\n 'bounds': [[-0.3, -0.3],\n [-0.3, 0.3],\n [0.3, 0.3],\n [0.3, -0.3]]}\n BULLETS[NEXT_BULLET_ID] = bullet\n NEXT_BULLET_ID += 1\n tank['tick'] = 1\n\n#Get the bounds of the bullet\ndef bulletBounds(bullet):\n lx, ly = bullet['x'], bullet['y']\n bounds = bullet['bounds']\n bounds = scalePolygon(bounds, 15)\n bounds = rotatePolygon(bounds, bullet['direction'])\n bounds = translatePolygon(bounds, lx, ly)\n return bounds\n\n#Move the bullet\ndef moveBullet(bullet):\n direction = bullet['direction']\n speed = bullet['speed']\n dx = speed * sin(radians(direction))\n dy = speed * cos(radians(direction))\n bullet['x'] += dx\n bullet['y'] += dy\n #If the bullet is out of the screen, remove it\n if not isPointInPolygon([bullet['x'], bullet['y']], SCREEN_BOUNDS):\n del BULLETS[bullet['id']]\n return\n #Check if the bullet intersects a tank\n for t in TANKS.values():\n if t['id'] == bullet['firing tank id']: continue\n #If the bullet hits a tank, damage the tank and remove the bullet\n if interstects(bulletBounds(bullet), tankBounds(t)):\n t['health'] -= bullet['damage']\n del 
BULLETS[bullet['id']]\n            break\n\n#Draw the bullet\ndef drawBullet(bullet):\n    polygon = bulletBounds(bullet)\n    cPolygon = cameraTransformPolygon(polygon)\n    draw.polygon(SCREEN, pygame.Color('black'), translatePolygon(cPolygon, XOFFS, YOFFS))","sub_path":"tankland.py","file_name":"tankland.py","file_ext":"py","file_size_in_byte":9799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"151452182","text":"import os\r\nimport shutil\r\nimport sys\r\nimport tarfile\r\nimport urllib.request\r\nimport configparser\r\n\r\nfrom config import *\r\nfrom lib.helper import *\r\nfrom lib.tech.python import Python\r\n\r\n\r\nclass Django (Python):\r\n\ttechDescription = 'Django is a high-level Python Web framework that encourages rapid development and clean, pragmatic design.'\r\n\r\n\tdef __init__ (self, **kwargs):\r\n\t\tsuper (Django, self).__init__ (**kwargs)\r\n\r\n\t\tself.projectName = self.serverName.replace ('.', '_')\r\n\r\n\t\tself.nginxConf['server']['location'] = {\r\n\t\t\t'/' :\r\n\t\t\t\t{\r\n\t\t\t\t\t'proxy_pass' : 'http://unix:' + self.socket,\r\n\t\t\t\t\t'proxy_set_header': [\r\n\t\t\t\t\t\t'Host $http_host',\r\n\t\t\t\t\t\t'X-Real-IP $remote_addr',\r\n\t\t\t\t\t\t'X-Forwarded-Proto $scheme',\r\n\t\t\t\t\t\t'X-Forwarded-For $proxy_add_x_forwarded_for'\r\n\t\t\t\t\t]\r\n\t\t\t\t},\r\n\t\t\t'/static': {\r\n\t\t\t\t'alias' : CreatePath (Config.serverRoot, self.serverName, Config.serverDirs['document_root'], self.projectName, 'static'),\r\n\t\t\t\t'autoindex': 'on'\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tself.nginxConf['server']['root'] = CreatePath (Config.serverRoot, self.serverName, Config.serverDirs['document_root'], self.projectName)\r\n\r\n\t\tself.techConf = {\r\n\t\t\t'pythonpath': self.nginxConf['server']['root'],\r\n\t\t\t'bind' : 'unix:' + self.socket,\r\n\t\t\t'workers' : 4,\r\n\t\t\t# 'user': 'nobody',\r\n\t\t}\r\n\r\n\t\tself.confFile = CreatePath (Config.serverRoot, self.serverName, Config.serverDirs['config'], 'gunicorn_config.py')\r\n\r\n\t\tself.serverStart = 'gunicorn -c {} -D {} --reload'.format (self.confFile, self.projectName + '.wsgi')\r\n
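\r\n\t# Installs/updates Django into the vhost's document root via pip, scaffolds the project\r\n\t# with django-admin, appends STATIC_ROOT to settings.py, then runs collectstatic/migrate\r\n\t# and creates a superuser. Returns False when checkGunicorn() reports Gunicorn is missing.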
\r\n\tdef load (self):\r\n\t\ttry:\r\n\t\t\tif self.checkGunicorn ():\r\n\t\t\t\tdestinationDir = CreatePath (CreatePath (Config.serverRoot, self.serverName, Config.serverDirs['document_root']))\r\n\r\n\t\t\t\tos.chdir (destinationDir)\r\n\t\t\t\tos.system ('pip install --upgrade Django')\r\n\t\t\t\tos.system ('django-admin startproject {}'.format (self.projectName))\r\n\r\n\t\t\t\tfw = open (os.path.join (destinationDir, self.projectName, self.projectName, 'settings.py'), 'a')\r\n\t\t\t\tfw.write ('STATIC_ROOT = os.path.join(BASE_DIR, \"static\")')\r\n\t\t\t\tfw.close ()\r\n\r\n\t\t\t\tos.chdir (os.path.join (destinationDir, self.projectName))\r\n\t\t\t\tos.system ('python manage.py collectstatic')\r\n\t\t\t\tos.system ('python manage.py migrate')\r\n\t\t\t\tos.system ('python manage.py createsuperuser')\r\n\r\n\t\t\t\tprint ('The new version of Django was downloaded successfully!')\r\n\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tprint ('Error! Could not download Django because Gunicorn is missing')\r\n\t\t\t\treturn False\r\n\t\texcept:\r\n\t\t\tprint ('Error downloading Django!')\r\n\t\t\treturn False\r\n","sub_path":"lib/tech/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"478655600","text":"'''\nName: part2.py\nAuthor: Zhengchao Yu \nDate: 10/1/2014\nSection: 4\nE-Mail: zy3@umbc.edu\nAssignment Description: \nProgram takes an input from the user and prints prime or not prime.\n'''\nimport math \n\ndef main():\n\n    # Prompt the user to enter a prime number \n    primeNumber = int(input(\"Enter a prime number: \"))\n    prime = True\n    # Numbers below 2 are not prime by definition\n    if primeNumber < 2:\n        prime = False\n    # Set i = primeNumber\n    i = primeNumber\n\n    # This while loop will determine if the number is prime.\n    # Decrement i >= 3\n    while i >= 3:\n\n        i = i - 1\n        # Check to see if remainder == 0 \n        if (primeNumber % i) == 0:\n            prime = False\n\n    # If prime == True, print (is prime) \n    if prime == True: \n        print(primeNumber,\"is prime\")\n\n    # If prime == False, print (is not prime)\n    else:\n        print(primeNumber, \"is not prime\")\n\nmain()\n","sub_path":"HW03/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"593357628","text":"import random\nimport numpy as np\n\n\n\ndef get_level(s_x,s_y,fake):\n    global level,way\n    while 1:\n        way=[]\n        try:\n            size=s_x,s_y\n            level=np.zeros(size)\n            start_x=random.randint(0,s_x-1)\n            start_y=random.randint(0,s_y-1)\n            level[start_x,start_y]=1\n            way.append(str(start_x)+\",\"+str(start_y))\n            get_way(s_x,s_y,start_x,start_y,8,0)\n            for i in range(fake):\n                start_x,start_y=fake_start(s_x,s_y,8)\n                #print(start_x,start_y)\n                if level[start_x,start_y]!=8:\n                    level[start_x,start_y]=5\n                    way.append(str(start_x)+\",\"+str(start_y))\n                    get_way(s_x,s_y,start_x,start_y,5,1)\n            #print(level)\n            break\n        except:\n            get_level(s_x,s_y,fake)\n    return level\n
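\n#Carve a corridor from (start_x,start_y), one random step at a time, until the walker\n#is boxed in. mark=8 traces the real route (its dead end becomes 3, the exit);\n#mark=5 with isfake=1 traces a decoy branch that gets no exit marker.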
move.append(\"right\")\n elif start_x==s_x-1 and level[start_x-1,start_y+1]==0 and non!=1:\n move.append(\"right\")\n elif level[start_x-1,start_y+1]==0 and level[start_x+1,start_y+1]==0 and non!=1:\n move.append(\"right\")\n \n if len(move)!=0:\n go=move[random.randint(0,len(move)-1)]\n if go==\"up\":\n start_x=start_x-1\n elif go==\"down\":\n start_x=start_x+1\n elif go==\"left\":\n start_y=start_y-1\n elif go==\"right\":\n start_y=start_y+1\n level[start_x,start_y]=mark\n way.append(str(start_x)+','+str(start_y))\n go=''\n else:\n if isfake==0:\n level[start_x,start_y]=3\n break\n \ndef fake_start(s_x,s_y,mark):\n while 1: \n fakepoint=way[random.randint(0,len(way)-1)]\n i=int(fakepoint.split(\",\")[0])\n j=int(fakepoint.split(\",\")[1])\n move=[]\n if j<=s_x-2 and level[i,j+1]==0:\n move.append(\"r\")\n if j>=1 and level[i,j-1]==0:\n move.append(\"l\")\n if i<=s_y-2 and level[i+1,j]==0:\n move.append(\"d\")\n if i>=1 and level[i-1,j]==0:\n move.append(\"u\")\n if len(move)!=0:\n move=move[random.randint(0,len(move)-1)]\n if move==\"r\":\n j=j+1\n elif move==\"l\":\n j=j+1\n elif move==\"u\":\n i=i-1\n else:\n i=i+1\n break\n else:\n pass\n return i,j\n\n\n\n#get_level(20,20,1)\n","sub_path":"itlab3/level_map.py","file_name":"level_map.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463231328","text":"import numpy as np\n\nclass Panel:\n \"\"\"\n Contains panel data structure\n \"\"\"\n def __init__(self, x1, y1, x2, y2):\n \"\"\"\n Sets the panel bound points and compute the panel center and length.\n \n Parameters\n ----------\n x1, y1 : coordinate of the first end point\n x2, y2 : coordinate of the second end point\n \"\"\"\n from math import sqrt\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2\n self.xc = 0.5*(x1+x2)\n self.yc = 0.5*(y1+y2)\n self.length = sqrt((x2-x1)**2+(y2-y1)**2)\n\n self.vortex_strength = 0.\n self.source_strength = 0.\n\n # Angle between the panel and the (Ox) axis\n self.alpha = np.arctan2((y2-y1),(x2-x1))\n # Angle between the panel normal and the (Ox) axis\n self.beta = self.alpha + np.pi / 2.\n\n def source_influence_analytic(self,x,y,beta):\n \"\"\"\n Compute the influence term at (x,y) for the source panel\n using the analytic expression \n\n Parameters\n ----------\n x,y : numpy array\n coordinate of the colocation points where the influence is computed\n beta : numpy array (of the same size than x and y)\n angle measured from (Ox) of the direction on which the influence coefficient has to be projected\n\n Returns\n ----------\n I : influence parameter computed by integration\n \"\"\"\n\n # global to panel coordinates : rotation by alpha of the reference frame\n xp = np.cos(self.alpha) * (x-self.x1) + np.sin(self.alpha) * (y-self.y1) \n yp = - np.sin(self.alpha) * (x-self.x1) + np.cos(self.alpha) * (y-self.y1) \n local_x1 = 0.\n local_x2 = self.length\n local_y1 = 0.\n local_y2 = 0.\n r1 = np.sqrt( xp**2 + yp**2 ) # local_x1=local_y1=0\n r2 = np.sqrt( (xp-local_x2)**2 + yp**2 ) # local_z2=0\n theta1 = np.arctan2( yp, xp )\n theta2 = np.arctan2( yp, (xp-local_x2) )\n up = np.log(r1/r2) / (2. * np.pi)\n vp = (theta2 - theta1) / (2. 
\n        up = np.log(r1/r2) / (2. * np.pi)\n        vp = (theta2 - theta1) / (2. * np.pi)\n        # panel velocity to global velocity : rotation by alpha of the vector components\n        u = np.cos(self.alpha) * up - np.sin(self.alpha) * vp\n        v = np.sin(self.alpha) * up + np.cos(self.alpha) * vp\n        # Influence = projection along provided beta angle (not self.beta)\n        I = u * np.cos(beta) + v * np.sin(beta)\n        return I\n\n    def compute_source_analytic(self,X,Y):\n        \"\"\"\n        Create the source panel and compute U,V,PHI,PSI\n\n        Parameters\n        ----------\n        X,Y : numpy array \n            coordinate of the field points where the fields are computed\n\n        \"\"\"\n        from elementary_flows import Distributed_constant_Source\n        src=Distributed_constant_Source(self.x1,self.y1,self.x2,self.y2,self.source_strength)\n        src.compute_velocity_analytic(X,Y)\n        self.source = src\n\n    \n    def vortex_influence_analytic(self,x,y,beta):\n        \"\"\"\n        Compute the influence term at (x,y) for the vortex panel\n        using the analytic expression \n\n        Parameters\n        ----------\n        x,y : numpy array\n            coordinates of the collocation points where the influence is computed\n        beta : numpy array (of the same size as x and y)\n            angle measured from (Ox) of the direction on which the influence coefficient has to be projected\n\n        Returns\n        ----------\n        I : influence parameter computed by integration\n        \"\"\"\n\n        # global to panel coordinates : rotation by alpha of the reference frame\n        xp = np.cos(self.alpha) * (x-self.x1) + np.sin(self.alpha) * (y-self.y1) \n        yp = - np.sin(self.alpha) * (x-self.x1) + np.cos(self.alpha) * (y-self.y1) \n        local_x1 = 0.\n        local_x2 = self.length\n        local_y1 = 0.\n        local_y2 = 0.\n        r1 = np.sqrt( xp**2 + yp**2 ) # local_x1=local_y1=0\n        r2 = np.sqrt( (xp-local_x2)**2 + yp**2 ) # local_z2=0\n        theta1 = np.arctan2( yp, xp )\n        theta2 = np.arctan2( yp, (xp-local_x2) )
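\n        # Constant-strength vortex panel in the panel frame (roles swapped vs. the source): u' = (theta2 - theta1)/(2*pi), v' = -ln(r1/r2)/(2*pi)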
\n        up = (theta2 - theta1) / (2. * np.pi)\n        vp = -np.log(r1/r2) / (2. * np.pi)\n        # panel velocity to global velocity : rotation by alpha of the vector components\n        u = np.cos(self.alpha) * up - np.sin(self.alpha) * vp\n        v = np.sin(self.alpha) * up + np.cos(self.alpha) * vp\n        # Influence = projection along provided beta angle (not self.beta)\n        I = u * np.cos(beta) + v * np.sin(beta)\n        return I\n    \n    def compute_vortex_analytic(self,X,Y):\n        \"\"\"\n        Create the vortex panel and compute U,V,PHI,PSI\n\n        Parameters\n        ----------\n        X,Y : numpy array \n            coordinate of the field points where the fields are computed\n\n        \"\"\"\n        from elementary_flows import Distributed_constant_Vortex\n        vrt=Distributed_constant_Vortex(self.x1,self.y1,self.x2,self.y2,self.vortex_strength)\n        vrt.compute_velocity_analytic(X,Y)\n        self.vortex = vrt\n\nclass NACA_Airfoil:\n    \"\"\"\n    Contain NACA airfoil parameters and associated functions to compute the form\n    \"\"\"\n    def __init__(self, number, chord,sharp=True):\n        \"\"\"\n        Sets the airfoil parameter.\n        \n        Parameters\n        ----------\n        number: 4 digit string \n            NACA number\n        chord : float\n            chord of the airfoil\n        sharp: boolean\n            sharp or blunt TE (default sharp=True)\n\n        \"\"\"\n        self.number = number\n        self.camber_max = int(number[0])/100.\n        self.camber_pos = int(number[1])/10.\n        self.thickness = int(number[2:4])/100.\n        self.chord = chord\n        # standard NACA 4-digit thickness coefficients\n        self.coeff = np.array((0.2969,-0.1260,-0.3516,0.2843,-0.1015))\n        self.expo = np.array((0.5,1,2,3,4))\n        if sharp:\n            # closed (sharp) trailing-edge variant of the last coefficient\n            self.coeff[-1]=-0.1036\n\n    def half_thickness(self,x):\n        \"\"\"\n        Compute the half thickness of symmetric NACA \n        \n        Parameters\n        ----------\n        x: coordinates along the chord\n        \"\"\" \n        xc = x / self.chord\n        def yt(xc):\n            y_t = np.zeros_like(xc)\n            for ic in range(5):\n                y_t += self.coeff[ic] * xc**(self.expo[ic])\n            y_t *= self.thickness / 0.2\n            return y_t\n\n        self.shape = yt(xc) * self.chord\n\n\n    def camber(self,x):\n        \"\"\"\n        Compute the mean camber line of NACA 4 digits\n        \n        Parameters\n        ----------\n        x: coordinates along the chord\n        \"\"\" \n        xc = x / self.chord\n\n        def yc(xc):\n            y_c = np.zeros_like(xc)\n            xc_up = xc < self.camber_pos\n            xc_dn = xc >= self.camber_pos\n            if np.any(xc_up):\n                y_c[xc_up] = self.camber_max / self.camber_pos**2 * (2.*self.camber_pos*xc[xc_up]-xc[xc_up]**2)\n            y_c[xc_dn] = self.camber_max / (1.-self.camber_pos)**2 * (1.-2.*self.camber_pos + 2.*self.camber_pos*xc[xc_dn]-xc[xc_dn]**2)\n            \n            return y_c\n\n        self.mean_camber_line = yc(xc) * self.chord\n\n    def deriv_camber(self,x):\n        \"\"\"\n        Compute the mean camber line derivative of NACA 4 digits\n        \n        Parameters\n        ----------\n        x: coordinates along the chord\n        \"\"\" \n        xc = x / self.chord\n\n        def dycdx(xc):\n            dy_c = np.zeros_like(xc)\n            xc_up = xc < self.camber_pos\n            xc_dn = xc >= self.camber_pos\n            if np.any(xc_up):\n                dy_c[xc_up] = 2. 
* self.camber_max / self.camber_pos**2 * (self.camber_pos-xc[xc_up])\n dy_c[xc_dn] = 2.*self.camber_max / (1.-self.camber_pos)**2 * (self.camber_pos -xc[xc_dn])\n \n return dy_c\n\n self.deriv_camber_fct = dycdx(xc)\n\n def profile_side_shape(self,x):\n \"\"\"\n Compute the suction side of NACA 4 digits\n \n Parameters\n ----------\n x: coordinates along the chord\n\n Returns\n -------\n xU: coordinates along the x-axis of the upper edge of the profile\n yU: coordinates along the y-axis of the upper edge of the profile\n xL: coordinates along the x-axis of the lower edge of the profile\n yL: coordinates along the y-axis of the lower edge of the profile\n \"\"\"\n self.half_thickness(x)\n if self.camber_max>0:\n self.camber(x)\n self.deriv_camber(x)\n theta = np.arctan(self.deriv_camber_fct)\n xU = x - self.shape * np.sin(theta)\n yU = self.mean_camber_line + self.shape * np.cos(theta)\n xL = x + self.shape * np.sin(theta)\n yL = self.mean_camber_line - self.shape * np.cos(theta)\n else:\n xU = x\n yU = self.shape\n xL = x\n yL = - self.shape\n \n return xU,yU,xL,yL\n\n","sub_path":"12-notebook_source_vortex_panels/panel_struct.py","file_name":"panel_struct.py","file_ext":"py","file_size_in_byte":8853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"631090890","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.bumblebee.interfaces import IBumblebeeDocument\nfrom ftw.bumblebee.tests.helpers import asset as bumblebee_asset\nfrom ftw.bumblebee.tests.helpers import DOCX_CHECKSUM\nfrom ftw.bumblebee.tests.helpers import get_queue\nfrom ftw.testbrowser import browsing\nfrom opengever.core.testing import OPENGEVER_FUNCTIONAL_BUMBLEBEE_LAYER\nfrom opengever.document.interfaces import ICheckinCheckoutManager\nfrom opengever.dossier.interfaces import ITemplateDossierProperties\nfrom opengever.testing import assets\nfrom opengever.testing import FunctionalTestCase\nfrom opengever.testing import obj2brain\nfrom opengever.testing.helpers import create_document_version\nfrom plone import api\nfrom plone.namedfile.file import NamedBlobFile\nfrom plone.rfc822.interfaces import IPrimaryFieldInfo\nfrom plone.uuid.interfaces import IUUID\nfrom zope.component import getMultiAdapter\nfrom zope.event import notify\nfrom zope.lifecycleevent import ObjectModifiedEvent\n\n\nclass TestBumblebeeIntegrationWithDisabledFeature(FunctionalTestCase):\n \"\"\"Test integration of ftw.bumblebee.\"\"\"\n\n def setUp(self):\n super(TestBumblebeeIntegrationWithDisabledFeature, self).setUp()\n\n self.document = create(Builder('document')\n .attach_file_containing(\n bumblebee_asset('example.docx').bytes(),\n u'example.docx'))\n\n def test_bumblebee_checksum_is_calculated_for_opengever_docs(self):\n self.assertEquals(\n DOCX_CHECKSUM,\n IBumblebeeDocument(self.document).get_checksum())\n\n def test_opengever_documents_have_a_primary_field(self):\n fieldinfo = IPrimaryFieldInfo(self.document)\n self.assertEqual('file', fieldinfo.fieldname)\n\n @browsing\n def test_document_preview_is_hidden(self, browser):\n dossier = create(Builder('dossier'))\n document = create(Builder('document').within(dossier))\n\n browser.login().visit(document, view='tabbedview_view-overview')\n\n self.assertEqual(0, len(browser.css('.documentPreview')))\n\n\nclass TestBumblebeeIntegrationWithEnabledFeature(FunctionalTestCase):\n\n maxDiff = None\n layer = OPENGEVER_FUNCTIONAL_BUMBLEBEE_LAYER\n\n @browsing\n def test_document_preview_is_visible(self, browser):\n dossier = 
create(Builder('dossier'))\n document = create(Builder('document').within(dossier))\n\n browser.login().visit(document, view='tabbedview_view-overview')\n\n self.assertEqual(1, len(browser.css('.documentPreview')))\n\n @browsing\n def test_link_previews_to_bumblebee_overlay_document(self, browser):\n dossier = create(Builder('dossier'))\n document = create(Builder('document').within(dossier))\n\n create(Builder('document').within(dossier))\n\n browser.login().visit(document, view=\"tabbedview_view-overview\")\n\n preview_element = browser.css('.documentPreview .showroom-item')\n self.assertEqual(\n 'http://nohost/plone/dossier-1/document-1/@@bumblebee-overlay-document',\n preview_element.first.get('data-showroom-target'))\n\n def test_does_not_queue_bumblebee_storing_if_not_digitally_available(self):\n create(Builder('document'))\n queue = get_queue()\n self.assertEquals(0, len(queue), 'Expected no job in the queue.')\n\n def test_prevents_checked_out_document_checksum_update(self):\n document = create(Builder('document')\n .attach_file_containing(\n bumblebee_asset('example.docx').bytes(),\n u'example.docx')\n .checked_out())\n\n self.assertEquals(\n DOCX_CHECKSUM,\n IBumblebeeDocument(document).get_checksum())\n\n document.update_file(filename=u'foo.txt',\n content_type='text/plain',\n data='foo')\n notify(ObjectModifiedEvent(document))\n\n self.assertEquals(\n DOCX_CHECKSUM,\n IBumblebeeDocument(document).get_checksum())\n\n def test_queues_bumblebee_storing_after_document_checkin(self):\n dossier = create(Builder('dossier'))\n document = create(Builder('document')\n .within(dossier)\n .attach_file_containing(\n 'foo',\n u'example.docx')\n .checked_out())\n queue = get_queue()\n queue.reset()\n\n document.update_file(filename=u'example.docx',\n content_type='text/plain',\n data=bumblebee_asset('example.docx').bytes())\n manager = getMultiAdapter((document, self.portal.REQUEST),\n ICheckinCheckoutManager)\n manager.checkin()\n\n self.assertEquals(1, len(queue), 'Expected 1 job in the queue.')\n job, = queue.queue\n\n self.assertDictEqual(\n {'application': 'local',\n 'file_url': ('http://nohost/plone/bumblebee_download' +\n '?checksum={}'.format(DOCX_CHECKSUM) +\n '&uuid={}'.format(IUUID(document))),\n 'salt': IUUID(document),\n 'checksum': DOCX_CHECKSUM,\n 'deferred': False,\n 'url': '/plone/dossier-1/document-1/bumblebee_trigger_storing'},\n job)\n\n def test_queues_bumblebee_storing_after_revert_to_previous_version(self):\n dossier = create(Builder('dossier'))\n document = create(Builder('document')\n .within(dossier)\n .attach_file_containing(\n 'foo', u'example.docx'))\n create_document_version(document, 1,\n data=bumblebee_asset('example.docx').bytes())\n create_document_version(document, 2)\n queue = get_queue()\n queue.reset()\n\n manager = getMultiAdapter((document, self.portal.REQUEST),\n ICheckinCheckoutManager)\n manager.revert_to_version(1)\n\n self.assertEquals(1, len(queue), 'Expected 1 job in the queue.')\n job, = queue.queue\n\n self.assertDictEqual(\n {'application': 'local',\n 'file_url': ('http://nohost/plone/bumblebee_download' +\n '?checksum={}'.format(DOCX_CHECKSUM) +\n '&uuid={}'.format(IUUID(document))),\n 'salt': IUUID(document),\n 'checksum': DOCX_CHECKSUM,\n 'deferred': False,\n 'url': '/plone/dossier-1/document-1/bumblebee_trigger_storing'},\n job)\n\n @browsing\n def test_updates_checksum_and_queue_storing_after_docproperty_update(self, browser):\n api.portal.set_registry_record('create_doc_properties',\n interface=ITemplateDossierProperties,\n 
value=True)\n\n        dossier = create(Builder('dossier'))\n        document = create(Builder('document')\n                          .within(dossier)\n                          .checked_out()\n                          .with_asset_file('with_gever_properties.docx'))\n\n        # update\n        document.file = NamedBlobFile(\n            data=assets.load(u'with_gever_properties_update.docx'),\n            filename=u'with_gever_properties.docx')\n        checksum_before = IBumblebeeDocument(document).update_checksum()\n\n        browser.login().open(document)\n        browser.click_on('without comment')\n\n        self.assertNotEqual(\n            checksum_before, IBumblebeeDocument(document).get_checksum(),\n            'Document checksum not updated after docproperties update.')\n        self.assertNotEqual(\n            checksum_before, obj2brain(document).bumblebee_checksum,\n            'Document checksum not updated after docproperties update.')\n","sub_path":"opengever/document/tests/test_bumblebee.py","file_name":"test_bumblebee.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"247157849","text":"#-*- encoding:utf-8 -*-\n#filename: regex_note.py\n\nfrom mytools import newline,endline,newpart,line\n\n#Simple patterns\nnewline('Simple patterns')\nline('Matching Characters')\nprint(\n'''\nmetacharacters:\n    . ^ $ * + ? { } [ ] \\\\ | ( )\n    []: specifies a character class; other metacharacters are not active inside a class, they just match themselves\n    [^]: matches the characters that are NOT in the class\n    \\\\: escape character, and introduces the special sequences\n    \\\\d: matches any decimal digit, equivalent to [0-9]\n    \\\\D: matches any non-digit character, equivalent to [^0-9]\n    \\\\s: matches any whitespace character, equivalent to [ \\\\t\\\\n\\\\r\\\\f\\\\v]\n    \\\\S: matches any non-whitespace character, equivalent to [^ \\\\t\\\\n\\\\r\\\\f\\\\v]\n    \\\\w: matches any alphanumeric character, equivalent to [a-zA-Z0-9_]\n    \\\\W: matches any non-alphanumeric character, equivalent to [^a-zA-Z0-9_]\n    special sequences can be used inside a character class []\n    .: matches any character except a newline\n'''\n)\n\nline('Repeating Things')\nprint(\n'''\n    *: the preceding character repeated 0 or more times\n    +: the preceding character repeated 1 or more times\n    ?: the preceding character repeated 0 or 1 time\n    {m,n}: the preceding character repeated m to n times\n'''\n)\nendline('Simple patterns')\n#------------------------------\n\n\nnewpart()\n\n#Using Regular Expressions\nnewline('Using Regex')\nline('Compiling Regex')\nimport re\np=re.compile('ab*')\nprint(p)\n\nline('Compiling Regex with options flags')\np=re.compile('ab*',re.IGNORECASE)\nprint(p)\n\nline('Performing Matches')\nprint(\n'''\n    match()\n    search()\n    findall()\n    finditer()\n'''\n)\n\n\nm=p.match(\"abB\")\nprint(m)\n\nline('Match Objects')\nprint(\n'''\ngroup()\nstart()\nend()\nspan()\n'''\n)\nprint( m.group())\nprint('start',m.start())\nprint('end',m.end())\nprint('span ',m.span())\n\np=re.compile('\\\\d+')\ns='12 drummers drumming, 11 pipers piping, 10 lords a-leaping'\nm=p.match(s)\nprint(m)\nprint(p.findall(s))\nitr=p.finditer(s)\nfor i in itr: print(i)\n\nline('Module-level Functions')\nprint( re.match(r'\\\\d+',s))\n\n\nline('Compilation Flags')\nprint(\n'''\nASCII,A: makes several escapes like \\\\w, \\\\b, \\\\s and \\\\d match only ASCII characters\nDOTALL,S: makes . match any character, including a newline\nIGNORECASE,I: case-insensitive matching\nLOCALE,L: locale-aware matching\nMULTILINE,M: multi-line matching, affects ^ and $\nVERBOSE,X: enables verbose REs, which can be organized more cleanly and readably\n'''\n)\nendline('Using Regex')\n#------------------------------\n\n\n#More Pattern Power\nnewline('More Pattern Power')\nline('More Metacharacters')\nprint(\n'''\n    |: alternation ('or')\n    ^: matches at the beginning of a line\n    $: matches at the end of a line\n    \\\\A: matches only at the start of the string\n    \\\\Z: matches only at the end of the string\n    \\\\b: matches a word boundary\n    \\\\B: matches only when the current position is not at a word boundary\n'''\n)\n\n\nline('Grouping')\ns='''\nFrom: author@example.com\nUser-Agent: Thunderbird 1.5.0.9 (X11/20061227)\nMIME-Version: 1.0\nTo: editor@example.com\n'''\np=re.compile('(a)b')\nm=p.match('ab')\nprint( m.groups())\nprint( m.group(1))\n\nline('Non-capturing and Named Groups')\n'''\n(?:patterns): Non capturing\n(?P<name>patterns): Named Groups\n(?P=name): backreference to a named group, replacing \\\\num\n'''
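\n#Illustrative addition (not from the original notes): a named group and its backreference\np=re.compile(r'(?P<last>\\w+) (?P=last)')\nm=p.search('the the quick brown fox')\nprint( m.group('last'))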
\n\nline('Lookahead Assertions')\n'''\n(?=...): Positive lookahead assertion\n\n(?!...): Negative lookahead assertion\n.*[.](?!bat$|exe$).*$\n'''\nendline('More Pattern Power')\n#------------------------------\n\n\n\nnewpart()\n\n\n#Modifying Strings\nnewline('Modifying Strings')\n\nline('Splitting Strings')\n'''\nsplit()\n.split(string[,maxsplit=0])\n'''\np=re.compile(r'\\\\s')\nprint(p.split(s,5))\n\nline('Search and Replace')\n'''\n.sub(replacement,string[,count=0])\n.subn(replacement,string[,count=0])\n'''\np=re.compile('(blue|white|red)')\nst='blue socks and red shoes'\nprint( p.sub('color',st))\n\n\nendline('Modifying Strings')\n#------------------------------\n\n\n\n\nnewpart()\n\n\n#Common Problems\nnewline('Common Problems')\n\nline('Using String Methods')\nts='swordfish'\nns=ts.replace('word','deed')\nprint(ns)\n\nline('Match vs search')\n'''\nmatch: only checks if the RE matches at the beginning of the string\nsearch: will scan forward through the string for a match\n'''\nprint( re.match('super','superstition').span())\nprint( re.match('super','insuperable'))\n\nprint( re.search('super','superstition').span())\nprint( re.search('super','insuperable').span())\n\n\nline('Greedy vs Non-Greedy')\ns='<html><head><title>Title</title>'\nprint( len(s))\nline('Greedy')\nprint( re.match('<.*>',s).span())\nprint( re.match('<.*>',s).group())\n\nline('Non-Greedy')\nprint( re.match('<.*?>',s).span())\nprint( re.match('<.*?>',s).group())\n\n\nline('Using re.VERBOSE')\n'''\nthis option could make re patterns more readable\n'''\n\nendline('Common Problems')\n#------------------------------\n","sub_path":"python/regex/regex_not.py","file_name":"regex_not.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"508007963","text":"import os\r\nimport sys\r\nfrom time import time\r\n\r\nversion = sys.version_info[0]\r\n\r\n#compatibility\r\ntry:\r\n    if version < 3:\r\n        from Tkinter import *\r\n        from tkFileDialog import askopenfilename, asksaveasfilename\r\n        from tkMessageBox import showerror, showinfo\r\n    else:\r\n        from tkinter import *\r\n        from tkinter.filedialog import askopenfilename, asksaveasfilename\r\n        from tkinter.messagebox import showerror, showinfo\r\nexcept ImportError:\r\n    print(\"The tkinter (GUI) library couldn't be found on your system. 
Tkinter usually ships with Python, but Debian, RedHat, Fedora and Ubuntu provide a package for it.\\n\" +\r\n \"If you see this message, please install tkinter for Python %i.%i.%i.\\n\" % sys.version_info[:3] +\r\n \"More info can be found at: http://tkinter.unpythonic.net/wiki/How_to_install_Tkinter\\n\" +\r\n \"Alternatively, you can run the assembler without GUI with run_nogui.sh or run_nogui.bat\")\r\n exit()\r\n \r\nfrom assembler import AssemblyParser, to_vhdl\r\n\r\ndef mtime():\r\n return int(time() * 1000)\r\n\r\nclass Application(Frame):\r\n def __init__(self, master=None):\r\n Frame.__init__(self, master, width = 350, height = 100, bg = \"white\")\r\n master.minsize(350, 100)\r\n master.title(\"Delta I Assembler\")\r\n if os.name == \"nt\":\r\n master.wm_iconbitmap(bitmap = \"tudelft.ico\")\r\n else:\r\n master.wm_iconbitmap(bitmap = \"@tudelft.xbm\")\r\n master.resizable(0,0)\r\n self.pack_propagate(0) \r\n self.pack(side = \"top\", fill = \"both\", expand = True)\r\n self.create_widgets()\r\n \r\n def asm_browse(self):\r\n asm_fname = askopenfilename(filetypes=[(\"Assembly file\", \"*.asm\"), (\"All files\",\"*.*\")])\r\n if len(asm_fname) < 1: return\r\n self.asm_input.delete(0, END)\r\n self.asm_input.insert(0, asm_fname)\r\n self.asm_input.xview(len(asm_fname)- 1)\r\n\r\n def vhd_browse(self):\r\n vhd_fname = asksaveasfilename(defaultextension=\".vhd\", filetypes=[(\"VHD file\", \"*.vhd\"), (\"All files\",\"*.*\")])\r\n if len(vhd_fname) < 1: return\r\n self.vhd_input.delete(0, END)\r\n self.vhd_input.insert(0, vhd_fname)\r\n self.vhd_input.xview(len(vhd_fname)- 1)\r\n\r\n def convert(self):\r\n input_fname = self.asm_input.get()\r\n output_fname = self.vhd_input.get()\r\n if len(input_fname) < 1:\r\n showerror(\"Delta I Assembler\", \"No input file specified\")\r\n return\r\n if len(output_fname) < 1:\r\n showerror(\"Delta I Assembler\", \"No output file specified\")\r\n return\r\n try:\r\n f = open(input_fname)\r\n contents = f.read()\r\n f.close()\r\n begin = mtime()\r\n parser = AssemblyParser(contents)\r\n instructions = parser.get_instructions()\r\n script = to_vhdl(instructions, True, parser.codelines)\r\n delta = mtime() - begin\r\n f = open(output_fname, \"w\")\r\n f.write(script)\r\n f.close()\r\n showinfo(\"Delta I Assembler\", \"VHD file written to '%s'\\nIt took %i milliseconds to translate.\" % (output_fname, delta))\r\n except Exception as ex:\r\n showerror(\"Delta I Assembler\", str(ex))\r\n \r\n def create_widgets(self):\r\n self.title_label = Label(self, text = \"Translate Delta I assembly\", font = (\"Helvetica\", 15), bg = \"white\")#, font=font.Font(family=\"Helvetica\", size=12))\r\n self.title_label.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = NW)\r\n \r\n self.form = Frame(self, bg = \"white\")\r\n self.form.grid(row = 1, column = 0, padx = 7, pady = 10)\r\n \r\n self.asm_label = Label(self.form, text = \"Input ASM file:\", font = (\"Helvetica\", 10), bg = \"white\")\r\n self.asm_label.grid(row = 0, column = 0, sticky = NW)\r\n self.asm_input = Entry(self.form)\r\n self.asm_input.grid(row = 0, column = 1)\r\n self.asm_input_select = Button(self.form, text = \"Browse\", width = 10, command = self.asm_browse, bg = \"white\")\r\n self.asm_input_select.grid(row = 0, column = 2, padx = 10)\r\n \r\n self.vhd_label = Label(self.form, text = \"Output VHD file:\", font = (\"Helvetica\", 10), bg = \"white\")\r\n self.vhd_label.grid(row = 1, column = 0, sticky = NW, pady = 3)\r\n self.vhd_input = Entry(self.form)\r\n self.vhd_input.grid(row = 1, 
column = 1, padx = 10)\r\n self.vhd_input_select = Button(self.form, text = \"Browse\", width = 10, command = self.vhd_browse, bg = \"white\")\r\n self.vhd_input_select.grid(row = 1, column = 2, padx = 10)\r\n \r\n self.start_button = Button(self.form, text = \"Translate\", bg = \"white\", command = self.convert)\r\n self.start_button.grid(row = 2, column = 1, columnspan = 2, padx = 10, pady= 20, sticky = N + E + S + W)\r\n\r\n \r\nroot = Tk()\r\napp = Application(master=root)\r\napp.mainloop()\r\ntry: root.destroy()\r\nexcept: pass","sub_path":"assembler/assembler_gui.py","file_name":"assembler_gui.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257364667","text":"from django.shortcuts import get_object_or_404\nfrom django.contrib import messages\n\nfrom catalog.models import ProductSize, ProductStock\nfrom catalog.forms import QuickAddToCart\nfrom cart import cart_funcs\n\n# [small, medium, large, etc.]\nSIZES = ProductSize.get_sizes()\n\n\ndef manage_quick_add_to_cart(request, category):\n \"\"\"\n Called only from catalog.views.ShowProductsByCategory.__call__ or catalog.views.ShowProductsByCreator.__call__.\n 1. Manages the product layout view (grid or list)\n 2. Manages if the user clicks on the `remove` button inside the cart box\n 3. Manages if the user clicks on the `ADD TO CART` button appeared in each product\n :param request: The user's request\n :param category: instance\n :return: None or str (`success` or `warning`)\n \"\"\"\n # copy POST data from the form\n post_data = request.POST.copy()\n\n if post_data.get('product_layout'):\n request.session['product_layout'] = request.POST.get('product_layout')\n return None\n\n elif post_data.get('submit') == 'Remove':\n cart_funcs.remove_from_cart(request)\n return None\n\n elif post_data.get('quick_add_to_cart_form') == 'Add To Cart':\n product_slug = post_data.get('product_slug')\n product_size = post_data.get('product_size', None)\n if product_size not in SIZES:\n product_size = None\n\n if product_size:\n prod_stock = get_object_or_404(ProductStock, product__slug=product_slug, size__name_en=product_size.title())\n else:\n prod_stock = get_object_or_404(ProductStock, product__slug=product_slug)\n cart_form = QuickAddToCart(request, post_data, chosen_prod_stock=prod_stock)\n\n if cart_form.is_valid():\n msg, type_msg = cart_funcs.quick_add_to_cart(request, prod_stock, category)\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n if type_msg == 'success':\n messages.success(request, msg, extra_tags='redirect_only')\n return 'success'\n elif type_msg == 'warning':\n messages.warning(request, msg, extra_tags='redirect_only')\n return 'warning'","sub_path":"catalog/quick_add_to_cart.py","file_name":"quick_add_to_cart.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372792404","text":"from js9 import j\n\ndef init_actions_(service, args):\n dependencies = {\n 'enable': ['install'],\n 'execute_routeros_script': ['install'],\n 'disable': ['install']\n }\n return dependencies\n\n\ndef input(job):\n service = job.service\n if job.model.args.get('location', \"\") == \"\":\n raise j.exceptions.Input(\"Location argument cannot be empty. 
Cannot continue input of %s\" % service)\n\n\ndef init(job):\n    service = job.service\n    if 'g8client' not in service.producers:\n        raise j.exceptions.AYSNotFound(\"No producer g8client found. Cannot continue init of %s\" % service)\n    g8client = service.producers[\"g8client\"][0]\n\n    users = service.model.data.uservdc\n    for user in users:\n        uservdc = service.aysrepo.serviceGet('uservdc', user.name)\n        service.consume(uservdc)\n\n    accountservice = None\n    if service.model.data.account == \"\":\n        service.model.data.account = g8client.model.data.account\n    acc = service.model.data.account\n    # get the service if it exists or create it\n    # search for that acc.\n    try:\n        accountservice = service.aysrepo.serviceGet(\"account\", acc)\n    except:\n        accargs = {'g8client': g8client.name}\n        accountactor = service.aysrepo.actorGet(\"account\")\n        accountservice = accountactor.serviceCreate(g8client.model.data.account, accargs)\n        accountservice.saveAll()\n    service.consume(accountservice)\n\n    service.saveAll()\n\n\ndef authorization_user(space, service):\n    authorized_users = space.authorized_users\n    userslist = service.producers.get('uservdc', [])\n    users = []\n    for u in userslist:\n        if u.model.data.provider != '':\n            users.append(u.model.dbobj.name + \"@\" + u.model.data.provider)\n        else:\n            users.append(u.model.dbobj.name)\n\n    # Authorize users\n    for user in users:\n        # evaluate per user, so one missing user does not flip the flag for the others\n        user_exists = user in authorized_users\n        for uvdc in service.model.data.uservdc:\n            if uvdc.name == user.split('@')[0]:\n                if user_exists:\n                    space.update_access(username=user, right=uvdc.accesstype)\n                else:\n                    space.authorize_user(username=user, right=uvdc.accesstype)\n\n    # Unauthorize users not in the schema\n    for user in authorized_users:\n        if user not in users:\n            space.unauthorize_user(username=user)\n\n\ndef install(job):\n    import time\n    service = job.service\n    if 'g8client' not in service.producers:\n        raise j.exceptions.AYSNotFound(\"No producer g8client found. 
Cannot continue install of %s\" % service)\n g8client = service.producers[\"g8client\"][0]\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n\n # Set limits\n # if space does not exist, it will create it\n externalnetworkId = service.model.data.externalNetworkID\n externalnetworkId = None if externalnetworkId == -1 else externalnetworkId\n space = acc.space_get(name=service.model.dbobj.name,\n location=service.model.data.location,\n create=True,\n maxMemoryCapacity=service.model.data.maxMemoryCapacity,\n maxVDiskCapacity=service.model.data.maxDiskCapacity,\n maxCPUCapacity=service.model.data.maxCPUCapacity,\n maxNumPublicIP=service.model.data.maxNumPublicIP,\n maxNetworkPeerTransfer=service.model.data.maxNetworkPeerTransfer,\n externalnetworkId=externalnetworkId\n )\n\n # add space ID to data\n service.model.data.cloudspaceID = space.model['id']\n service.model.save()\n authorization_user(space, service)\n\n # update capacity incase cloudspace already existed update it\n space.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity\n space.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity\n space.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP\n space.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity\n space.model['maxNetworkPeerTransfer'] = service.model.data.maxNetworkPeerTransfer\n space.save()\n\n status = space.model['status']\n timeout_limit = time.time() + 60\n while time.time() < timeout_limit:\n if status == 'DEPLOYED':\n break\n time.sleep(5)\n status = cl.api.cloudapi.cloudspaces.get(cloudspaceId=service.model.data.cloudspaceID)['status']\n else:\n raise j.exceptions.Timeout(\"VDC not yet deployed\")\n\n\ndef get_user_accessright(username, service):\n for u in service.model.data.uservdc:\n if u.name == username:\n return u.accesstype\n\n\ndef processChange(job):\n service = job.service\n\n args = job.model.args\n category = args.pop('changeCategory')\n\n if 'g8client' not in service.producers:\n raise j.exceptions.AYSNotFound(\"No producer g8client found. 
Cannot continue processChange of %s\" % service)\n g8client = service.producers[\"g8client\"][0]\n\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n\n # Get given space, raise error if not found\n space = acc.space_get(name=service.model.dbobj.name,\n location=service.model.data.location,\n create=False)\n if category == \"dataschema\" and service.model.actionsState['install'] == 'ok':\n for key, value in args.items():\n if key == 'uservdc':\n # value is a list of (uservdc)\n if not isinstance(value, list):\n raise j.exceptions.Input(message=\"%s should be a list\" % key)\n if 'uservdc' in service.producers:\n for s in service.producers['uservdc']:\n if not any(v['name'] == s.name for v in value):\n service.model.producerRemove(s)\n for v in value:\n accessRight = v.get('accesstype', '')\n if v['name'] == s.name and accessRight != get_user_accessright(s.name, service):\n name = s.name + '@' + s.model.data.provider if s.model.data.provider else s.name\n space.update_access(name, accessRight)\n for v in value:\n userservice = service.aysrepo.serviceGet('uservdc', v['name'])\n if userservice not in service.producers.get('uservdc', []):\n service.consume(userservice)\n elif key == 'location' and service.model.data.location != value:\n raise RuntimeError(\"Cannot change attribute location\")\n setattr(service.model.data, key, value)\n\n authorization_user(space, service)\n\n # update capacity incase cloudspace already existed update it\n space.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity\n space.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity\n space.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP\n space.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity\n space.save()\n\n service.save()\n\n\ndef uninstall(job):\n service = job.service\n if 'g8client' not in service.producers:\n raise j.exceptions.AYSNotFound(\"No producer g8client found. Cannot continue uninstall of %s\" % service)\n\n g8client = service.producers[\"g8client\"][0]\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n space = acc.space_get(service.model.dbobj.name, service.model.data.location)\n space.delete()\n\ndef enable(job):\n \"\"\"\n This action will enable the vdc.\n \"\"\"\n service = job.service\n\n if 'g8client' not in service.producers:\n raise j.exceptions.AYSNotFound(\"No producer g8client found. 
Cannot continue enabling %s\" % service)\n\n g8client = service.producers[\"g8client\"][0]\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n # Get space, raise error if not found\n space = acc.space_get(name=service.model.dbobj.name,\n location=service.model.data.location,\n create=False)\n space.enable('The space should be enabled.')\n service.model.data.disabled = False\n service.saveAll()\n\ndef disable(job):\n \"\"\"\n This action will disable the vdc.\n \"\"\"\n service = job.service\n\n if 'g8client' not in service.producers:\n raise j.exceptions.AYSNotFound(\"No producer g8client found. Cannot continue disabling %s\" % service)\n\n g8client = service.producers[\"g8client\"][0]\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n # Get space, raise error if not found\n space = acc.space_get(name=service.model.dbobj.name,\n location=service.model.data.location,\n create=False)\n space.disable('The space should be disabled.')\n service.model.data.disabled = True\n service.saveAll()\n\n\ndef execute_routeros_script(job):\n service = job.service\n if 'g8client' not in service.producers:\n raise j.exceptions.AYSNotFound(\"No producer g8client found. Cannot continue executing of %s\" % service)\n script = service.model.data.script\n if not script:\n raise j.exceptions.AYSNotFound(\"Param script can't be empty. Cannot continue executing of %s\" % service)\n script.replace(\"\\n\", \";\")\n g8client = service.producers[\"g8client\"][0]\n config_instance = \"{}_{}\".format(g8client.aysrepo.name, g8client.model.data.instance)\n cl = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path=\"/root/.ssh/ays_repos_key\")\n acc = cl.account_get(service.model.data.account)\n space = acc.space_get(service.model.dbobj.name, service.model.data.location)\n space.execute_routeros_script(script)\n","sub_path":"templates/vdc/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":10850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236457061","text":"# -*- coding: utf-8 -*-\n\nfrom modularodm.fields.foreign import BaseForeignField\nfrom .lists import AbstractForeignList\n\n\nclass AbstractForeignField(BaseForeignField):\n\n _list_class = AbstractForeignList\n _is_foreign = True\n _uniform_translator = False\n\n def __init__(self, *args, **kwargs):\n super(AbstractForeignField, self).__init__(*args, **kwargs)\n self._backref_field_name = kwargs.get('backref', None)\n self._is_foreign = True\n self._is_abstract = True\n\n def get_schema_class(self, schema):\n return self._schema_class.get_collection(schema)\n\n def get_primary_field(self, schema):\n schema_class = self.get_schema_class(schema)\n return schema_class._fields[schema_class._primary_name]\n\n def get_foreign_object(self, value):\n return self.get_schema_class(value[1])\\\n .load(value[0])\n\n def to_storage(self, value, translator=None):\n\n if value is None:\n return None\n if not hasattr(value, '__iter__'):\n value = (value._primary_key, value._name)\n return (\n self.get_primary_field(value[1])\\\n 
.to_storage(value[0], translator),\n value[1]\n )\n\n def from_storage(self, value, translator=None):\n\n if value is None:\n return None\n return (\n self.get_primary_field(value[1])\\\n .from_storage(value[0], translator),\n value[1]\n )\n\n def _to_primary_key(self, value):\n\n if value is None:\n return None\n if hasattr(value, '_primary_key'):\n return value._primary_key\n\n def __set__(self, instance, value, safe=False, literal=False):\n if hasattr(value, '_primary_key'):\n value = (\n value._primary_key,\n value._name\n )\n elif isinstance(value, tuple) or isinstance(value, list):\n if len(value) != 2:\n raise ValueError('Value must have length 2')\n elif value is not None:\n raise TypeError('Value must be StoredObject, tuple, or None')\n super(AbstractForeignField, self).__set__(\n instance, value, safe=safe, literal=literal\n )\n\n def __get__(self, instance, owner, check_dirty=True):\n value = super(AbstractForeignField, self).__get__(\n instance, None, check_dirty\n )\n if value is None:\n return None\n return self.get_foreign_object(value)\n","sub_path":"modularodm/fields/abstractforeignfield.py","file_name":"abstractforeignfield.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369899273","text":"#!/usr/bin/env python3\n\nT = int(input())\n\ndef not_eliminated(idx, v):\n global s, N, X\n t = X-v\n for i in range(N):\n if i == idx:\n continue\n t -= max(0, s[idx]+v-s[i])\n return t < 0\n\ndef binsearch(l, h, f, i):\n ans = h\n while h-l > 1e-7:\n m = (l+h)/2\n\n if f(i, m*X):\n h = m\n else:\n l = m\n return h\n\nfor case in range(1, T+1):\n s = list(map(int, input().split()))[1:]\n N = len(s)\n X = sum(s)\n\n #print(s, N, X)\n #print(not_eliminated(0, X-1))\n\n ans = [0]*N\n ans = [binsearch(0, 1, not_eliminated, i) for i in range(N)]\n\n #print(\"Case #{}: {}\".format(case, \" \".join(str(a) for a in ans)))\n print(\"Case #{}: {}\".format(case, \" \".join(\"{:.8%}\".format(a)[:-1] for a in ans)))\n","sub_path":"solutions_1480487_1/Python/Kosie/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488364523","text":"# tests for GME-311 and GME-297\r\nfrom __future__ import with_statement\r\n\r\nimport sys\r\nimport os.path\r\nimport unittest\r\nfrom GPyUnit.util import DispatchEx\r\n\r\nclass TestRefportConnectionInvariantUnderMoves(unittest.TestCase):\r\n def __init__(self, input_file, fco_to_move, destination_model, name=None, use_disp=None, **kwds):\r\n super(TestRefportConnectionInvariantUnderMoves, self).__init__('test', **kwds)\r\n self.input_file = input_file\r\n self.fco_to_move = fco_to_move\r\n self.destination_model = destination_model\r\n name = name if name else os.path.splitext(self.input_file)[0]\r\n self._testMethodDoc = name\r\n self.output_file = name + \"-output.mga\"\r\n self.correct_file = name + \"-correct.mga\"\r\n if use_disp:\r\n self._move_fcos = self._move_fcos_disp\r\n\r\n\r\n def test(self):\r\n \"\"\"\r\n Regression test: given self.input_file, move self.fco_to_move to self.destination_model. 
Then check self.output_file against self.correct_file\r\n \"\"\"\r\n def _adjacent_file(file):\r\n import os.path\r\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), file)\r\n from GPyUnit import util\r\n util.register_xmp(_adjacent_file('GME297ModelRefportTest.xmp'))\r\n with util.disable_early_binding():\r\n self.project = DispatchEx(\"Mga.MgaProject\")\r\n self.project.Open(\"MGA=\" + _adjacent_file(self.input_file))\r\n self.territory = self.project.BeginTransactionInNewTerr()\r\n\r\n fco_to_move = self.project.ObjectByPath(self.fco_to_move)\r\n OBJTYPE_FOLDER = 6\r\n if fco_to_move.ObjType == OBJTYPE_FOLDER:\r\n tomove = DispatchEx(\"Mga.MgaFolders\")\r\n else:\r\n tomove = DispatchEx(\"Mga.MgaFCOs\")\r\n tomove.Append(fco_to_move)\r\n\r\n destination = self.project.ObjectByPath(self.destination_model)\r\n if destination.ObjType == OBJTYPE_FOLDER:\r\n destination.MoveFolderDisp(fco_to_move)\r\n else:\r\n self._move_fcos(destination, fco_to_move, tomove)\r\n #destination.MoveFCOs(tomove, None, None)\r\n\r\n self.project.CommitTransaction()\r\n self.project.Save(\"MGA=\" + _adjacent_file(self.output_file))\r\n self.territory.Destroy()\r\n self.project.Close()\r\n\r\n import GPyUnit.util.mgadiff as mgadiff\r\n if not mgadiff.compare(_adjacent_file(self.correct_file), _adjacent_file(self.output_file)):\r\n self.fail(\"Reference file '%s' does not match output '%s'\" % (self.correct_file, self.output_file))\r\n # print \"Reference file '%s' matches output '%s'\" % (self.correct_file, self.output_file)\r\n \r\n def _move_fcos(self, destination, fco_to_move, col_to_move):\r\n import platform\r\n if platform.system() == 'Java':\r\n import org.isis.jaut.Variant\r\n destination.MoveFCODisp(fco_to_move, org.isis.jaut.Variant.create(org.isis.jaut.Variant.VT_UNKNOWN))\r\n else:\r\n destination.MoveFCOs(col_to_move, None, None)\r\n\r\n def _move_fcos_disp(self, destination, fco_to_move, col_to_move):\r\n import platform\r\n if platform.system() == 'Java':\r\n import org.isis.jaut.Variant\r\n destination.MoveFCODisp(fco_to_move, org.isis.jaut.Variant.create(org.isis.jaut.Variant.VT_UNKNOWN))\r\n else:\r\n destination.MoveFCODisp(fco_to_move, None)\r\n\r\ndef suite():\r\n suite = unittest.TestSuite()\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test1.mga\", fco_to_move=\"/Test1/Folder1/A/B\", destination_model=\"/Test1/Folder2/C\"))\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test2.mga\", fco_to_move=\"/Test2/Subtypes/A/BSubtypeRef\", destination_model=\"/Test2/Destination/Destination\"))\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test1.mga\", fco_to_move=\"/Test1/Folder1/A/RefB\", destination_model=\"/Test1/Folder2/C\", name=\"test3\"))\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test4.mga\", fco_to_move=\"/Test4/Folder1/A/RefRefB\", destination_model=\"/Test4/Folder2/C\"))\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test5.mga\", fco_to_move=\"/Test4/Folder2\", destination_model=\"/Test4/Folder3\"))\r\n suite.addTest(TestRefportConnectionInvariantUnderMoves(input_file=\"test4.mga\", fco_to_move=\"/Test4/Folder1/A/RefRefB\", destination_model=\"/Test4/Folder2/C\", name=\"test6\", use_disp=True))\r\n return suite\r\n\r\nif __name__ == \"__main__\":\r\n runner = unittest.TextTestRunner()\r\n 
runner.run(suite())\r\n","sub_path":"Tests/GPyUnit/GME_297/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110822592","text":"\"\"\"\nTHIS FILE IS JUST FOR TESTING\nshows the structure and management of DBConnector\n1. create a file with your credentials\n - create \"connection\" folder. If it doesn't exist, create it\n - create \"credentials.txt\" file. It will contain the connection information\n \n- file content example:\nlocalhost\nusername\nmySuperPassword\ndataBase_name\n\nIn this test, example will be made with temp_states\n\"\"\"\nfrom starwars_project.DBConnector import DBConnector\nconn = DBConnector('connection/credentials.txt')\n\ndf = conn.read_sql('SELECT * FROM temp_states')\nprint(df.describe())","sub_path":"test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507414687","text":"\n\n#class header\nclass _TACKY():\n\tdef __init__(self,): \n\t\tself.name = \"TACKY\"\n\t\tself.definitions = [u'of cheap quality or in bad style: ', u'sticky; (especially of paint or glue) not completely dry']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_tacky.py","file_name":"_tacky.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446126370","text":"import time\nfrom selenium import webdriver\n\nprint(\"Please note that this will download the latest Python installation executable for you.\")\ninstallType = input(\"Enter the type of installation you require (web/executable): \")\n\ndriver = webdriver.Chrome(\"C:/Personal/Learning/Software/Chromedriver/v.77/chromedriver.exe\") # Optional argument, if not specified will search path.\ndriver.get('http://www.google.com/')\n#time.sleep(3) # Let the user actually see something!\n\nsearch_box = driver.find_element_by_name('q')\nsearch_box.send_keys('Python download')\nsearch_box.submit()\n#time.sleep(3) # Let the user actually see something!\n\ndownload_page_btn = driver.find_element_by_class_name('LC20lb')\ndownload_page_btn.click()\n\n#time.sleep(3)\n\ndownload_py = driver.find_element_by_partial_link_text('Download Python')\ndownload_py.click()\n\n#driver.find_element_by_partial_link_text\n\nif installType.lower() == \"executable\":\n    install_file = driver.find_element_by_partial_link_text('Windows x86-64 executable installer')\nelif installType.lower() == \"web\":\n    install_file = driver.find_element_by_partial_link_text('Windows x86-64 web-based installer')\nelse:\n    print(\"You have entered an invalid selection.\")\n\n\ntime.sleep(100)\n#driver.quit() ","sub_path":"downloadPython.py","file_name":"downloadPython.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426169698","text":"#coding=utf8\n\"\"\"\nDjango settings for icloud project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, 
see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '6p!s!mop75qbsxm#fm$6@kwj!x9z_xf$2i8x1v+gj7^zfw=(lm'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'ifile',\n    'doora',\n    'django_summernote',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    # 'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'icloud.urls'\n\nWSGI_APPLICATION = 'icloud.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        # 'ENGINE': 'django.db.backends.sqlite3',\n        # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n        'ENGINE': 'django.db.backends.mysql', # mysql; can also be 'postgresql_psycopg2', 'postgresql', 'sqlite3' or 'oracle'.\n        'NAME': 'icloud', # database name\n        'USER': 'root', # not used by sqlite3\n        'PASSWORD': '12345', # not used by sqlite3\n        'HOST': 'localhost',\n        'PORT': '',\n    }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'zh-cn'\n\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n# APPEND_SLASH=False # slash issue\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nTEMPLATE_DIRS = (\n    os.path.join(BASE_DIR, 'templates'),\n)\n\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, \"static\"),\n    # '/var/www/static/',\n)\n\n# logging.basicConfig(\n#     level = logging.DEBUG,\n#     format = '%(levelname)s %(module)s.%(funcName)s Line:%(lineno)d%(message)s',\n#     filename = BASE_DIR + '/log/filelog.log',\n#     )\n\nlevel = logging.DEBUG\nroot = logging.getLogger()\nif len(root.handlers) == 0: # avoid adding duplicate handlers\n    filename = BASE_DIR + '/log/filelog.log'\n    format = '%(levelname)s %(module)s.%(funcName)s Line:%(lineno)d%(message)s'\n    hdlr = TimedRotatingFileHandler(filename,\"midnight\",1,5)\n    fmt = logging.Formatter(format)\n    hdlr.setFormatter(fmt)\n    root.addHandler(hdlr)\nroot.setLevel(level)","sub_path":"icloud/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550866074","text":"#!/usr/bin/env python\n\"\"\"Submit l1analysis histogram jobs\"\"\"\nimport argparse\nimport os\n\nQUEUE = \"8nh\"\n\nPARSER = argparse.ArgumentParser()\nPARSER.add_argument('-d', '--default')\nPARSER.add_argument('-n', '--new')\nPARSER.add_argument('-q', '--queue')\nARGS = PARSER.parse_args()\nif(ARGS.queue):\n    QUEUE = ARGS.queue\n\nBASECMD = \"bsub -q \" + QUEUE\n\nif(ARGS.default):\n    
cmd = BASECMD\n    cmd += \" -o def.log \\'l1jetanalysis.exe def \"\n    cmd += ARGS.default\n    cmd += \"; cp l1analysis_def.root '`pwd`\"\n    os.system(cmd)\nif(ARGS.new):\n    cmd = BASECMD\n    cmd += \" -o new.log \\'l1jetanalysis.exe new \"\n    cmd += ARGS.new\n    cmd += \"; cp l1analysis_new_cond.root '`pwd`\"\n    os.system(cmd)\n","sub_path":"scripts/submit_analysis_jobs.py","file_name":"submit_analysis_jobs.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519845369","text":"import tensorflow as tf\n\n# values for nodes\nT, F = 1., -1.\nbias = 1.\n\n# Training Set of 4 inputs\ntraining_in = [\n    [T, T, bias],\n    [T, F, bias],\n    [F, T, bias],\n    [F, F, bias],\n]\n\n# Output patterns for an AND input set\ntraining_out = [\n    [T],\n    [F],\n    [F],\n    [F],\n]\n\n# # Output patterns for an OR input set\n# training_out = [\n#     [T],\n#     [T],\n#     [T],\n#     [F],\n# ]\n\n# Weights (3 x 1 tensor to match input patterns)\n# Values randomly initialised\nweight = tf.Variable(tf.random_normal([3, 1]))\n\n\n# Step Activation Function\n# If x > 0 return 1 else return -1\ndef step(x):\n    is_greater = tf.greater(x, 0)\n    to_float = tf.to_float(is_greater)\n    to_double = tf.multiply(to_float, 2)\n    return tf.subtract(to_double, 1)\n\n\n# Output function\noutput = step(tf.matmul(training_in, weight))\n\n# Error function\nerror = tf.subtract(training_out, output)\n\n# Mean Square Error\nmse = tf.reduce_mean(tf.square(error))\n\n# Calculating the weight adjustment determined by error\ndelta = tf.matmul(training_in, error, transpose_a=True)\n\n# Assigning the adjustment value to the weight tensor\ntrain = tf.assign(weight, tf.add(weight, delta))\n\n# Create a TensorFlow session\nsession = tf.Session()\nsession.run(tf.initialize_all_variables())\n\n# Initial error and the target value\nerr, target = 1, 0\n\n# The number of epochs 0-10\nepoch, max_epochs = 0, 10\n\n# Run the network\n# Will run until mean square error is 0 or epochs exceed 10\nwhile err > target and epoch < max_epochs:\n    epoch += 1\n    err, _ = session.run([mse,train])\n    print('epoch: ', epoch, 'mse: ', err)","sub_path":"Single-Layer-Perceptron.py","file_name":"Single-Layer-Perceptron.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514867761","text":"import numpy as np\nimport cv2 as cv\nimport os\nimport argparse\nimport csv\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='image comparison')\nparser.add_argument('--path', default='./image')\nparser.add_argument('--iter', type=int, default=10000, help='iteration of out image by GAN')\nargs = parser.parse_args()\n\n\ndef split_RGB(img):\n    img = np.array(img)\n\n    img_R = img.copy()\n    img_R[:, :, (1, 2)] = 0\n    img_G = img.copy()\n    img_G[:, :, (0, 2)] = 0\n    img_B = img.copy()\n    img_B[:, :, (0, 1)] = 0\n    return img_R, img_G, img_B\n\n\ndef main():\n    filename = args.path + '/pixel_value_error.csv'\n\n    print(' Start calculation of pixel value error')\n\n    n = len([name for name in os.listdir(args.path)])\n\n    with open(filename, 'a') as f:\n        fieldname = ['iter', 'R', 'G', 'B', 'Rstd', 'Gstd', 'Bstd']\n        writer = csv.DictWriter(f, fieldnames=fieldname)\n        writer.writeheader()\n        f.close()\n\n    for i in range(1, int(n / 2) + 1):\n        img_a = Image.open('./image/image_gen_{0:08d}.png'.format(i * args.iter))\n        img_b = Image.open('./image/image_gt_{0:08d}.png'.format(i * args.iter))\n        img_a.show()\n\n        img_a_R, img_a_G, img_a_B = 
split_RGB(img_a)\n        img_b_R, img_b_G, img_b_B = split_RGB(img_b)\n\n        R = np.sum(abs(((img_a_R + 1) - (img_b_R + 1)) / (img_b_R + 1))) / 256 / 256\n        G = np.sum(abs(((img_a_G + 1) - (img_b_G + 1)) / (img_b_G + 1))) / 256 / 256\n        B = np.sum(abs(((img_a_B + 1) - (img_b_B + 1)) / (img_b_B + 1))) / 256 / 256\n        Rstd = np.std(abs(img_a_R - img_b_R))\n        Gstd = np.std(abs(img_a_G - img_b_G))\n        Bstd = np.std(abs(img_a_B - img_b_B))\n\n        with open(filename, 'a') as f:\n            fieldname = ['iter', 'R', 'G', 'B', 'Rstd', 'Gstd', 'Bstd']\n            writer = csv.DictWriter(f, fieldnames=fieldname)\n            writer.writerow({'iter': i * args.iter, 'R': R, 'G': G, 'B': B, 'Rstd': Rstd, 'Gstd': Gstd, 'Bstd': Bstd})\n            f.close()\n\n    print('finish')\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"script/pixel_value_error.py","file_name":"pixel_value_error.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"581895606","text":"\"\"\"Tools for the spatial analysis of neighborhood change.\"\"\"\n\nfrom collections import namedtuple\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\nfrom libpysal.weights import attach_islands\nfrom libpysal.weights.contiguity import Queen, Rook\nfrom libpysal.weights.distance import KNN\n\nfrom .._data import _Map\nfrom .cluster import (\n    affinity_propagation,\n    azp,\n    gaussian_mixture,\n    hdbscan,\n    kmeans,\n    max_p,\n    skater,\n    spectral,\n    spenc,\n    ward,\n    ward_spatial,\n)\n\nModelResults = namedtuple(\n    \"model\", [\"X\", \"columns\", \"labels\", \"instance\", \"W\"], rename=False\n)\n\n\ndef cluster(\n    gdf,\n    n_clusters=6,\n    method=None,\n    best_model=False,\n    columns=None,\n    verbose=False,\n    time_var=\"year\",\n    id_var=\"geoid\",\n    scaler=None,\n    **kwargs,\n):\n    \"\"\"Create a geodemographic typology by running a cluster analysis on the study area's neighborhood attributes.\n\n    Parameters\n    ----------\n    gdf : pandas.DataFrame\n        long-form (geo)DataFrame containing neighborhood attributes\n    n_clusters : int\n        the number of clusters to model (the default is 6).\n    method : str\n        the clustering algorithm used to identify neighborhood types\n    best_model : bool\n        if using a gaussian mixture model, use BIC to choose the best\n        n_clusters (the default is False).\n    columns : list-like\n        subset of columns on which to apply the clustering\n    verbose : bool\n        whether to print warning messages (the default is False).\n    time_var: str\n        which column on the dataframe defines time and/or sequencing of the\n        long-form data. Default is \"year\"\n    id_var: str\n        which column on the long-form dataframe identifies the stable units\n        over time. In a wide-form dataset, this would be the unique index\n    scaler: str or sklearn.preprocessing.Scaler\n        a scikit-learn preprocessing class that will be used to rescale the\n        data. Defaults to StandardScaler\n\n    Returns\n    -------\n    gdf : geopandas.GeoDataFrame\n        GeoDataFrame with a column of neighborhood cluster labels\n        appended as a new column. 
If cluster method exists as a column on the DataFrame\n        then the column will be incremented.\n\n    model : named tuple\n        A tuple with attributes X, columns, labels, instance, W, which store the\n        input matrix, column labels, fitted model instance, and spatial weights matrix\n\n    model_name : str\n        name of model to be stored in a Community\n\n    \"\"\"\n    # if we already have a column named after the clustering method, then increment it.\n    if method in gdf.columns.tolist():\n        model_name = method + str(len(gdf.columns[gdf.columns.str.startswith(method)]))\n    else:\n        model_name = method\n    if not columns:\n        raise ValueError(\"You must provide a subset of columns as input\")\n    if not method:\n        raise ValueError(\"You must choose a clustering algorithm to use\")\n\n    times = gdf[time_var].unique()\n    gdf = gdf.set_index([time_var, id_var])\n\n    # this is the dataset we'll operate on\n    data = gdf.copy()[columns]\n    data = data.dropna(how=\"any\", subset=columns)\n\n    # if the user doesn't specify, use the standard scaler\n    if not scaler:\n        scaler = StandardScaler()\n    for time in times:\n        data.loc[time] = scaler.fit_transform(data.loc[time].values)\n    # the rescaler can create nans if a column has no variance, so fill with 0\n    data = data.fillna(0)\n\n    specification = {\n        \"ward\": ward,\n        \"kmeans\": kmeans,\n        \"affinity_propagation\": affinity_propagation,\n        \"gaussian_mixture\": gaussian_mixture,\n        \"spectral\": spectral,\n        \"hdbscan\": hdbscan,\n    }\n\n    # run the cluster model then join the labels back to the original data\n    model = specification[method](\n        data, n_clusters=n_clusters, best_model=best_model, verbose=verbose, **kwargs\n    )\n    labels = model.labels_.astype(str)\n    data = data.reset_index()\n    clusters = pd.DataFrame(\n        {model_name: labels, time_var: data[time_var], id_var: data[id_var]}\n    )\n    clusters.set_index([time_var, id_var], inplace=True)\n    gdf = gdf.join(clusters, how=\"left\")\n    gdf = gdf.reset_index()\n    results = ModelResults(\n        X=data.values, columns=columns, labels=model.labels_, instance=model, W=None\n    )\n    return gdf, results, model_name\n\n\ndef cluster_spatial(\n    gdf,\n    n_clusters=6,\n    spatial_weights=\"rook\",\n    method=None,\n    columns=None,\n    threshold_variable=\"count\",\n    threshold=10,\n    time_var=\"year\",\n    id_var=\"geoid\",\n    scaler=None,\n    weights_kwargs=None,\n    **kwargs,\n):\n    \"\"\"Create a *spatial* geodemographic typology by running a cluster\n    analysis on the metro area's neighborhood attributes and including a\n    contiguity constraint.\n\n    Parameters\n    ----------\n    gdf : geopandas.GeoDataFrame\n        long-form geodataframe holding neighborhood attribute and geometry data.\n    n_clusters : int\n        the number of clusters to model (the default is 6).\n    spatial_weights : str ('queen' or 'rook') or libpysal.weights object\n        spatial weights matrix specification. By default, geosnap will calculate Rook\n        weights, but you can also pass a `libpysal.weights` object for more control\n        over the specification.\n    method : str\n        the clustering algorithm used to identify neighborhood types\n    columns : list-like\n        subset of columns on which to apply the clustering\n    threshold_variable : str\n        for max-p, which variable should define `p`. The default is \"count\",\n        which will grow regions until the threshold number of polygons have\n        been aggregated\n    threshold : numeric\n        threshold to use for max-p clustering (the default is 10).\n    time_var: str\n        which column on the dataframe defines time and/or sequencing of the\n        long-form data. 
Default is \"year\"\n    id_var: str\n        which column on the long-form dataframe identifies the stable units\n        over time. In a wide-form dataset, this would be the unique index\n    weights_kwargs: dict\n        If passing a `libpysal.weights` instance to spatial_weights, these additional\n        keyword arguments will be passed to the weights constructor\n    scaler: str or sklearn.preprocessing.Scaler\n        a scikit-learn preprocessing class that will be used to rescale the\n        data. Defaults to StandardScaler\n\n    Returns\n    -------\n    gdf : geopandas.GeoDataFrame\n        GeoDataFrame with a column of neighborhood cluster labels\n        appended as a new column. If cluster method exists as a column on the DataFrame\n        then the column will be incremented.\n\n    models : dict of named tuples\n        tab-completable dictionary of named tuples keyed on the Community's time variable\n        (e.g. year). The tuples store model results and have attributes X, columns, labels,\n        instance, W, which store the input matrix, column labels, fitted model instance,\n        and spatial weights matrix\n\n    model_name : str\n        name of model to be stored in a Community\n\n    \"\"\"\n    if method in gdf.columns.tolist():\n        model_name = method + str(len(gdf.columns[gdf.columns.str.startswith(method)]))\n    else:\n        model_name = method\n    if not columns:\n        raise ValueError(\"You must provide a subset of columns as input\")\n    if not method:\n        raise ValueError(\"You must choose a clustering algorithm to use\")\n\n    times = gdf[time_var].unique()\n    gdf = gdf.set_index([time_var, id_var])\n\n    # this is the dataset we'll operate on\n    data = gdf.copy()[columns + [\"geometry\"]]\n\n    contiguity_weights = {\"queen\": Queen, \"rook\": Rook}\n\n    if spatial_weights in contiguity_weights.keys():\n        W = contiguity_weights[spatial_weights]\n    else:\n        W = spatial_weights\n\n    specification = {\n        \"azp\": azp,\n        \"spenc\": spenc,\n        \"ward_spatial\": ward_spatial,\n        \"skater\": skater,\n        \"max_p\": max_p,\n    }\n\n    # if the user doesn't specify, use the standard scaler\n    if not scaler:\n        scaler = StandardScaler()\n\n    models = _Map()\n    ws = {}\n    clusters = []\n    gdf[model_name] = np.nan\n    # loop over each time period, standardize the data and build a weights matrix\n    for time in times:\n        df = data.loc[time].dropna(how=\"any\", subset=columns).reset_index()\n        df[time_var] = time\n        df[columns] = scaler.fit_transform(df[columns].values)\n\n        if weights_kwargs:\n            w0 = W.from_dataframe(df, **weights_kwargs)\n        else:\n            w0 = W.from_dataframe(df)\n        w1 = KNN.from_dataframe(df, k=1)\n        ws = [w0, w1]\n\n        if threshold_variable and threshold_variable != \"count\":\n            data[threshold_variable] = gdf[threshold_variable]\n            threshold_var = data[threshold_variable].values\n            ws[0] = attach_islands(ws[0], ws[1])\n\n        elif threshold_variable == \"count\":\n            threshold_var = np.ones(len(data.loc[time]))\n            ws[0] = attach_islands(ws[0], ws[1])\n\n        else:\n            threshold_var = None\n\n        model = specification[method](\n            df[columns],\n            w=ws[0],\n            n_clusters=n_clusters,\n            threshold_variable=threshold_var,\n            threshold=threshold,\n            **kwargs,\n        )\n\n        labels = model.labels_.astype(str)\n        clusters = pd.DataFrame(\n            {model_name: labels, time_var: df[time_var], id_var: df[id_var]}\n        )\n        clusters = clusters.drop_duplicates(subset=[id_var])\n        clusters.set_index([time_var, id_var], inplace=True)\n        gdf.update(clusters)\n        results = ModelResults(\n            X=df[columns].values,\n            columns=columns,\n            labels=model.labels_,\n            instance=model,\n            W=ws[0],\n        )\n        models[time] = results\n\n    gdf = gdf.reset_index()\n\n    return gdf, models, 
model_name\n","sub_path":"geosnap/analyze/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13749536","text":"#!/usr/bin/python\nimport sys\n\nclass Solution:\n    \"\"\"\n    @param: matrix: A list of lists of integers\n    @param: target: An integer you want to search in matrix\n    @return: An integer indicate the total occurrence of target in the given matrix\n    \"\"\"\n    def searchMatrix(self, matrix, target):\n        # write your code here\n        if not matrix or not matrix[0]:\n            return 0\n        y = 0\n        x = len(matrix[0])-1\n        count = 0\n        while x >= 0 and y < len(matrix):\n            if matrix[y][x] == target:\n                count += 1\n                y += 1\n                x -= 1\n            elif matrix[y][x] < target:\n                y += 1\n            else:\n                x -= 1\n        return count\n\ndef main():\n    aa = Solution()\n    return 0\n\nif __name__ == \"__main__\":\n    sys.exit(main())","sub_path":"LintCode/search2DMatrix2.py","file_name":"search2DMatrix2.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181544541","text":"import torch\nfrom torch.utils.data import TensorDataset, RandomSampler, DataLoader\nfrom transformers import BertTokenizer, BertConfig, BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup\nfrom metrics import get_metrics\n\nfrom keras.preprocessing.sequence import pad_sequences\nfrom tqdm import tqdm, trange\nimport os\nimport numpy as np\n\n\ndef bert_train(model, device, train_dataloader, eval_dataloader, output_dir, num_epochs, warmup_proportion, weight_decay,\n               learning_rate, adam_epsilon, save_best=False):\n    \"\"\"Training loop for bert fine-tuning. Save best works with F1 only currently.\"\"\"\n\n    t_total = len(train_dataloader) * num_epochs\n    warmup_steps = len(train_dataloader) * warmup_proportion\n    no_decay = ['bias', 'LayerNorm.weight']\n    optimizer_grouped_parameters = [\n        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n         'weight_decay': weight_decay},\n        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n         'weight_decay': 0.0}\n    ]\n    optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)\n    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,\n                                                num_training_steps=t_total)\n    train_iterator = trange(int(num_epochs), desc=\"Epoch\")\n    model.to(device)\n    tr_loss_track = []\n    eval_metric_track = []\n    output_filename = os.path.join(output_dir, 'pytorch_model.bin')\n    f1 = float('-inf')\n\n    for _ in train_iterator:\n        model.train()\n        model.zero_grad()\n        tr_loss = 0\n        nr_batches = 0\n        epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n        for step, batch in enumerate(epoch_iterator):\n            input_ids, input_mask, labels = batch\n            input_ids = input_ids.to(device)\n            input_mask = input_mask.to(device)\n            labels = labels.to(device)\n            optimizer.zero_grad()\n            outputs = model(input_ids, attention_mask=input_mask, labels=labels)\n            loss = outputs[0]\n            loss.backward()\n            optimizer.step()\n            scheduler.step()\n            tr_loss += loss.item()\n            nr_batches += 1\n            model.zero_grad()\n\n        print(\"Evaluating the model on the evaluation split...\")\n        metrics = bert_evaluate(model, eval_dataloader, device)\n        eval_metric_track.append(metrics)\n        if save_best:\n            if f1 < metrics['f1']:\n                model.save_pretrained(output_dir)\n                torch.save(model.state_dict(), output_filename)\n                print(\"The new value of f1 score of \" + 
str(metrics['f1']) + \" is higher than the old value of \" +\n                      str(f1) + \".\")\n                print(\"Saving the new model...\")\n                f1 = metrics['f1']\n            else:\n                print(\"The new value of f1 score of \" + str(metrics['f1']) + \" is not higher than the old value of \" +\n                      str(f1) + \".\")\n\n        tr_loss = tr_loss / nr_batches\n        tr_loss_track.append(tr_loss)\n\n    if not save_best:\n        model.save_pretrained(output_dir)\n        # tokenizer.save_pretrained(output_dir)\n        torch.save(model.state_dict(), output_filename)\n\n    return tr_loss_track, eval_metric_track\n\n\ndef bert_evaluate(model, eval_dataloader, device):\n    \"\"\"Evaluation of trained checkpoint.\"\"\"\n    model.to(device)\n    model.eval()\n    predictions = []\n    true_labels = []\n    data_iterator = tqdm(eval_dataloader, desc=\"Iteration\")\n    for step, batch in enumerate(data_iterator):\n        input_ids, input_mask, labels = batch\n        input_ids = input_ids.to(device)\n        input_mask = input_mask.to(device)\n\n        with torch.no_grad():\n            outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)\n\n        # loss is only output when labels are provided as input to the model\n        logits = outputs[0]\n        #print(type(logits))\n        logits = logits.to('cpu').numpy()\n        label_ids = labels.to('cpu').numpy()\n\n        for label, logit in zip(label_ids, logits):\n            true_labels.append(label)\n            predictions.append(np.argmax(logit))\n\n    #print(predictions)\n    #print(true_labels)\n    metrics = get_metrics(true_labels, predictions)\n    return metrics\n\n\ndef bert_extract_CLS_embedding(model, dataloader, device):\n    \"\"\"For each datapoint extracts embeddings for CLS token from the last layer and returns a list of CLS embeddings\n    NOTE: input model should have the option 'output_hidden_states' set to True when initialized\n    \"\"\"\n    model.to(device)\n    model.eval()\n    cls_embedding = []\n    for step, batch in enumerate(dataloader):\n        input_ids, input_mask = batch\n        input_ids = input_ids.to(device)\n        input_mask = input_mask.to(device)\n\n        with torch.no_grad():\n            outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)\n\n        all_hidden_states = outputs[-1]\n        last_layer = all_hidden_states[-1]\n        last_layer = last_layer.to('cpu')\n        #print(\"Shape:\")\n        #print(last_layer.shape)\n        cls_embedding.append(last_layer[0][0])\n        #print(len(cls_embedding))\n\n    return cls_embedding\n\n","sub_path":"src/BERT_sampling/bert_ml_sentiment_classifier.py","file_name":"bert_ml_sentiment_classifier.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426339760","text":"# Safety analysis CRUD operations\nimport json\nimport requests\nfrom globalpkg.global_var import *\nfrom tools.tool import *\n\n# temporary cookies\ncookies={'JSESSIONID': '719FF49AB0E6CB255165409E8ACB4C9Fqoevbc'}\nprint(cookies)\nname = ran_name_with_str()\nprint(\"Work appointment name\",name)\n\n\n# start the work appointment\n# build the appointment URL\nwork_appoint_id_l = sql_query_work_appointid+1\nprint(\"Work appointment NEW ID:work_appoint_id_l\",work_appoint_id_l)\nurl2='http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/cardSave?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.3707947936681053&contentType=json&ajax=true&tid=1'%(work_appoint_id_l,work_appoint_id_l)\n# request headers for the work appointment\nheaders={\n    'Accept': 'application/json, text/javascript, */*; q=0.01',\n    'csrf': 'bd95a01c276341b89715228e81d0ca3f',\n    'X-Requested-With': 'XMLHttpRequest',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',\n    'Content-Type': 'text/plain',\n    }\n# work permit (main ticket) data\ndata = 
{\n\t\"tableName\": \"hse_work_appoint\",\n\t\"iscontractor\": \"0\",\n\t\"workunitname_no\": \"\",\n\t\"territorialunitid\": 2000000003339,\n\t\"worktaskid_no\": 0,\n\t\"isreport\": \"0\",\n\t\"territorialunitname\": \"运行一部\",\n\t\"territorialunitcode\": \"CS8082020\",\n\t\"wf_audit_state\": \"6\",\n\t\"status\": \"draft\",\n\t\"dataStatus\": 0,\n\t\"ver\": 1,\n\t\"created_by\": \"\",\n\t\"created_dt\": now,\n\t\"updated_by\": \"\",\n\t\"updated_dt\": now,\n\t\"df\": 0,\n\t\"tenantid\": 1,\n\t\"ts\": \"\",\n\t\"isspecialcondition\": \"\",\n\t\"specialenvironment\": \"ALLNOT\",\n\t\"task_worktype_code\": \"QT\",\n\t\"task_worktype_name\": \"其他\",\n\t\"cywlqfyxzz\": \"0\",\n\t\"isdzdh\": \"0\",\n\t\"projecttype\": \"rcjx\",\n\t\"isupgradedh\": \"0\",\n\t\"persistent_type\": \"newoperation\",\n\t\"issjtssxzy\": \"0\",\n\t\"worklevel_dh\": \"\",\n\t\"worklevel_sx\": \"\",\n\t\"worklevel_gc\": \"\",\n\t\"worklevel_dz\": \"\",\n\t\"worklevel_gx\": \"\",\n\t\"sourcetype\": \"\",\n\t\"territorialdeviceid\": 2000000003454,\n\t\"territorialdevicename\": \"制氢装置\",\n\t\"work_position_id\": 2000000002019,\n\t\"work_position_name\": \"制氢北区\",\n\t\"worksite\": \"作业地点123\",\n\t\"workunit\": 1688712,\n\t\"workunitname\": \"长庆石化分公司\",\n\t\"workname\": name,\n\t\"workcontent\": \"作业内容123\",\n\t\"worktypename\": \"作业许可证\",\n\t\"worktype\": \"xkz\",\n\t\"appointstarttime\": starttime,\n\t\"appointendtime\": endtime,\n\t\"material_medium\": \"物料介质123\",\n\t\"risksmeasures\": \"重点防控的风险123\"\n}\n# call the work-appointment save API\nrs=requests.post(url2, json = data, headers = headers,cookies=cookies)\n# decode the response bytes\ndata = rs.content.decode('utf-8')\n# parse as JSON\ndata = json.loads(data)\n# get the API return status\nsta= data['status']\nif sta == 3200:\n    print(\"Work appointment created successfully\", sta)\nelse:\n    print(\"result\",data)\n\n    ####\n\n# submission test-case info\ncasename = 'Work appointment submission'\n# submission API URL\nurl3='http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/wfSend?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.30092471197648707&contentType=json&ajax=true&tid=1'%(work_appoint_id_l,work_appoint_id_l)\nformdata2={\n\t\"opinion\": \"申请审批\",\n\t\"nodeStr\": \"2000000009070\",\n\t\"2000000009070\": \"测试用户\",\n\t\"2000000009070_id\": 1000\n}\n# call the submission API\nrs=requests.post(url3, json = formdata2, headers = headers,cookies=cookies)\n# decode the response bytes\ndata = rs.content.decode('utf-8')\n# parse as JSON\ndata = json.loads(data)\n# get the API return status\nstatus= data['status']\nif status == 3200:\n\n    print(\"Work appointment submitted\", status)\n    #caseinfo['result'] = 1\nelse:\n    print(\"Work appointment submission\", data)\n# collect test-case execution info\n#testsuit.append(caseinfo.copy())\n# approval test-case info\n\ncasename = 'Work appointment approval'\n# count =count+1\n# caseid = count\n# caseinfo['id'] = caseid\n# caseinfo['name'] = casename\n# approval API URL\n#url4='http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/wfFinish?parentEntityId=&parentFuncCode=&topEntityId=+&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.027850408425730055&contentType=json&ajax=true&tid=1'%(work_appoint_id_l,work_appoint_id_l)\nurl4='http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/wfFinish?parentEntityId=&parentFuncCode=&topEntityId=+&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.027850408425730055&contentType=json&ajax=true&tid=1'%(work_appoint_id_l)\n\n# parameters\nformdata ={\n\t\"opinion\": \"同意\",\n\t\"cC\": \"1000\",\n\t\"cCName\": \"测试用户\",\n\t\"nickName\": \"用户\",\n\t\"is_normal_finish\": \"true\",\n\t\"nodeStr\": \"\"\n}\n# call the approval API\nrs=requests.post(url4, json = formdata, headers = headers,cookies=cookies)\n#rs.encoding='utf-8'\n#cc = str(rs.content, 'utf8')\n# decode the response bytes\ndata = rs.content.decode('utf-8')\n# parse as JSON\ndata = json.loads(data)\n# get the API return status\nstatus= 
data['status']\n\nif status == 3200:\n\n    print(\"Work appointment approved\", status)\n    #caseinfo['result'] = 1\n\ncasename = 'Work appointment invalidation'\n# count =count+1\n# caseid = count\n# caseinfo['id'] = caseid\n# caseinfo['name'] = casename\n# invalidation API URL\n#url4='http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/wfFinish?parentEntityId=&parentFuncCode=&topEntityId=+&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.027850408425730055&contentType=json&ajax=true&tid=1'%(work_appoint_id_l)\nurl4 = 'http://192.168.6.27:6030/hse/HSE_WORK_APPOINT/wfInvalid?parentEntityId=&parentFuncCode=&topEntityId=%d&topFuncCode=HSE_WORK_APPOINT&dataId=%d&0.9786549083065863&contentType=json&ajax=true&tid=1'%(work_appoint_id_l,work_appoint_id_l)\n# parameters\nformdata = {\n\t\"tableName\": \"hse_work_appoint\",\n\t\"task_worktype_code\": \"QT\",\n\t\"equt_name\": \"\",\n\t\"territorialdeviceid\": 2000000003454,\n\t\"created_by_name_nick\": \"用户\",\n\t\"worktaskid_no\": 0,\n\t\"cywlqfyxzz\": \"0\",\n\t\"specialenvironment\": \"ALLNOT\",\n\t\"isreport\": \"0\",\n\t\"created_by_name\": \"测试用户\",\n\t\"worklevel_dh\": \"\",\n\t\"sourcecode\": \"\",\n\t\"iscontainplayday\": 0,\n\t\"worktype_name\": \"作业许可证\",\n\t\"sourcefunc\": \"\",\n\t\"equipmentcode\": \"\",\n\t\"territorialdevicename\": \"制氢装置\",\n\t\"sourcetype\": \"\",\n\t\"worktypename\": \"作业许可证\",\n\t\"sourceid\": \"\",\n\t\"worklevel_gx\": \"\",\n\t\"serviceplanid\": \"\",\n\t\"task_worktype_name\": \"其他\",\n\t\"standardmaintenance\": \"\",\n\t\"worklevel_sx\": \"\",\n\t\"material_medium\": \"物料介质123\",\n\t\"risksmeasures\": \"重点防控的风险123\",\n\t\"issjtssxzy\": \"0\",\n\t\"isupgradedh\": \"0\",\n\t\"isdzdh\": \"0\",\n\t\"worklevel_gc\": \"\",\n\t\"persistent_type\": \"newoperation\",\n\t\"territorialunitcode\": \"CS8082020\",\n\t\"worklevel_dz\": \"\",\n\t\"dataStatus\": 0,\n\t\"ver\": 1,\n\t\"created_by\": 1000,\n\t\"created_dt\": now,\n\t\"updated_by\": 1000,\n\t\"updated_dt\": now,\n\t\"df\": 0,\n\t\"tenantid\": 1,\n\t\"ts\": \"\",\n\t\"work_appoint_id\": work_appoint_id_l,\n\t\"code\": \"\",\n\t\"iscontractor\": \"0\",\n\t\"workunit\": 1688712,\n\t\"workunitname\": \"长庆石化分公司\",\n\t\"workunitname_no\": \"长庆石化分公司\",\n\t\"workcontent\": \"作业内容123\",\n\t\"workname\": name,\n\t\"territorialunitid\": 2000000003339,\n\t\"territorialunitname\": \"运行一部\",\n\t\"work_position_id\": 2000000002019,\n\t\"appointstarttime\": starttime,\n\t\"appointendtime\": endtime,\n\t\"work_position_name\": \"制氢北区\",\n\t\"status\": \"approval\",\n\t\"constructionscheme\": \"\",\n\t\"wf_current_user\": \"1000\",\n\t\"wf_audit_state\": \"2\",\n\t\"wf_create_user\": 1000,\n\t\"wf_type\": \"2\",\n\t\"wf_instance\": 2000000010669,\n\t\"wf_current_nodeid\": \"2000000009070\",\n\t\"wf_audit_time\": now,\n\t\"worktype\": \"xkz\",\n\t\"worksite\": \"作业地点123\",\n\t\"equipmentnumber\": \"\",\n\t\"projecttype\": \"rcjx\",\n\t\"isspecialcondition\": \"\",\n\t\"specialcondition\": \"\"\n}\n# call the invalidation API\nrs=requests.post(url4, json = formdata, headers = headers,cookies=cookies)\n#rs.encoding='utf-8'\n#cc = str(rs.content, 'utf8')\n# decode the response bytes\ndata = rs.content.decode('utf-8')\n# parse as JSON\ndata = json.loads(data)\n# get the API return status\nstatus= data['status']\n\nif status == 3200:\n\n    print(\"Work appointment invalidated\", status)\n    #caseinfo['result'] = 1\n#DONE","sub_path":"backup/D3作业预约作废.py","file_name":"D3作业预约作废.py","file_ext":"py","file_size_in_byte":7629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205262663","text":"\r\nimport time\r\nimport numpy as np\r\nimport os\r\n\r\n'''\r\nLoad the data\r\n'''\r\ndef load_data(filename):\r\n    
print('Started reading data...')\r\n    data_arr=[]\r\n    label_arr=[]\r\n    f=open(filename,'r')\r\n    for line in f.readlines():\r\n        curline=line.strip().split(',')\r\n        if int(curline[0])>=5:\r\n            label_arr.append(1)\r\n        else:\r\n            label_arr.append(-1) \r\n        data_arr.append([int(num)/255 for num in curline[1:]])\r\n    print('Finished reading data!')\r\n    return data_arr , label_arr\r\n\r\ndef perceptron(data_arr,label_arr,iter=100,h=0.001):\r\n    print('Started training....')\r\n    data=np.mat(data_arr)\r\n    m,n=np.shape(data)\r\n    label=label_arr\r\n    w=np.zeros((1,n))\r\n    b=0\r\n    for k in range(iter):\r\n        for i in range(m):\r\n            xi=np.mat(data[i])\r\n            yi=label[i]\r\n            s=yi*(w*xi.T+b)\r\n            if s <=0:\r\n                w=w+h*xi*yi\r\n                b=b+h*yi\r\n        print('Current epoch %d/%d training....'%(k,iter))\r\n    print('Training finished!')\r\n    return w,b\r\ndef test(data_arr,label_arr,w,b):\r\n    errocnt=0\r\n    print('Started testing....')\r\n    data=np.mat(data_arr)\r\n    m=np.shape(data)[0]\r\n    label=label_arr\r\n    for i in range(m):\r\n        xi=np.mat(data[i])\r\n        yi=label[i]\r\n        s=yi*(w*xi.T+b)\r\n        if s <=0:\r\n            errocnt=errocnt+1\r\n    result=(m-errocnt)/m\r\n    print('Testing finished!')\r\n    return result\r\nif __name__=='__main__':\r\n    start=time.time()\r\n    data_dir=os.path.join(os.getcwd(),'data')\r\n    traindata,trainlabel=load_data(os.path.join(data_dir,'mnist_train.csv'))\r\n    testdata,testlabel=load_data(os.path.join(data_dir,'mnist_test.csv'))\r\n    w,b=perceptron(traindata,trainlabel)\r\n    result=test(testdata,testlabel,w,b)\r\n    end=time.time()\r\n    usedtime=end-start\r\n    print('Total train+test time: %d s, accuracy: %s'%(usedtime,result))\r\n","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367082410","text":"class Solution(object):\n    def numDecodings(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        if not s or s[0] == \"0\":\n            return 0\n        \n        cache = [0 for i in range(len(s) + 1)]\n        cache[0] = cache[1] = 1\n        for i in range(2, len(s) + 1):\n            num = int(s[i-2:i])\n            if num == 0 or (s[i - 1] == \"0\" and num > 26): # 100 or 130\n                cache[i] = 0\n            elif num < 10: # 109\n                cache[i] = cache[i - 1]\n            elif num < 27: \n                if num == 10 or num == 20: # 110 or 120\n                    cache[i] = cache[i - 2]\n                else: # 117, 126 ...\n                    cache[i] = cache[i - 1] + cache[i - 2]\n            else: # 127, 178\n                cache[i] = cache[i - 1]\n        \n        return cache[len(s)]","sub_path":"LeetCode/Decode_Ways.py","file_name":"Decode_Ways.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176145256","text":"from sklearn.neural_network import MLPClassifier\r\nimport glob\r\nimport os\r\nimport sys\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nimport keras\r\nimport pickle\r\nimport funkcije\r\nfrom funkcije import writeResToExcel, saveROC\r\nfrom sklearn.utils import shuffle\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout\r\n\r\n# contrast = False\r\n# all_modal = False\r\n# b_dataset = False\r\n# c_dataset = False\r\n\r\ndef ex_mlp(contrast, all_modal, b_dataset, c_dataset):\r\n    # PATHS\r\n    base_path = r'/home/jovyan/shared/InteliRad-gasper'\r\n    img_base_path = os.path.join(base_path, 'images')\r\n\r\n    if b_dataset and c_dataset:\r\n        print('b_dataset and c_dataset must not both be set')\r\n        sys.exit()\r\n    npy_base_path = os.path.join(base_path, 'PREPARED_IMAGES')\r\n    if not contrast:\r\n        # CASE: ['T1W','T2W','FLAIR','OTHER']\r\n        pass\r\n    else:\r\n        if not all_modal:\r\n            # CONTRAST CASE: T1W and T1W_CONTRAST\r\n            
npy_base_path = os.path.join(base_path, 'PREPARED_IMAGES_CONTRAST')\r\n else:\r\n pass\r\n\r\n results_folder = '/home/jovyan/shared/InteliRad-gasper/REZULTATI/'\r\n os.chdir(npy_base_path)\r\n\r\n # EXPERIMENT SETTINGS\r\n to_other = ['?', 'SPINE_OTHER', 'SPINE_T1W', 'SPINE_T2W', 'SPINE_FLAIR']\r\n if not contrast:\r\n modalitete = ['T1W','T2W','FLAIR','OTHER']\r\n else:\r\n if not all_modal:\r\n modalitete = ['T1W', 'T1W_CONTRAST']\r\n else:\r\n modalitete = ['T1W', 'T2W', 'FLAIR', 'OTHER', 'T1W_CONTRAST']\r\n modalitete_encoded = np.arange(len(modalitete)).astype(int)\r\n features = ['RepetitionTime', 'EchoTime', 'InversionTime', 'MagneticFieldStrength', 'SliceThickness', 'NumberofAverages', 'ImagingFrequency', 'NumberofPhaseEncodingSteps',\r\n 'EchoTrainLength', 'PercentSampling', 'PercentPhaseFieldofView', 'PixelBandwidth', 'FlipAngle']\r\n\r\n # MULTILAYER PERCEPTRON SETTINGS\r\n NUM_EPOCHS = 50\r\n num_of_neurons_in_hidden_layer = 20\r\n dropout_rate = 0.2\r\n actv_fun = 'relu'\r\n optimizer_algorithm = 'adam'\r\n\r\n # EXCEL DESCRIPTIONS\r\n save_results = True\r\n final_folder = 'A_mlp'\r\n model_name = 'mlp'\r\n if not contrast:\r\n pass\r\n else:\r\n if not all_modal:\r\n final_folder = final_folder + '_' + 'CONTRAST'\r\n model_name = model_name + '_' + 'CONTRAST'\r\n else:\r\n final_folder = final_folder + '_' + 'CONTRAST_ALL_MODAL'\r\n model_name = model_name + '_' + 'CONTRAST_ALL_MODAL'\r\n\r\n nastavitve_CNN = '{} epochs, dropout rate: {}, {} neurons in hidden layer, actv. fun {}, optimizer: {}'.format(NUM_EPOCHS, dropout_rate, num_of_neurons_in_hidden_layer, actv_fun, optimizer_algorithm)\r\n izbira_rezin = ' '.join(modalitete)\r\n excel_name = 'MLP_rez'\r\n if b_dataset:\r\n excel_name = excel_name + '_B'\r\n model_name = model_name + '_B'\r\n elif c_dataset:\r\n excel_name = excel_name + '_C'\r\n model_name = model_name + '_C'\r\n res_path = os.path.join(results_folder, final_folder)\r\n if not os.path.exists(res_path):\r\n os.makedirs(res_path)\r\n\r\n # Dataframe import\r\n features_and_references_file_name = 'features_and_references_dataframe_1866'\r\n features_and_references_dataframe = pd.read_pickle(os.path.join(base_path, features_and_references_file_name))\r\n features_and_references_dataframe['sequence'] = features_and_references_dataframe['sequence'].mask(~features_and_references_dataframe.sequence.isin(modalitete), 'OTHER')\r\n\r\n datasets = [['11018'],['45321'], ['70982'], ['21911'], ['000000SI4024MR02'], ['22002'], ['41597'], ['141797'], ['35198']]\r\n if not contrast:\r\n # CASE: ['T1W','T2W','FLAIR','OTHER']\r\n pass\r\n else:\r\n if not all_modal:\r\n # CONTRAST CASE: T1W and T1W_CONTRAST\r\n datasets = [['11018'], ['22002'], ['141797'], ['21911'], ['70982'], ['zdruzeno'], ['zdruzeno']]\r\n dataset_names = ['A', 'A', 'A', 'A', 'A', 'B1', 'B2']\r\n else:\r\n pass\r\n\r\n\r\n for num, dataset in enumerate(datasets):\r\n dataset = dataset[0]\r\n print('############### ' + str(num) + ' ###############')\r\n train_file_name = 'A_train' + '_' + dataset\r\n test_file_name = 'A_test' + '_' + dataset\r\n if not contrast:\r\n # CASE: ['T1W','T2W','FLAIR','OTHER']\r\n pass\r\n else:\r\n if not all_modal:\r\n # CONTRAST CASE: T1W and T1W_CONTRAST\r\n train_file_name = 'train' + '_' + dataset_names[num] + '_' + dataset\r\n test_file_name = 'test' + '_' + dataset_names[num] + '_' + dataset\r\n else:\r\n pass\r\n\r\n if not(os.path.isfile(os.path.join(npy_base_path, train_file_name + '.npy'))):\r\n continue\r\n\r\n _, _, train_df = funkcije.load_datasets(npy_base_path, 
train_file_name)\r\n _, _, test_df = funkcije.load_datasets(npy_base_path, test_file_name)\r\n\r\n if c_dataset:\r\n train_df = pd.concat([train_df, test_df])\r\n _, _, test_df = funkcije.load_datasets(npy_base_path, 'DATASET_C')\r\n dataset = 'DATASET_C'\r\n elif b_dataset:\r\n _, _, train_df = funkcije.load_datasets(npy_base_path, 'B_train_zdruzeno')\r\n _, _, test_df = funkcije.load_datasets(npy_base_path, 'B_test_zdruzeno')\r\n dataset = 'DATASET_B'\r\n\r\n train_tmp = features_and_references_dataframe.loc[train_df.index.to_list()]\r\n test_tmp = features_and_references_dataframe.loc[test_df.index.to_list()]\r\n\r\n X_train, y_train = funkcije.get_img_params(train_tmp, features, modalitete, modalitete_encoded)\r\n X_test, y_test = funkcije.get_img_params(test_tmp, features, modalitete, modalitete_encoded)\r\n\r\n\r\n\r\n if not contrast:\r\n pass\r\n else:\r\n if not all_modal:\r\n y_train = (features_and_references_dataframe.loc[train_df.index].sequence + '_' +\r\n features_and_references_dataframe.loc[train_df.index, 'hasContrast (0/1)']).to_list()\r\n y_test = (features_and_references_dataframe.loc[test_df.index].sequence + '_' +\r\n features_and_references_dataframe.loc[test_df.index, 'hasContrast (0/1)']).to_list()\r\n y_train = funkcije.to_dummies(y_train, ['T1W_0', 'T1W_1'])\r\n y_test = funkcije.to_dummies(y_test, ['T1W_0', 'T1W_1'])\r\n else:\r\n modals = features_and_references_dataframe.loc[train_df.index].sequence\r\n contrasts = features_and_references_dataframe.loc[train_df.index, 'hasContrast (0/1)']\r\n contrasts[~(contrasts == '1')] = ''\r\n contrasts[(modals == 'T1W') & (contrasts == '1')] = '_CONTRAST'\r\n y_train = (modals + contrasts).to_list()\r\n\r\n modals = features_and_references_dataframe.loc[test_df.index].sequence\r\n contrasts = features_and_references_dataframe.loc[test_df.index, 'hasContrast (0/1)']\r\n contrasts[~(contrasts == '1')] = ''\r\n contrasts[(modals == 'T1W') & (contrasts == '1')] = '_CONTRAST'\r\n y_test = (modals + contrasts).to_list()\r\n y_train = funkcije.to_dummies(y_train, modalitete)\r\n y_test = funkcije.to_dummies(y_test, modalitete)\r\n\r\n classifier = Sequential()\r\n # First Hidden Layer\r\n classifier.add(Dense(num_of_neurons_in_hidden_layer, activation=actv_fun, kernel_initializer='random_normal', input_dim=len(features)))\r\n classifier.add(Dropout(dropout_rate))\r\n # Output Layer\r\n classifier.add(Dense(len(modalitete), activation='sigmoid', kernel_initializer='random_normal'))\r\n classifier.add(Dropout(dropout_rate))\r\n # Compiling the neural network\r\n classifier.compile(optimizer=optimizer_algorithm, loss='binary_crossentropy', metrics=['accuracy'])\r\n # Fitting the data to the training dataset\r\n classifier.fit(X_train, y_train, epochs=NUM_EPOCHS)\r\n\r\n y_train_predicted = classifier.predict(X_train)\r\n y_test_predicted = classifier.predict(X_test)\r\n\r\n score_train = classifier.evaluate(X_train, y_train, verbose=0)\r\n score_test = classifier.evaluate(X_test, y_test, verbose=0)\r\n\r\n\r\n # mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(3, len(features)), random_state=1)\r\n # mlp.fit(X_train, y_train)\r\n # y_test_predicted = mlp.predict(X_test)\r\n\r\n roc_micro, roc_macro = saveROC(y_test, y_test_predicted, save_results, final_folder, dataset)\r\n results = {'Input': dataset,\r\n 'Input_opis': ', '.join(features),\r\n 'Modalitete': ', '.join(modalitete),\r\n 'Nastavitve': nastavitve_CNN,\r\n 'Train_acc': score_train[1],\r\n 'Test_acc': score_test[1],\r\n 'Train_loss': 
score_train[0],\r\n                   'Test_loss': score_test[0],\r\n                   'ROC_micro': roc_micro,\r\n                   'ROC_macro': roc_macro}\r\n\r\n        writeResToExcel(excel_name, results, final_folder)\r\n        funkcije.save_model(classifier, str(dataset) + '_' + model_name, res_path)\r\n        funkcije.plot_confusion_matrix(y_train, y_train_predicted, modalitete, res_path,\r\n                                       title=str(dataset) + '_train_cf')\r\n        funkcije.plot_confusion_matrix(y_test, y_test_predicted, modalitete, res_path, title=str(dataset) + '_test_cf')\r\n        if b_dataset or c_dataset:\r\n            return\r\n\r\nif __name__ == \"__main__\":\r\n    print('Nothing')","sub_path":"Scripts/mlp_modality_SerialNo.py","file_name":"mlp_modality_SerialNo.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38289265","text":"from flask import request,url_for,json,jsonify\nfrom mysql.connector import Error\nfrom werkzeug.utils import secure_filename\nimport os,sys\nimport db_conf as con\nimport insertdata as ins\nimport updatedata as up\nimport datetime\n\n#DB object for hospital database\ndb = con.db\ncursor=db.cursor()\n\ndef getPatient_DataForPortal(regno):\n    try:\n        sql=\"select p.regno,p.pfname,p.pmname,p.psname,p.age,p.agetype,p.sex,ag.gsname from patient_registration p,ipdvisit i,admin_govsch ag,ward_main w where p.regno=i.regno and i.govscheme=ag.gsid and w.ipdid=i.ipdid and w.wardstatus=1 and i.regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\ndef getPatient_DischargeInfo(regno):\n    try:\n        sql=\"select p.regno,p.pfname,p.pmname,p.psname,i.ipddate,i.dischargedate from patient_registration p,ipdvisit i,admin_govsch ag,ward_main w where p.regno=i.regno and i.govscheme=ag.gsid and w.ipdid=i.ipdid and w.wardstatus=0 and i.regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\ndef getWardMainId(regno):\n    try:\n        sql=\"Select w.wrd_id,w.ipdid from ipdvisit i,ward_main w where i.ipdid=w.ipdid and w.wardstatus='1' and w.regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\ndef getOpdVisits(regno):\n    try:\n        sql=\"select vdate,complaint,status from opdvisit where regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\n\ndef getIpdVisits(regno):\n    try:\n        sql=\"select ipddate,complaint,aw.wname,ab.bname from ipdvisit i,ward_main w,admin_wardname aw,admin_ward_bdname ab where w.ipdid=i.ipdid and w.wid=aw.wid and w.bedno=ab.bid and i.regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\ndef getXraysData(wmid):\n    try:\n        sql=\"select x.xdate,ax.xrayname,asx.subxray,x.upload from xray x,admin_xname ax,admin_subname asx,ward_main w where x.xtype=ax.xid and x.stype=asx.subid and w.wrd_id=x.pid and w.wardstatus=1 and x.location<>'OPD' and pid='{}'\".format(wmid)\n        print(sql)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return str(e)\n\ndef getDocsData(regno):\n    try:\n        sql=\"select doc_date,doc_name,doc_from,doc_file_loc from opdDocument where regno='{}'\".format(regno)\n        cursor.execute(sql)\n        return cursor.fetchall()\n    except Exception as e:\n        return 
str(e)\n","sub_path":"pyfiles/portal/patientPortal.py","file_name":"patientPortal.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"87515856","text":"# Ordereddict\r\nfrom collections import OrderedDict\r\nd = OrderedDict() # order is maintained.\r\nd['a'] = 1\r\nd['b'] = 2\r\nd['c'] = 3\r\nd['d'] = 4\r\nd['e'] = 5\r\nfor k,v in d.items():\r\n print(k,'---',v)\r\n","sub_path":"Udemy(Python 3 bootcamp)/Ordered_dict.py","file_name":"Ordered_dict.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42657238","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"auth\", \"0006_require_contenttypes_0002\"),\n (\"voucher\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"voucher\",\n name=\"groups\",\n field=models.ManyToManyField(verbose_name=\"User Groups\", to=\"auth.Group\"),\n ),\n migrations.AddField(\n model_name=\"voucher\",\n name=\"limit_usage_by_group\",\n field=models.BooleanField(\n default=False, verbose_name=\"Limit usage to selected user groups\"\n ),\n ),\n ]\n","sub_path":"server/src/oscarbluelight/voucher/migrations/0002_auto_20160503_1138.py","file_name":"0002_auto_20160503_1138.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493589557","text":"from gpiozero import Robot\nimport time\nfrom evdev import InputDevice, categorize, ecodes\n\nrobot = Robot(left=(17,18), right=(27,22))\ncontroller = InputDevice(\"/dev/input/event0\")\n\n# Mapping\nup = 46\ndown = 32\nleft = 18\nright = 33\n\nxBtn = 35\nyBtn = 23\naBtn = 34\nbBtn = 36\n\nstart = 24\nselect = 49\n\nlTrigger = 37\nrTrigger = 50\n\n# Speeds\nmove = 0.75\nturn = 0.5\n\n# Controller Code\nprint(controller)\nprint()\n\nfor event in controller.read_loop():\n if event.value == 0:\n robot.stop()\n elif event.value == 1:\n if event.code == start:\n break\n elif event.code == select:\n temp = up\n up = down\n down = temp\n\n elif event.code == lTrigger:\n if turn > 0.25:\n move -= 0.25\n turn -= 0.25\n elif event.code == rTrigger:\n if move < 1:\n move += 0.25\n turn += 0.25\n\n if event.value == 1 or event.value == 2:\n if event.code == up:\n robot.forward(move)\n elif event.code == down:\n robot.backward(move)\n elif event.code == left:\n robot.left(turn)\n elif event.code == right:\n robot.right(turn)\n\n elif event.code == xBtn:\n print('X')\n # nothing for now\n elif event.code == yBtn:\n print('Y')\n # nothing for now\n elif event.code == aBtn:\n print('A')\n # nothing for now\n elif event.code == bBtn:\n robot.stop()","sub_path":"src/RobotObi/blueController.py","file_name":"blueController.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6903231","text":"# A message containing letters from A-Z is being encoded to numbers using the following mapping:\n#\n# \n# 'A' -> 1\n# 'B' -> 2\n# ...\n# 'Z' -> 26\n# \n#\n# Given a non-empty string containing only digits, determine the total number of ways to decode it. 
\n#\n# Example 1: \n#\n# \n# Input: \"12\"\n# Output: 2\n# Explanation: It could be decoded as \"AB\" (1 2) or \"L\" (12).\n# \n#\n# Example 2: \n#\n# \n# Input: \"226\"\n# Output: 3\n# Explanation: It could be decoded as \"BZ\" (2 26), \"VF\" (22 6), or \"BBF\" (2 2 6).\n# Related Topics String Dynamic Programming\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def numDecodings(self, s: str) -> int:\n if s[0] == \"0\":\n return 0\n if len(s) == 1:\n return 1\n res = [1]\n if int(s[0:2]) <= 26 and int(s[1]) != 0:\n res.append(2)\n elif (int(s[0:2]) <= 26 and int(s[1]) == 0) or (int(s[0:2]) > 26 and int(s[1]) != 0):\n res.append(1)\n else:\n return 0\n for ind in range(len(s) - 2):\n if int(s[ind + 1]) == 0 and int(s[ind + 2]) != 0:\n res.append(res[-1])\n continue\n elif int(s[ind + 1]) == 0 and int(s[ind + 2]) == 0:\n return 0\n if int(s[ind + 1:ind + 3]) <= 26 and int(s[ind + 2]) != 0:\n res.append(res[-1] + res[-2])\n elif int(s[ind + 1:ind + 3]) <= 26 and int(s[ind + 2]) == 0:\n res.append(res[-2])\n elif int(s[ind + 1:ind + 3]) > 26 and int(s[ind + 2]) != 0:\n res.append(res[-1])\n else:\n return 0\n return res[-1]\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"leetcode/editor/en/[91]Decode Ways.py","file_name":"[91]Decode Ways.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8687242","text":"#!/usr/bin/env python3\n\n\"\"\"wcount.py: count words from an Internet file.\n\n__author__ = \"Zhangyuting\"\n__pkuid__ = \"1700017858\"\n__email__ = \"brilliant.99@pku.edu.cn\"\n\"\"\"\n\nimport sys\nfrom urllib.request import urlopen\n\n\ndef wcount(lines, topn=10):\n \"\"\"count words from lines of text string, then sort by their counts\n in reverse order, output the topn (word count), each in one line. \n \"\"\"\n newlines=lines.lower()\n total=newlines.split()\n adict={}\n for x in total :\n adict[x]=total.count(x)\n x=sorted(adict.items(),reverse=True,key=lambda kv:kv[1])[0:topn]\n for (k,v) in x:\n print(k,'\\t',v)\n\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) == 1:\n print('Usage: {} url [topn]'.format(sys.argv[0]))\n print(' url: URL of the txt file to analyze ')\n print(' topn: how many (words count) to output. 
If not given, will output top 10 words')\n sys.exit(1)\n\n try:\n topn = 10\n if len(sys.argv) == 3:\n topn = int(sys.argv[2])\n except ValueError:\n print('{} is not a valid topn int number'.format(sys.argv[2]))\n sys.exit(1)\n\n try:\n with urlopen(sys.argv[1]) as f:\n contents = f.read()\n lines = contents.decode()\n wcount(lines, topn)\n except Exception as err:\n print(err)\n sys.exit(1)\n","sub_path":"wcount.py","file_name":"wcount.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627168907","text":"# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE\n\nimport operator\n\nimport numpy\nimport numba\nimport numba.typing.arraydecl\nimport numba.typing.ctypes_utils\n\nimport awkward1.layout\nfrom ..._numba import cpu, util, identities, content\n\n@numba.extending.typeof_impl.register(awkward1.layout.NumpyArray)\ndef typeof(val, c):\n import awkward1._numba.types\n type = val.type\n while isinstance(type, (awkward1.layout.ArrayType, awkward1.layout.RegularType)):\n type = type.type\n return NumpyArrayType(numba.typeof(numpy.asarray(val)), numba.typeof(val.identities), util.dict2parameters(val.parameters))\n\nclass NumpyArrayType(content.ContentType):\n def __init__(self, arraytpe, identitiestpe, parameters):\n assert isinstance(parameters, tuple)\n super(NumpyArrayType, self).__init__(name=\"ak::NumpyArrayType({0}, identities={1}, parameters={2})\".format(arraytpe.name, identitiestpe.name, util.parameters2str(parameters)))\n self.arraytpe = arraytpe\n self.identitiestpe = identitiestpe\n self.parameters = parameters\n\n @property\n def ndim(self):\n return self.arraytpe.ndim\n\n def getitem_int(self):\n return self.getitem_tuple(numba.int64)\n\n def getitem_range(self):\n return self.getitem_tuple(numba.types.slice2_type)\n\n def getitem_str(self):\n raise IndexError(\"cannot slice NumpyArray with str (Record field name)\")\n\n def getitem_tuple(self, wheretpe):\n outtpe = numba.typing.arraydecl.get_array_index_type(self.arraytpe, wheretpe).result\n if isinstance(outtpe, numba.types.Array):\n return NumpyArrayType(outtpe, self.identitiestpe, self.parameters)\n else:\n return outtpe\n\n def getitem_next(self, wheretpe, isadvanced):\n if len(wheretpe.types) > self.arraytpe.ndim:\n raise IndexError(\"too many dimensions in slice\")\n if any(isinstance(x, numba.types.StringLiteral) for x in wheretpe):\n raise IndexError(\"cannot slice NumpyArray with str (Record field name)\")\n\n if isadvanced:\n numreduce = sum(1 if isinstance(x, (numba.types.Integer, numba.types.Array)) else 0 for x in wheretpe.types)\n else:\n numreduce = sum(1 if isinstance(x, numba.types.Integer) else 0 for x in wheretpe.types)\n if numreduce < self.arraytpe.ndim:\n return NumpyArrayType(numba.types.Array(self.arraytpe.dtype, self.arraytpe.ndim - numreduce, self.arraytpe.layout), self.identitiestpe, self.parameters)\n elif numreduce == self.arraytpe.ndim:\n return self.arraytpe.dtype\n else:\n assert False\n\n def carry(self):\n return self\n\n @property\n def lower_len(self):\n return lower_len\n\n @property\n def lower_getitem_nothing(self):\n return None\n\n @property\n def lower_getitem_int(self):\n return lower_getitem\n\n @property\n def lower_getitem_range(self):\n return lower_getitem\n\n @property\n def lower_getitem_next(self):\n return lower_getitem_next\n\n @property\n def lower_carry(self):\n return lower_carry\n\n@numba.extending.register_model(NumpyArrayType)\nclass 
NumpyArrayModel(numba.datamodel.models.StructModel):\n def __init__(self, dmm, fe_type):\n members = [(\"array\", fe_type.arraytpe)]\n if fe_type.identitiestpe != numba.none:\n members.append((\"identities\", fe_type.identitiestpe))\n super(NumpyArrayModel, self).__init__(dmm, fe_type, members)\n\n@numba.extending.unbox(NumpyArrayType)\ndef unbox(tpe, obj, c):\n asarray_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numpy.asarray))\n array_obj = c.pyapi.call_function_objargs(asarray_obj, (obj,))\n proxyout = numba.cgutils.create_struct_proxy(tpe)(c.context, c.builder)\n proxyout.array = c.pyapi.to_native_value(tpe.arraytpe, array_obj).value\n c.pyapi.decref(asarray_obj)\n c.pyapi.decref(array_obj)\n if tpe.identitiestpe != numba.none:\n id_obj = c.pyapi.object_getattr_string(obj, \"identities\")\n proxyout.identities = c.pyapi.to_native_value(tpe.identitiestpe, id_obj).value\n c.pyapi.decref(id_obj)\n is_error = numba.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())\n return numba.extending.NativeValue(proxyout._getvalue(), is_error)\n\n@numba.extending.box(NumpyArrayType)\ndef box(tpe, val, c):\n NumpyArray_obj = c.pyapi.unserialize(c.pyapi.serialize_object(awkward1.layout.NumpyArray))\n proxyin = numba.cgutils.create_struct_proxy(tpe)(c.context, c.builder, value=val)\n array_obj = c.pyapi.from_native_value(tpe.arraytpe, proxyin.array, c.env_manager)\n args = [array_obj]\n if tpe.identitiestpe != numba.none:\n args.append(c.pyapi.from_native_value(tpe.identitiestpe, proxyin.identities, c.env_manager))\n else:\n args.append(c.pyapi.make_none())\n args.append(util.parameters2dict_impl(c, tpe.parameters))\n out = c.pyapi.call_function_objargs(NumpyArray_obj, args)\n for x in args:\n c.pyapi.decref(x)\n c.pyapi.decref(NumpyArray_obj)\n return out\n\n@numba.extending.lower_builtin(len, NumpyArrayType)\ndef lower_len(context, builder, sig, args):\n tpe, = sig.args\n val, = args\n proxyin = numba.cgutils.create_struct_proxy(tpe)(context, builder, value=val)\n return numba.targets.arrayobj.array_len(context, builder, numba.intp(tpe.arraytpe), (proxyin.array,))\n\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.Integer)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.SliceType)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.Array)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.List)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.ArrayCompatible)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.EllipsisType)\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, type(numba.typeof(numpy.newaxis)))\n@numba.extending.lower_builtin(operator.getitem, NumpyArrayType, numba.types.BaseTuple)\ndef lower_getitem(context, builder, sig, args):\n import awkward1._numba.identities\n\n rettpe, (tpe, wheretpe) = sig.return_type, sig.args\n val, whereval = args\n proxyin = numba.cgutils.create_struct_proxy(tpe)(context, builder, value=val)\n\n if not isinstance(wheretpe, (numba.types.Integer, numba.types.SliceType)):\n if not isinstance(wheretpe, numba.types.BaseTuple):\n wheretpe = numba.types.Tuple((wheretpe,))\n whereval = context.make_tuple(builder, wheretpe, (whereval,))\n wheretpe, whereval = util.preprocess_slicetuple(context, builder, wheretpe, whereval)\n\n if isinstance(rettpe, NumpyArrayType):\n signature = rettpe.arraytpe(tpe.arraytpe, wheretpe)\n else:\n signature = rettpe(tpe.arraytpe, wheretpe)\n\n if 
isinstance(wheretpe, numba.types.BaseTuple):\n out = numba.targets.arrayobj.getitem_array_tuple(context, builder, signature, (proxyin.array, whereval))\n else:\n out = numba.targets.arrayobj.getitem_arraynd_intp(context, builder, signature, (proxyin.array, whereval))\n\n if isinstance(rettpe, NumpyArrayType):\n proxyout = numba.cgutils.create_struct_proxy(rettpe)(context, builder)\n proxyout.array = out\n if rettpe.identitiestpe != numba.none:\n proxyout.identities = awkward1._numba.identities.lower_getitem_any(context, builder, rettpe.identitiestpe, wheretpe, proxyin.identities, whereval)\n return proxyout._getvalue()\n else:\n return out\n\ndef lower_getitem_next(context, builder, arraytpe, wheretpe, arrayval, whereval, advanced):\n if len(wheretpe.types) == 0:\n return arrayval\n headtpe = wheretpe.types[0]\n proxyin = numba.cgutils.create_struct_proxy(arraytpe)(context, builder, value=arrayval)\n\n if isinstance(headtpe, numba.types.Array) and advanced is not None:\n tailtpe = numba.types.Tuple(wheretpe.types[1:])\n headval = numba.cgutils.unpack_tuple(builder, whereval)[0]\n tailval = context.make_tuple(builder, tailtpe, numba.cgutils.unpack_tuple(builder, whereval)[1:])\n if headtpe.ndim != 1:\n raise NotImplementedError(\"array.ndim != 1\")\n if arraytpe.arraytpe.ndim < 2:\n raise TypeError(\"too many dimensions in slice\")\n\n shapeval = numba.targets.arrayobj.make_array(arraytpe.arraytpe)(context, builder, proxyin.array).shape\n shapeunpacked = numba.cgutils.unpack_tuple(builder, shapeval)\n lenself, skip = shapeunpacked[:2]\n lennext = builder.mul(lenself, skip)\n\n carry = util.newindex64(context, builder, numba.intp, lenself)\n util.call(context, builder, cpu.kernels.awkward_carry_arange_64,\n (util.arrayptr(context, builder, util.index64tpe, carry),\n util.cast(context, builder, numba.intp, numba.int64, lenself)),\n \"in {0}, indexing error\".format(arraytpe.shortname))\n\n flathead = numba.targets.arrayobj.array_flatten(context, builder, util.index64tpe(headtpe), (headval,))\n lenflathead = util.arraylen(context, builder, util.index64tpe, flathead)\n\n util.call(context, builder, cpu.kernels.awkward_regularize_arrayslice_64,\n (util.arrayptr(context, builder, util.index64tpe, flathead),\n util.cast(context, builder, numba.intp, numba.int64, lenflathead),\n util.cast(context, builder, numba.intp, numba.int64, skip)),\n \"in {0}, indexing error\".format(arraytpe.shortname))\n\n nextcarry = util.newindex64(context, builder, numba.intp, lenself)\n util.call(context, builder, cpu.kernels.awkward_numpyarray_getitem_next_array_advanced_64,\n (util.arrayptr(context, builder, util.index64tpe, nextcarry),\n util.arrayptr(context, builder, util.index64tpe, carry),\n util.arrayptr(context, builder, util.index64tpe, advanced),\n util.arrayptr(context, builder, util.index64tpe, flathead),\n util.cast(context, builder, numba.intp, numba.int64, lenself),\n util.cast(context, builder, numba.intp, numba.int64, skip)),\n \"in {0}, indexing error\".format(arraytpe.shortname))\n\n nextshapetpe = numba.types.UniTuple(numba.intp, arraytpe.arraytpe.ndim - 1)\n nextshapeval = context.make_tuple(builder, nextshapetpe, [lennext] + shapeunpacked[2:])\n\n nextarraytpe = numba.types.Array(arraytpe.arraytpe.dtype, arraytpe.arraytpe.ndim - 1, arraytpe.arraytpe.layout)\n nextarrayval = numba.targets.arrayobj.array_reshape(context, builder, nextarraytpe(arraytpe.arraytpe, nextshapetpe), (proxyin.array, nextshapeval))\n\n nexttpe = NumpyArrayType(nextarraytpe, arraytpe.identitiestpe, arraytpe.parameters)\n 
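# The flat advanced index has been regularized and folded into nextcarry;\n # wrapping the reshaped buffer in a new NumpyArrayType lets identities and\n # parameters follow it through lower_carry and the recursive call on the tail.\n 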
proxynext = numba.cgutils.create_struct_proxy(nexttpe)(context, builder)\n proxynext.array = nextarrayval\n if arraytpe.identitiestpe != numba.none:\n proxynext.identities = proxyin.identities\n\n outval = lower_carry(context, builder, nexttpe, util.index64tpe, proxynext._getvalue(), nextcarry)\n return lower_getitem_next(context, builder, nexttpe, tailtpe, outval, tailval, advanced)\n\n else:\n proxyslice = numba.cgutils.create_struct_proxy(numba.types.slice2_type)(context, builder)\n proxyslice.start = context.get_constant(numba.intp, 0)\n proxyslice.stop = util.arraylen(context, builder, arraytpe.arraytpe, proxyin.array, totpe=numba.intp)\n proxyslice.step = context.get_constant(numba.intp, 1)\n wheretpe = numba.types.Tuple((numba.types.slice2_type,) + wheretpe.types)\n whereval = context.make_tuple(builder, wheretpe, [proxyslice._getvalue()] + numba.cgutils.unpack_tuple(builder, whereval))\n\n outtpe = numba.typing.arraydecl.get_array_index_type(arraytpe.arraytpe, wheretpe).result\n outval = numba.targets.arrayobj.getitem_array_tuple(context, builder, outtpe(arraytpe.arraytpe, wheretpe), (proxyin.array, whereval))\n\n if isinstance(outtpe, numba.types.Array):\n proxyout = numba.cgutils.create_struct_proxy(NumpyArrayType(outtpe, arraytpe.identitiestpe, arraytpe.parameters))(context, builder)\n proxyout.array = outval\n if arraytpe.identitiestpe != numba.none:\n proxyout.identities = awkward1._numba.identities.lower_getitem_any(context, builder, arraytpe.identitiestpe, wheretpe, proxyin.identities, whereval)\n return proxyout._getvalue()\n else:\n # scalar result: return the extracted element directly\n return outval\n\ndef lower_carry(context, builder, arraytpe, carrytpe, arrayval, carryval):\n import awkward1._numba.identities\n\n proxyin = numba.cgutils.create_struct_proxy(arraytpe)(context, builder, value=arrayval)\n\n proxyout = numba.cgutils.create_struct_proxy(arraytpe)(context, builder)\n proxyout.array = numba.targets.arrayobj.fancy_getitem_array(context, builder, arraytpe.arraytpe(arraytpe.arraytpe, carrytpe), (proxyin.array, carryval))\n if arraytpe.identitiestpe != numba.none:\n proxyout.identities = awkward1._numba.identities.lower_getitem_any(context, builder, arraytpe.identitiestpe, carrytpe, proxyin.identities, carryval)\n return proxyout._getvalue()\n\n@numba.typing.templates.infer_getattr\nclass type_methods(numba.typing.templates.AttributeTemplate):\n key = NumpyArrayType\n\n def generic_resolve(self, tpe, attr):\n if attr == \"identities\":\n if tpe.identitiestpe == numba.none:\n return numba.optional(identities.IdentitiesType(numba.int32[:, :]))\n else:\n return tpe.identitiestpe\n\n@numba.extending.lower_getattr(NumpyArrayType, \"identities\")\ndef lower_identities(context, builder, tpe, val):\n proxyin = numba.cgutils.create_struct_proxy(tpe)(context, builder, value=val)\n if tpe.identitiestpe == numba.none:\n return context.make_optional_none(builder, identities.IdentitiesType(numba.int32[:, :]))\n else:\n if context.enable_nrt:\n context.nrt.incref(builder, tpe.identitiestpe, proxyin.identities)\n return proxyin.identities\n","sub_path":"awkward1/_numba/array/numpyarray.py","file_name":"numpyarray.py","file_ext":"py","file_size_in_byte":14099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334596996","text":"# -'- coding: utf-8 -'-\n\nimport sys, mmap, ctypes\nfrom .instruction import Instruction, Code, Label\nfrom .parser import parse_asm\n\nfrom .label import Const\n\n\nclass CodePage(object):\n \"\"\"Compiles assembly, loads machine code into executable memory, 
and \n generates python functions for accessing the code.\n \n Initialize with either an assembly string or a list of \n :class:`Instruction ` instances. The *namespace*\n argument may be used to define extra symbols when compiling from an \n assembly string. \n \n This class encapsulates a block of executable mapped memory to which a \n sequence of asm commands are compiled and written. The memory page(s) may \n contain multiple functions; use get_function(label) to create functions \n beginning at a specific location in the code.\n \"\"\"\n def __init__(self, asm, namespace=None):\n self.labels = {}\n if isinstance(asm, str):\n asm = parse_asm(asm, namespace=namespace)\n else:\n if namespace is not None:\n raise TypeError(\"Namespace argument may only be used with \"\n \"string assembly type.\")\n \n self.asm = asm\n self.page_addr = 0\n \n # Compile machine code and write to the page.\n self.code = self.compile(asm)\n self._length = None\n \n def __len__(self):\n return sum(map(len, self.asm))\n\n def compile(self, asm):\n ptr = self.page_addr\n # First locate all labels\n for cmd in asm:\n ptr += len(cmd)\n if isinstance(cmd, Label):\n self.labels[cmd.name] = ptr\n \n # now compile\n symbols = self.labels.copy()\n code = b''\n for cmd in asm:\n if isinstance(cmd, Label):\n continue\n \n if isinstance(cmd, (Instruction, Const)):\n # if there are unresolved symbols\n if isinstance(cmd.code, Code):\n # Make some special symbols available when resolving\n # expressions:\n symbols['instr_addr'] = self.page_addr + len(code)\n symbols['next_instr_addr'] = symbols['instr_addr'] + len(cmd)\n \n # actually resolve `cmd.code`\n cmd._code = cmd.code.compile(symbols)\n \n assert isinstance(cmd.code, bytes)\n cmd = cmd.code\n code += cmd\n # TODO: could just return a `Code` instance, which'll hopefully make the code relocatable\n return code\n\n def dump(self):\n \"\"\"Return a string representation of the machine code and assembly\n instructions contained in the code page.\n \"\"\"\n code = ''\n ptr = 0\n indent = ''\n for instr in self.asm:\n hex = ''\n \n if isinstance(instr, Instruction):\n hex = ''.join(f'{b:02x}' for b in instr.code)\n elif isinstance(instr, Const): # just raw data\n if not instr.code:\n continue\n hex = ''.join(f'{b:02x}' for b in instr.code)\n \n pad = ' ' * (40 - len(hex))\n \n if isinstance(instr, Label):\n if indent:\n indent = ''\n \n code += f'0x{ptr:04x}: {hex}{pad}{indent}{instr}\\n'\n\n if isinstance(instr, Label):\n indent = ' ' * 2\n \n ptr += len(hex)//2\n return code\n\n","sub_path":"pycca/asm/codepage.py","file_name":"codepage.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198930397","text":"'''\nCreated on Jul 17, 2015\nCopyright (c) 2015\nHarvard Informatics and Scientific Applications\nAll rights reserved.\n\n@author: Aaron Kitzmiller\n'''\n\n__all__ = []\n\nimport pkgutil\nimport inspect\n\nfor loader, name, is_pkg in pkgutil.walk_packages(__path__):\n module = loader.find_module(name).load_module(name)\n\n for name, value in inspect.getmembers(module):\n if name.startswith('__'):\n continue\n\n globals()[name] = value\n __all__.append(name)\n \n","sub_path":"gx/dispatcher/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199882356","text":"from handler import db, handle_csv_upload\nimport json\nimport datetime\nimport 
pymongo\nimport bcrypt\nfrom bson import ObjectId\n\n\ndef dummy_data_decorator(test_function):\n def f():\n '''\n Drop any existing data and fill in some dummy test data,\n as well as creating indexes; the data will be dropped after\n the test as well\n '''\n\n db.user.drop()\n db.user.create_index([\n (\"normalized_email\", pymongo.ASCENDING),\n ], unique=True)\n\n dummy_users = [\n {\n \"_id\": ObjectId(),\n \"name\": \"Brad Jones\",\n \"normalized_email\": \"bjones@performyard.com\",\n \"manager_id\": None,\n \"salary\": 90000,\n \"hire_date\": datetime.datetime(2010, 2, 10),\n \"is_active\": True,\n \"hashed_password\": bcrypt.hashpw(b\"password\", bcrypt.gensalt()),\n },\n {\n \"_id\": ObjectId(),\n \"name\": \"Ted Harrison\",\n \"normalized_email\": \"tharrison@performyard.com\",\n \"manager_id\": None,\n \"salary\": 50000,\n \"hire_date\": datetime.datetime(2012, 10, 20),\n \"is_active\": True,\n \"hashed_password\": bcrypt.hashpw(b\"correct horse battery staple\", bcrypt.gensalt()),\n }\n ]\n\n # Give Ted a manager\n dummy_users[1][\"manager_id\"] = dummy_users[0][\"_id\"]\n\n for user in dummy_users:\n db.user.insert(user)\n\n db.chain_of_command.drop()\n db.chain_of_command.create_index([\n (\"user_id\", pymongo.ASCENDING),\n ], unique=True)\n\n dummy_chain_of_commands = [\n {\"user_id\": dummy_users[0][\"_id\"], \"chain_of_command\":[]},\n {\"user_id\": dummy_users[1][\"_id\"], \"chain_of_command\":[dummy_users[0]]},\n ]\n\n for chain_of_command in dummy_chain_of_commands:\n db.chain_of_command.insert(chain_of_command)\n\n test_function()\n db.user.drop()\n db.chain_of_command.drop()\n return f\n\n\n@dummy_data_decorator\ndef test_setup():\n '''\n This test should always pass if your environment is set up correctly\n '''\n assert(True)\n\n\n@dummy_data_decorator\ndef test_simple_csv():\n '''\n This should successfully update one user and create one user,\n also updating their chain of commands appropriately\n '''\n\n body = '''Name,Email,Manager,Salary,Hire Date\nBrad Jones,bjones@performyard.com,,100000,02/10/2010\nJohn Smith,jsmith@performyard.com,bjones@performyard.com,80000,07/16/2018\n'''\n\n response = handle_csv_upload(body, {})\n assert(response[\"statusCode\"] == 200)\n body = json.loads(response[\"body\"])\n\n # Check the response counts\n assert(body[\"numCreated\"] == 1)\n assert(body[\"numUpdated\"] == 1)\n assert(len(body[\"errors\"]) == 0)\n\n # Check that we added the correct number of users\n assert(db.user.count() == 3)\n assert(db.chain_of_command.count() == 3)\n\n # Check that Brad's salary was updated\n brad = db.user.find_one({\"normalized_email\": \"bjones@performyard.com\"})\n assert(brad[\"salary\"] == 100000)\n\n # Check that Brad's chain of command is still empty\n brad_chain_of_command = db.chain_of_command.find_one(\n {\"user_id\": brad[\"_id\"]})\n assert(len(brad_chain_of_command[\"chain_of_command\"]) == 0)\n\n # Check that John's data was inserted correctly\n john = db.user.find_one({\"normalized_email\": \"jsmith@performyard.com\"})\n assert(john[\"name\"] == \"John Smith\")\n assert(john[\"salary\"] == 80000)\n assert(john[\"manager_id\"] == brad[\"_id\"])\n assert(john[\"hire_date\"] == datetime.datetime(2018, 7, 16))\n\n # Check that Brad is in John's chain of command\n john_chain_of_command = db.chain_of_command.find_one(\n {\"user_id\": john[\"_id\"]})\n assert(len(john_chain_of_command[\"chain_of_command\"]) == 1)\n assert(john_chain_of_command[\"chain_of_command\"][0] == brad[\"_id\"])\n\n\n@dummy_data_decorator\ndef 
test_invalid_number():\n '''\n This test should still update Brad's name and create John, but should\n return a single error because the salary field for Brad isn't a number,\n so his salary must stay unchanged\n '''\n\n body = '''Name,Email,Manager,Salary,Hire Date\nBradley Jones,bjones@performyard.com,,NOT A NUMBER,02/10/2010\nJohn Smith,jsmith@performyard.com,bjones@performyard.com,80000,07/16/2018\n'''\n\n response = handle_csv_upload(body, {})\n assert(response[\"statusCode\"] == 200)\n body = json.loads(response[\"body\"])\n\n # Check the response counts\n assert(body[\"numCreated\"] == 1)\n assert(body[\"numUpdated\"] == 1)\n assert(len(body[\"errors\"]) == 1)\n\n # Check that we added the correct number of users\n assert(db.user.count() == 3)\n assert(db.chain_of_command.count() == 3)\n\n # Check that Brad's name was updated but his salary was not (invalid value)\n brad = db.user.find_one({\"normalized_email\": \"bjones@performyard.com\"})\n assert(brad[\"salary\"] == 90000)\n assert(brad[\"name\"] == \"Bradley Jones\")\n\n # Check that Brad's chain of command is still empty\n brad_chain_of_command = db.chain_of_command.find_one(\n {\"user_id\": brad[\"_id\"]})\n assert(len(brad_chain_of_command[\"chain_of_command\"]) == 0)\n\n # Check that John's data was inserted correctly\n john = db.user.find_one({\"normalized_email\": \"jsmith@performyard.com\"})\n assert(john[\"name\"] == \"John Smith\")\n assert(john[\"salary\"] == 80000)\n assert(john[\"manager_id\"] == brad[\"_id\"])\n assert(john[\"hire_date\"] == datetime.datetime(2018, 7, 16))\n\n # Check that Brad is in John's chain of command\n john_chain_of_command = db.chain_of_command.find_one(\n {\"user_id\": john[\"_id\"]})\n assert(len(john_chain_of_command[\"chain_of_command\"]) == 1)\n assert(john_chain_of_command[\"chain_of_command\"][0] == brad[\"_id\"])\n","sub_path":"src/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321403380","text":"from math import sqrt\n\n\nclass AverDev:\n def __init__(self, array_1, array_2, width=1, real_size=1):\n self.first = array_1\n self.second = array_2\n self.scale = width/real_size\n self.max_id = 0\n\n def max_deviation(self):\n deviation = list(map(lambda x, y: abs(x-y), self.first, self.second))\n self.max_id = deviation.index(max(deviation))\n return max(deviation)/self.scale\n\n def deviation(self):\n deviation = list(map(lambda x, y: x-y, self.first, self.second))\n average = sum(deviation)/len(deviation)\n return average/self.scale\n\n def average_dev(self):\n temp = 0\n deviation = list(map(lambda x, y: x - y, self.first, self.second))\n average = sum(deviation) / len(deviation)\n for i in range(len(self.first)):\n temp += pow(deviation[i] - average, 2)\n average_dev = sqrt(temp / len(deviation))\n return average_dev / self.scale\n\n def create_table(self, len_table=24):\n first_array, second_array = [], []\n if self.max_id - int(len_table/2) < 0:\n for i in range(len_table):\n first_array.append(self.first[i])\n second_array.append(self.second[i])\n elif self.max_id + int(len_table/2) > len(self.first) - 1:\n for i in range(len(self.first) - len_table, len(self.first)):\n first_array.append(self.first[i])\n second_array.append(self.second[i])\n else:\n begin_id = self.max_id - int(len_table/2)\n end_id = self.max_id + int(len_table/2)\n for i in range(begin_id, end_id):\n first_array.append(self.first[i])\n second_array.append(self.second[i])\n return first_array, 
second_array\n","sub_path":"Prog/AverDeviat.py","file_name":"AverDeviat.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159275027","text":"import numpy as np\nfrom TeenyGoAI import NeuralNetwork\n\nclass GoTrainer(object):\n\n def __init__(self):\n self.board = np.zeros([9, 9])\n self.move_holder = np.zeros([9, 9])\n self.letters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"]\n self.white_score = 0\n self.black_score = 0\n self.y_data = []\n self.x_data = []\n self.turn = \"black\"\n size = np.zeros([9, 9])\n self.NN = NeuralNetwork(size, size, alpha=0.001, iterations=1, num_layers=3, hidden_addition=1, lamb=0.0000)\n\n # returns an empty board with the move about to be made\n def position_to_coordinates(self, move):\n #self.move_holder = np.zeros([9,9])\n #self.move_holder[self.letters.index(move[0]), int(move[1])]\n return [self.letters.index(move[0]), int(move[1])]\n\n def get_y(self, move):\n y = self.board * 1\n y[y==0] = 0.2\n y[y!=0.2] = 0\n y[move[0]][move[1]] = 1\n return y\n\n def play(self, data, winner):\n self.board = np.zeros([9, 9])\n self.white_score = 0\n self.black_score = 0\n for move in data:\n type_for_capture = 0\n move = self.position_to_coordinates(move)\n x = self.board*1\n y = self.get_y(move)\n if self.board[move[0]][move[1]] == 0:\n # stand off is set to False\n #print(\"made it\")\n stand_off = 0\n #if the position is good then...\n if move is not None:\n move_state = self.board[move[0]][move[1]]\n # if the board space is empty...\n if move_state == 0:\n # place white or black depending on the turn\n if self.turn == \"white\":\n self.board[move[0]][move[1]] = 1\n elif self.turn == \"black\":\n self.board[move[0]][move[1]] = -1\n # if\n check_captures = self.check_capture_pieces(move)\n\n if check_captures == \"white\" or check_captures == \"black\":\n if check_captures == \"white\":\n type_for_capture = 1\n stand_off = 1\n\n if check_captures == \"black\":\n type_for_capture = -1\n stand_off = 1\n\n if check_captures == 0 or stand_off == 1:\n if self.turn == \"white\":\n self.turn = \"black\"\n #print(\"its blacks turn\")\n elif self.turn == \"black\":\n self.turn = \"white\"\n #print(\"its whites turn\")\n\n if True:\n #print(\"optimizing\")\n #self.NN.optimize()\n if self.turn == winner:\n if winner == \"white\":\n self.x_data.append(x)\n self.y_data.append(y)\n\n if winner == \"black\":\n x[x==1] = -2\n x[x==-1] = 1\n x[x==-2] = -1\n self.x_data.append(x)\n self.y_data.append(y)\n \n\n if check_captures == 1:\n #print(move)\n #print(\"Invalid Move\")\n #print(self.board)\n #print(move)\n self.board[move[0]][move[1]] = 0\n #print(self.board)\n\n if type_for_capture != 0:\n self.capture_pieces(type_for_capture)\n #print(self.board)\n\n def capture_pieces(self, type_for_capture):\n for i in range(9):\n for j in range(9):\n location_state = self.board[i][j]\n if location_state != 0 and location_state == type_for_capture:\n group = self.get_group([i, j], location_state)\n if group != []:\n free = self.check_neighbors(group, location_state)\n if free == \"False\":\n self.remove_group(group)\n\n def check_capture_pieces(self, position):\n killing_itself = 0\n for i in range(9):\n for j in range(9):\n location_state = self.board[i, j]\n if location_state != 0:\n group = self.get_group([i, j], location_state)\n if group != []:\n free = self.check_neighbors(group, location_state)\n if free == \"False\":\n if position in group:\n killing_itself = 1\n 
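# A liberty-less group that contains the newly placed stone marks a\n # potential suicide move; it is only legal if it also captures an\n # opposing group, which is checked next.\n 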
if location_state == 1 and self.board[position[0]][position[1]] != 1:\n return \"white\"\n if location_state == -1 and self.board[position[0]][position[1]] != -1:\n return \"black\"\n\n return killing_itself\n\n\n def check_neighbors(self, group, state_type):\n liberty = \"False\"\n\n for position in group:\n\n a, b = position[0], position[1]\n\n if a < 8:\n if self.board[a+1][b] == 0:\n return True\n\n if a > 0:\n if self.board[a-1][b] == 0:\n return True\n\n if b < 8:\n if self.board[a][b+1] == 0:\n return True\n\n if b > 0:\n if self.board[a][b-1] == 0:\n return True\n\n return liberty\n\n\n def get_group(self, position, state_type):\n stone_group = []\n stone_group.append(position)\n for j in range(20):\n for pos in stone_group:\n a, b = pos[0], pos[1]\n if a > 0:\n if self.board[a-1][b] == state_type and [a-1, b] not in stone_group:\n stone_group.append([a-1, b])\n\n if a < 8:\n if self.board[a+1][b] == state_type and [a+1, b] not in stone_group:\n stone_group.append([a+1, b])\n\n if b > 0:\n if self.board[a][b-1] == state_type and [a, b-1] not in stone_group:\n stone_group.append([a, b-1])\n\n if b < 8:\n if self.board[a][b+1] == state_type and [a, b+1] not in stone_group:\n stone_group.append([a, b+1])\n\n return stone_group\n\n def remove_group(self, group):\n #print(group)\n if self.board[group[0][0]][group[0][1]] == 1:\n self.black_score = self.black_score + len(group)\n if self.board[group[0][0]][group[0][1]] == -1:\n self.white_score = self.white_score + len(group)\n #print(\"White Score: \" + str(self.white_score))\n #print(\"Black Score: \" + str(self.black_score))\n for elmnt in group:\n self.board[elmnt[0]][elmnt[1]] = 0\n","sub_path":"depreciated/OptimizedGoTrainer.py","file_name":"OptimizedGoTrainer.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60243761","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nfrom ns3gym import ns3env\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport random\nimport time\nfrom IPython.display import clear_output\n\n\n__author__ = \"Piotr Gawlowicz\"\n__copyright__ = \"Copyright (c) 2018, Technische Universität Berlin\"\n__version__ = \"0.1.0\"\n__email__ = \"gawlowicz@tkn.tu-berlin.de\"\n\n\nparser = argparse.ArgumentParser(description='Start simulation script on/off')\nparser.add_argument('--start',\n type=int,\n default=1,\n help='Start ns-3 simulation script 0/1, Default: 1')\nparser.add_argument('--iterations',\n type=int,\n default=1,\n help='Number of iterations, Default: 1')\nargs = parser.parse_args()\nstartSim = bool(args.start)\niterationNum = int(args.iterations)\n\nport = 7408\nsimTime = 5 # seconds\nstepTime = 0.5 # seconds\nseed = 0\nsimArgs = {\"--simTime\": simTime,\n \"--stepTime\": stepTime,\n \"--testArg\": 123}\ndebug = False\n\nenv = ns3env.Ns3Env(port=port, stepTime=stepTime, startSim=startSim, simSeed=seed, simArgs=simArgs, debug=debug)\n# simpler:\n#env = ns3env.Ns3Env()\nenv.reset()\n\nob_space = env.observation_space\nac_space = env.action_space\nprint(\"Observation space: \", ob_space, ob_space.dtype)\nprint(\"Action space: \", ac_space, ac_space.dtype)\ntime_history = []\nrew_history = []\nstepIdx = 0\ncurrIt = 0\nobs = env.reset()\n\naction_space_size=4\nstate_space_size=2\n\nnum_states = (env.observation_space.high - env.observation_space.low)*\\\n np.array([1000,10,10,10])\nnum_states = np.round(num_states, 0).astype(int) + 1\n\n # Initialize Q 
table\n q_table = np.random.uniform(low = -1, high = 1,\n size = (num_states[0], num_states[1],\n action_space_size))\n#q_table=np.zeros((state_space_size, action_space_size))\n# initialize\nnum_episode=40\nmax_steps_per_episode=1000\n\nlearning_rate=0.1\ndiscount_rate=0.99\n\n\nexploration_rate=1\nmax_exploration_rate=1\nmin_exploration_rate=0.01\nexploration_decay_rate=0.001\nrewards_all_episode=[]\nprint(\"qtable shape\",q_table.shape)\n\n#Q-learning\nfor episode in range(num_episode):\n state=env.reset()\n \n done=False\n rewards_current_episode=0\n for step in range(max_steps_per_episode):\n print(\"Step:\",step)\n exploration_rate_threshold = random.uniform(0,1)\n if exploration_rate_threshold>exploration_rate:\n action=np.argmax(q_table[state,:])\n else:\n action=env.action_space.sample()\n new_state,reward,done,info= env.step(action)\n print(\"New state:\",new_state,\" reward:\",reward,\" done:\",done,\" info:\",info)\n q_table[state, action]=q_table[state, action]*(1-learning_rate) + \\\n learning_rate*(reward+discount_rate*np.max(q_table[new_state,:]))\n state=new_state\n print(action)\n rewards_current_episode+=reward \n time_history.append(step)\n rew_history.append(rewards_current_episode)\n if done == True:\n break\n\n\n exploration_rate=min_exploration_rate+ \\\n (max_exploration_rate - min_exploration_rate)*np.exp(-exploration_decay_rate*episode)\n rewards_all_episode.append(rewards_current_episode)\n \n#rewards_per_thousand_episodes=np.split(np.array(rewards_all_episode),num_episode/1000)\ncount=1000\n\n\nprint(\"\\n\\n********************Q-table**********\\n\")\nprint(q_table)\nprint(\"Plot Learning Performance\")\nmpl.rcdefaults()\nmpl.rcParams.update({'font.size': 16})\n\nfig, ax = plt.subplots(figsize=(10,4))\nplt.grid(True, linestyle='--')\nplt.title('Learning Performance')\nplt.plot(range(len(time_history)), time_history, label='Steps', marker=\"^\", linestyle=\":\")#, color='red')\nplt.plot(range(len(rew_history)), rew_history, label='Reward', marker=\"\", linestyle=\"-\")#, color='k')\nplt.xlabel('step')\nplt.ylabel('Reward')\nplt.legend(prop={'size': 12})\n\nplt.savefig('learning.pdf', bbox_inches='tight')\nplt.show()\n\n","sub_path":"rl-tcp-var/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494829513","text":"#!/usr/bin/python3\n\nfrom alphabetizer import *\n \ndef load_file(filename):\n members = []\n with open(filename, 'r') as reader:\n for line in reader:\n if line.strip():\n (first, last, email) = line.split()\n members.append(Person(first, last, email[1:-1]))\n return members\n \ndef write_file(filename, memberlist):\n with open(filename, 'w') as writer:\n writer.writelines(str(member) + '\\n' for member in memberlist)\n \n\ndef main(infile, outfile):\n order = order_first_name\n #order = order_last_name\n member_list = load_file(infile)\n (sorted_list, cost) = alphabetize(member_list, order)\n if not is_alphabetized(sorted_list, order):\n print('Sorting was not successful!')\n print(cost, 'comparisons were required')\n write_file(outfile, sorted_list)\n \nif __name__ == '__main__':\n main('gryffindor.txt', 'sorted.txt')\n #main('short.txt', 'sorted.txt')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
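The main.py record above drives an alphabetizer module that is not part of this dump; only its call sites are visible. The sketch below is a guess at a minimal compatible implementation: all names (Person, order_first_name, order_last_name, is_alphabetized, alphabetize returning a (sorted_list, cost) pair) are inferred from main.py, and the insertion-sort-with-comparison-counting strategy is an assumption, not the original author's code.

    # alphabetizer.py -- hypothetical sketch; names inferred from main.py above.

    class Person:
        def __init__(self, first, last, email):
            self.first = first
            self.last = last
            self.email = email

        def __str__(self):
            # Matches the "First Last <email>" layout that load_file() parses.
            return '{} {} <{}>'.format(self.first, self.last, self.email)

    def order_first_name(a, b):
        # True when a belongs before b, ordering by first name, then last name.
        return (a.first, a.last) < (b.first, b.last)

    def order_last_name(a, b):
        return (a.last, a.first) < (b.last, b.first)

    def is_alphabetized(roster, ordering):
        # Sorted means no adjacent pair is out of order.
        return all(not ordering(roster[i + 1], roster[i])
                   for i in range(len(roster) - 1))

    def alphabetize(roster, ordering):
        # Insertion sort that counts comparisons; returns (sorted_copy, cost).
        result = list(roster)
        cost = 0
        for i in range(1, len(result)):
            j = i
            while j > 0:
                cost += 1
                if ordering(result[j], result[j - 1]):
                    result[j], result[j - 1] = result[j - 1], result[j]
                    j -= 1
                else:
                    break
        return (result, cost)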
+{"seq_id":"547454978","text":"from odoo import models, fields\n\n\nclass VmAuthor(models.Model):\n _name = \"vm.author\"\n _description = \"Media Author\"\n\n name = fields.Char('Name', size=128, required=True)\n # address = fields.Many2one('res.partner', 'Address')\n media_ids = fields.Many2many('vm.media', string='Media(s)')\n","sub_path":"models/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183084367","text":"\"\"\"\nThis file contains AES-GCM symmetric encryption scheme primitives.\n\"\"\"\n\nfrom Crypto.Cipher import AES\n\nimport const\nimport function_utils as fu\nimport logging\nimport math\nimport sys\n\n\ndef key_gen(sym_key_size=const.SYM_KEY_DEFAULT_SIZE, debug=0):\n \"\"\"\n Generate a random symmetric key with given size.\n :param sym_key_size: length in bytes of the symmetric key\n :param debug: if 1, prints will be shown during execution; default 0, no prints are shown\n :return: the randomly generated symmetric key\n \"\"\"\n\n # Clamp the size between SYM_KEY_MIN_SIZE and the system maximum possible value\n size = fu.clamp(sym_key_size, const.SYM_KEY_MIN_SIZE, sys.maxsize)\n\n # Check if an error occurred during clamping\n if size is None:\n logging.error('sym_key_gen clamp size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen clamp size')\n raise Exception\n\n # Check if size is a power of 2\n if not math.log2(size).is_integer():\n logging.error('sym_key_gen size exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in sym_key_gen size')\n raise Exception\n\n # Generate and return a random symmetric key with the given size\n return fu.generate_random_bytes(size, debug)\n\n\ndef iv_gen(iv_length=const.IV_DEFAULT_SIZE, debug=0):\n \"\"\"\n Generate an initialisation vector (IV) with the given length.\n :param iv_length: length in bytes of the IV\n :param debug: if 1, prints will be shown during execution; default 0, no prints are shown\n :return: the randomly generated IV\n \"\"\"\n\n # Clamp the size between IV_DEFAULT_SIZE and the system maximum possible value\n length = fu.clamp(iv_length, const.IV_DEFAULT_SIZE, const.IV_MAX_SIZE)\n\n # Check if an error occurred during clamping\n if length is None:\n logging.error('generate_iv clamp length exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in generate_iv clamp length')\n raise Exception\n\n # Check if length is a power of 2\n if not math.log2(length).is_integer():\n logging.error('generate_iv length exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in generate_iv length')\n raise Exception\n\n # Generate and return a random IV with the given length\n return fu.generate_random_bytes(iv_length, debug)\n\n\ndef get_cipher(mode=AES.MODE_CTR, init_val=0, tag=None, key=None, iv=None, debug=0):\n \"\"\"\n Create a cipher with the given mode, key and iv.\n :param mode: cipher mode\n :param init_val: initial value (ONLY FOR CTR MODE)\n :param tag: authentication tag (ONLY FOR GCM MODE)\n :param key: encryption key\n :param iv: initialisation vector\n :param debug: if 1, prints will be shown during execution; default 0, no prints are shown\n :return: the cipher\n \"\"\"\n\n # Check if key is set\n if key is None:\n logging.error('cipher key exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in cipher key')\n raise Exception\n\n # Check if iv is set\n if iv is None:\n logging.error('cipher iv exception')\n if debug: # 
ONLY USE FOR DEBUG\n print('EXCEPTION in cipher iv')\n raise Exception\n\n # Construct a AES Cipher object with the given mode, key and IV\n if mode is AES.MODE_CTR:\n cipher = AES.new(key, mode, initial_value=init_val, nonce=iv)\n else:\n cipher = AES.new(key, mode, nonce=iv)\n\n return cipher\n\n\ndef encrypt(cipher=None, plaintext=None, debug=0):\n \"\"\"\n Encrypt the given plaintext using the given cipher.\n :param cipher: cipher to use for encryption\n :param plaintext: data to encrypt\n :param debug: if 1, prints will be shown during execution; default 0, no prints are shown\n :return: the ciphertext\n \"\"\"\n\n # Check if cipher is set\n if cipher is None:\n logging.error('encrypt cipher exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt cipher')\n raise Exception\n\n # Check if plaintext is set\n if plaintext is None:\n logging.error('encrypt plaintext exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt plaintext')\n raise Exception\n\n # Encrypt the plaintext and return the related ciphertext\n return cipher.encrypt(plaintext)\n\n\ndef decrypt(cipher=None, ciphertext=None, debug=0):\n \"\"\"\n Decrypt the ciphertext using the given cipher.\n :param cipher: cipher to use for encryption\n :param ciphertext: data to decrypt\n :param debug: if 1, prints will be shown during execution; default 0, no prints are shown\n :return: the plaintext\n \"\"\"\n\n # Check if cipher is set\n if cipher is None:\n logging.error('decrypt cipher exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt cipher')\n raise Exception\n\n # Check if ciphertext is set\n if ciphertext is None:\n logging.error('decrypt ciphertext exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt ciphertext')\n raise Exception\n\n # Decrypt the ciphertext and return the related ciphertext\n return cipher.decrypt(ciphertext)\n","sub_path":"re_enc_engine/sym_enc_primitives.py","file_name":"sym_enc_primitives.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460737040","text":"from flask import Flask, render_template, request, redirect, url_for, session,jsonify\nfrom flask_mysqldb import MySQL\nimport MySQLdb.cursors\nfrom werkzeug.utils import secure_filename\nfrom datetime import datetime\nimport os\nimport re\n\n\napp = Flask(__name__)\n\n# Change this to your secret key (can be anything, it's for extra protection)\napp.secret_key = 'your secret key'\n\n# Enter your database connection details below\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'student_management_system'\n\nUPLOAD_FOLDER_EVENTS = './static/styles/Admin/images/events'\nUPLOAD_FOLDER_STUDENTS = './static/styles/Admin/images/students'\nUPLOAD_FOLDER_FACULTY = './static/styles/Admin/images/faculties'\n\napp.config['UPLOAD_FOLDER_EVENTS'] = UPLOAD_FOLDER_EVENTS\napp.config['UPLOAD_FOLDER_STUDENT'] = UPLOAD_FOLDER_STUDENTS\napp.config['UPLOAD_FOLDER_FACULTY'] = UPLOAD_FOLDER_FACULTY\n\n# Intialize MySQL\nmysql = MySQL(app)\n\n\n@app.route('/')\ndef index():\n cursor =mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('select * from students')\n result=cursor.fetchone()\n print(result)\n return render_template('home.html')\n\n\n#admin routes\n#login route for admin\n@app.route('/admin/login', methods=['GET','POST'])\ndef adminLogin():\n #if login route is of method get\n if 
request.method==\"GET\":\n if 'adminloggedin' in session:\n return redirect(url_for('dashboard'))\n else: \n return render_template('Admin/adminLogin.html')\n else:\n #login route post method\n #print(mysql)\n username=request.form['username']\n password=request.form['password']\n query='select * from users where username=%s and password=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(username,password));\n account = cursor.fetchone()\n if account:\n session['adminloggedin'] =True\n return jsonify({'status':True})\n #return render_template('Admin/dashboard.html')\n else:\n message=\"Invalid credentials\"\n return jsonify({'status':False})\n #return render_template('Admin/adminLogin.html',message=message)\n \n@app.route('/admin/logout', methods=['GET'])\ndef adminLogout():\n session.pop('adminloggedin', None)\n return redirect(url_for('adminLogin'))\n \n@app.route('/admin/dashboard', methods=['GET'])\ndef dashboard():\n if 'adminloggedin' in session:\n query='select * from admin'\n query2='select count(id) as total from students'\n query3='select count(id) as total1 from faculties'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query);\n account = cursor.fetchone()\n result2=cursor.execute(query2);\n totalStudents = cursor.fetchone()\n result3=cursor.execute(query3);\n totalFaculties = cursor.fetchone()\n print(account)\n \n return render_template('Admin/dashboard.html',account=account,totalStudents=totalStudents,totalFaculties=totalFaculties)\n else:\n return redirect(url_for('adminLogin'))\n\n@app.route('/admin/students', methods=['GET'])\ndef students():\n query='select * from students'\n query2='select count(id) as total from students'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query);\n allstudents = cursor.fetchall()\n result2=cursor.execute(query2); \n totalStudents = cursor.fetchone()\n return render_template('Admin/students.html',allstudents=allstudents,totalStudents=totalStudents)\n\n@app.route('/admin/studentProfile//', methods=['GET'])\ndef studentProfile(id,roll):\n print(id,roll)\n query='select * from students where id=%s and admission_number=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(id,roll));\n account = cursor.fetchone()\n return render_template('Admin/studentProfile.html',account=account)\n@app.route('/admin/facultyProfile//', methods=['GET'])\ndef facultyProfile(id,f_id):\n query='select * from faculties where id=%s and faculty_id=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(id,f_id));\n account = cursor.fetchone()\n return render_template('Admin/facultyProfile.html',account=account)\n\n\n@app.route('/admin/editStudentProfile//', methods=['GET'])\ndef editStudentProfile(id,roll):\n print(id,roll)\n query='select * from students where id=%s and admission_number=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(id,roll));\n account = cursor.fetchone()\n return render_template('Admin/editStudentProfile.html',account=account)\n@app.route('/admin/editFacultyProfile//', methods=['GET'])\ndef editFacultyProfile(id,f_id):\n print(id,f_id)\n query='select * from faculties where id=%s and faculty_id=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(id,f_id));\n account = cursor.fetchone()\n return 
render_template('Admin/editFacultyProfile.html',account=account) \n\n\n@app.route('/admin/editStudentProfile',methods=['POST'])\ndef editSprofile():\n fname=request.form['fname']\n admission=request.form['adnum']\n phone=request.form['phn']\n email=request.form['em']\n address=request.form['add']\n gender=request.form['gen']\n religion=request.form['rel']\n dob=request.form['dob']\n course=request.form['cor']\n batch=request.form['bat']\n f=request.files['file']\n propic=request.files['file'].filename\n if f:\n f.save(os.path.join(app.config['UPLOAD_FOLDER_STUDENT'], propic))\n sid=request.form['stid']\n if not propic:\n propic=request.form['previmg']\n else:\n filePath=os.path.join(app.config['UPLOAD_FOLDER_STUDENT'], request.form['previmg'])\n if os.path.exists(filePath):\n os.remove(filePath) \n print(propic)\n query='UPDATE `students` SET `admission_number`=%s,`fname`=%s ,`gender`=%s,`dob`=%s,`religion`=%s, `phone`=%s,`address`=%s,`email`=%s,`course`=%s,`batch`=%s,`profile_img`=%s WHERE `id` =%s;'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(admission,fname,gender,dob,religion,phone,address,email,course,batch,propic,sid))\n mysql.connection.commit()\n if result:\n return redirect(url_for('studentProfile',id=sid,roll=admission))\n else:\n return redirect(url_for('editStudentProfile',id=sid,roll=admission))\n@app.route('/admin/editFacultyProfile',methods=['POST'])\ndef editFprofile():\n fname=request.form['fname']\n f_id=request.form['fid']\n phone=request.form['pho']\n email=request.form['ema']\n gender=request.form['gen']\n religion=request.form['rel']\n department=request.form['dep']\n dob=request.form['dob']\n daj=request.form['daj']\n designation=request.form['des']\n qualifications=request.form['qua']\n f=request.files['file']\n propic=request.files['file'].filename\n fcid=request.form['fcid']\n if f:\n f.save(os.path.join(app.config['UPLOAD_FOLDER_FACULTY'], propic))\n if not propic:\n propic=request.form['previmg']\n else:\n filePath=os.path.join(app.config['UPLOAD_FOLDER_FACULTY'], request.form['previmg'])\n if os.path.exists(filePath):\n os.remove(filePath)\n print(propic)\n query='UPDATE `faculties` SET `faculty_name`=%s,`designation`=%s ,`department`=%s,`phone`=%s,`email`=%s, `qualifications`=%s,`gender`=%s,`religion`=%s,`dob`=%s,`date_joined`=%s,`faculty_id`=%s,`profile_img`=%s WHERE `id` =%s;'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(fname,designation,department,phone,email,qualifications,gender,religion,dob,daj,f_id,propic,fcid))\n mysql.connection.commit()\n if result:\n return redirect(url_for('facultyProfile',id=fcid,f_id=f_id))\n else:\n return redirect(url_for('editFacultyProfile',id=fcid,f_id=f_id))\n\n@app.route('/admin/faculties', methods=['GET'])\ndef faculties():\n query='select * from faculties'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query)\n faculty = cursor.fetchall()\n return render_template('Admin/faculties.html',faculty=faculty)\n\n@app.route('/admin/events',methods=['GET'])\ndef adminEvents():\n query='select * from events'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query)\n events = cursor.fetchall()\n print(events)\n return render_template('Admin/events.html',events=events)\n\n@app.route('/admin/view_attendance//',methods=['GET'])\ndef viewAttendance(s_id,ad_id):\n query='select * from attendance where student_id=%s and attendance_date=%s'\n cursor = 
mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(s_id,'2021-07-05'))\n att= cursor.fetchall()\n print(len(att))\n att_dict={\"ADM\":0,'BIA':0,'ML':0,\"MC\":0,\"BD\":0}\n for key in att_dict:\n for i in att:\n if key==i['subject']:\n att_dict.update({key:1})\n print(att_dict)\n return render_template('Admin/attendance.html',att_dict=att_dict)\n\n@app.route('/admin/enter_attendance',methods=['GET'])\ndef enterAttendance():\n query='select * from students'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query)\n students = cursor.fetchall()\n return render_template('Admin/addAttendance.html',students=students)\n@app.route('/admin/enter_attendance',methods=['POST'])\ndef addAttendance():\n print(request.form)\n a_date=request.form['dte']\n student_attendances=[]\n for i in request.form:\n if i!=\"dte\":\n sub_split=i.split('_')\n sub=sub_split[0]\n student_id=sub_split[1]\n att_status='1' if request.form[i]=='1' else '0';\n \n query='INSERT INTO `attendance` (`attendance_date`, `student_id`, `subject`,`status`) VALUES (%s,%s,%s,%s)'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(a_date,student_id,sub,att_status))\n print('done')\n mysql.connection.commit()\n return redirect(url_for('students'))\n\n@app.route('/admin/view_marks//',methods=['GET'])\ndef viewMarks(s_id,ad_id): \n print(ad_id)\n query='select * from marks where student_id=%s and sem=%s'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(ad_id,'s4'))\n mark = cursor.fetchall()\n print(mark)\n return render_template('Admin/marks.html',mark=mark) \n\n \n@app.route('/admin/addevent', methods=['GET','POST'])\ndef addEvents():\n if request.method==\"GET\":\n return render_template('Admin/addEvent.html')\n else:\n event_name=request.form['event-name'] \n event_desc=request.form['event-desc'] \n event_dte=request.form['event-date']\n f=request.files['file']\n event_img=request.files['file'].filename\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], event_img))\n event_id='2';\n print(event_desc)\n print(event_name)\n print(event_img)\n print(event_dte)\n query='insert into `events` (`event_name`, `event_date`, `event_description`, `event_image`) values(%s,%s,%s,%s)'\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n result=cursor.execute(query,(event_name,event_dte,event_desc,event_img))\n mysql.connection.commit()\n if result:\n return redirect(url_for('adminEvents'))\n else:\n return render_template('Admin/addEvent.html')\n \n\n\n\n@app.route('/admin/cancelevent', methods=['POST'])\ndef cancelEvent():\n id=request.form['id']\n print(id)\n query='DELETE FROM events WHERE event_id=%s'\n cursor = mysql.connection.cursor()\n result=cursor.execute(query,(id));\n mysql.connection.commit()\n if result:\n return jsonify({'status':True})\n\n\n@app.route('/admin/deletestudent', methods=['POST'])\ndef deleteStudent():\n s_id=request.form['id']\n print(s_id)\n cursor =mysql.connection.cursor()\n result=cursor.execute('DELETE FROM students WHERE admission_number=%s',(s_id,))\n mysql.connection.commit()\n if result:\n return jsonify({'status':True})\n\n@app.route('/admin/approve', methods=['POST'])\ndef approveStudent():\n s_id=request.form['id']\n print(s_id)\n cursor =mysql.connection.cursor()\n result=cursor.execute('UPDATE students SET approval=%s where admission_number=%s',('1',s_id,))\n mysql.connection.commit()\n if result:\n return jsonify({'status':True})\n \n#user 
routes\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n # Output message if something goes wrong...\n msg = ''\n # Check if \"username\" and \"password\" POST requests exist (user submitted form)\n if request.method == 'POST' and 'username' in request.form and 'password' in request.form:\n # Create variables for easy access\n username = request.form['username']\n password = request.form['password']\n # Check if account exists using MySQL\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'SELECT * FROM accounts WHERE username = %s AND password = %s', (username, password,))\n # Fetch one record and return result\n account = cursor.fetchone()\n # If account exists in accounts table in out database\n if account:\n # Create session data, we can access this data in other routes\n session['loggedin'] = True\n session['id'] = account['id']\n session['username'] = account['username']\n session['email'] = account['email']\n # Redirect to home page\n return redirect(url_for('home'))\n else:\n # Account doesnt exist or username/password incorrect\n msg = 'Incorrect username/password!'\n # Show the login form with message (if any)\n return render_template('index.html', msg=msg)\n\n# http://localhost:5000/python/logout - this will be the logout page\n\n@app.route('/logout')\ndef logout():\n # Remove session data, this will log the user out\n session.pop('loggedin', None)\n session.pop('id', None)\n session.pop('username', None)\n # Redirect to login page\n return redirect(url_for('login'))\n\n# http://localhost:5000/pythinlogin/home - this will be the home page, only accessible for loggedin users\n\n\n@app.route('/s_welcome')\ndef home():\n # Check if user is loggedin\n if 'loggedin' in session:\n # User is loggedin show them the home page\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM accounts WHERE id = %s',\n (session['id'],))\n account = cursor.fetchone()\n\n return render_template('s_welcome.html', account=account)\n # User is not loggedin redirect to login page\n return redirect(url_for('login'))\n\n# http://localhost:5000/pythinlogin/profile - this will be the profile page, only accessible for loggedin users\n\n\n@app.route('/s_profile')\ndef profile():\n # Check if user is loggedin\n if 'loggedin' in session:\n # We need all the account info for the user so we can display it on the profile page\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM accounts WHERE id = %s',\n (session['id'],))\n account = cursor.fetchone()\n\n # Show the profile page with account info\n return render_template('s_profile.html', account=account)\n # User is not loggedin redirect to login page\n return redirect(url_for('login'))\n\n\n@app.route('/s_attendance')\ndef s_attendance():\n # Check if user is loggedin\n if 'loggedin' in session:\n # User is loggedin show them the home page\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM accounts WHERE id = %s',\n (session['id'],))\n account = cursor.fetchone()\n return render_template('s_attendance.html', account=account)\n # User is not loggedin redirect to login page\n return redirect(url_for('login'))\n\n\n@app.route('/s_marks')\ndef s_marks():\n # Check if user is loggedin\n if 'loggedin' in session:\n # User is loggedin show them the home page\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM accounts WHERE id = %s',\n (session['id'],))\n 
account = cursor.fetchone()\n return render_template('s_marks.html', account=account)\n # User is not loggedin redirect to login page\n return redirect(url_for('login'))\n\n\n@app.route('/s_timetable')\ndef s_timetable():\n # Check if user is loggedin\n if 'loggedin' in session:\n # User is loggedin show them the home page\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM accounts WHERE id = %s',\n (session['id'],))\n account = cursor.fetchone()\n return render_template('s_timetable.html', account=account)\n # User is not loggedin redirect to login page\n return redirect(url_for('login'))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Student Management System/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379826867","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom common import auth_views\n\nauth_patterns = [\n # site login via server-side login (for testing only)\n url(r'^ss-login/?$', auth_views.ss_login, name='ss-login'),\n url(r'^ss-login-error/?$', auth_views.ss_login_error, name='ss-login-error'),\n url(r'^ss-home/?$', auth_views.ss_home, name='ss-home'),\n url(r'^ss-logout/?$', auth_views.ss_logout, name='ss-logout'),\n]\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n # server-side login\n url(r'auth/', include(auth_patterns)),\n # direct use of oauth2_provider (no psa). 
Used for testing\n url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),\n url(r'', include('social_django.urls', namespace='social'))\n]\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120291770","text":"import numpy as np\nimport os, h5py, scipy.io, scipy.signal, scipy.ndimage\nimport util\nimport matplotlib.pyplot as plt\nimport cv2\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nclass RFdata(object):\n \n def __init__(self, result_path):\n \"\"\"\n * argument\n - result_path : by USCTSim\n \"\"\"\n assert os.path.exists(result_path)\n \n # simulation inputs\n try:\n self.param = util.load_matlab_struct(result_path, 'param')\n except:\n self.param = h5py.File(os.path.join(result_path, 'param.mat'), \"r\")[\"param\"]\n \n medium = h5py.File(os.path.join(result_path, 'medium.mat'), \"r\")[\"medium\"]\n self.medium_c = np.array(medium[\"sound_speed\"])\n self.medium_d = np.array(medium[\"density\"])\n self.medium_imp = self.medium_c * self.medium_d\n \n img_lap = np.abs(cv2.Laplacian(self.medium_imp,cv2.CV_64F))\n self.medium_sct = (img_lap > 0.0 )*1.0\n \n try:\n self.kgrid = util.load_matlab_struct(result_path, 'str_kgrid')\n except:\n self.kgrid = h5py.File(os.path.join(result_path, 'str_kgrid.mat'), \"r\")[\"str_kgrid\"]\n \n self.dt = self.kgrid[\"t_array\"][1] - self.kgrid[\"t_array\"][0]\n \n # sensors\n try:\n self.sensor_pos = util.load_matlab_struct(result_path, 'sensor')[\"mask\"].T\n except:\n self.sensor_pos = h5py.File(os.path.join(result_path, 'sensor.mat'), \"r\")[\"sensor\"][\"mask\"]\n self.sensor_pos = np.array(self.sensor_pos)\n \n path = os.path.join(result_path, 'mask_points.mat')\n try:\n self.sensor_map = scipy.io.loadmat(path)[\"mask_points\"]\n except:\n self.sensor_map = np.array(h5py.File(path, \"r\")[\"mask_points\"])\n \n # sources\n path = os.path.join(result_path, \"source_wave.mat\")\n try:\n self.source_wave = scipy.io.loadmat(path)[\"source_wave\"][0,:]\n param_source = self.param[\"source\"][\"point_map\"]\n except:\n self.source_wave = np.array(h5py.File(path, \"r\")[\"source_wave\"][0,:])\n param_source = np.array(self.param[\"source\"][\"point_map\"]).astype(int)\n \n self.source_pos = self.sensor_pos[param_source-1,:]\n \n # simulation outputs\n path = os.path.join(result_path, \"rfdata.mat\")\n self.rawdata = np.array(h5py.File(path, \"r\")[\"rfdata\"])\n \n # hilbert transformation\n comp_ = scipy.signal.hilbert(self.rawdata - self.rawdata.mean(), axis=2)\n self.phase = np.angle(comp_)\n self.amp = abs(comp_)\n \n # size info \n self.T, self.R, self.L = list(self.rawdata.shape)\n self.n_dim = self.sensor_pos.shape[0]\n \n # T-R mesh \n self.mesh_n_rcv, self.mesh_n_src = np.meshgrid(np.arange(self.R), param_source-1)\n self.mesh_pos_rcv = self.sensor_pos[self.mesh_n_rcv,:]\n self.mesh_pos_src = self.sensor_pos[self.mesh_n_src,:]\n \n # T-R mask\n self.TRmask = None\n \n return\n \n def getPointSubset(self, ngrid, offset_arr=[0], flg_mask = True):\n\n # travel distance\n pos = self.ngrid2pos(ngrid)\n map_dist_src = np.linalg.norm( self.mesh_pos_src - pos, axis = 2)\n map_dist_rcv = np.linalg.norm( self.mesh_pos_rcv - pos, axis = 2)\n map_dist = map_dist_src + map_dist_rcv\n\n # sampling index of arrival time \n c = np.median(self.medium_c)\n map_time_pos = (map_dist/(c*self.dt)).astype(np.uint16)\n \n def pairwise_extraction(RF, map_time_pos, offset_arr):\n D = 
np.zeros( (self.T, self.R, len(offset_arr)), dtype=np.float32)\n # D = np.zeros( (self.T, self.R, len(offset_arr)), dtype=RF.dtype)\n for i in range(self.T):\n for j in range(self.R):\n pos = map_time_pos[i,j]\n ts = RF[i,j,:]\n ts = np.concatenate((ts, np.zeros_like(ts)))\n D[i,j,:] = ts[pos+offset_arr]\n return D \n \n if flg_mask and self.TRmask is not None:\n RF = self.amp * self.TRmask[:,:,np.newaxis]\n else:\n RF = self.amp\n \n subset = pairwise_extraction(RF, map_time_pos, offset_arr)\n \n return map_time_pos, subset\n \n def setTRmask(self, maskFunc):\n self.TRmask = maskFunc(self)\n return self.TRmask\n \n def syntheticAperture(self, c = 1):\n \n mesh_grid = np.array(np.meshgrid(np.arange(self.kgrid[\"Ny\"][0]//c), np.arange(self.kgrid[\"Nx\"][0]//c) ), dtype=int)\n ngrids = mesh_grid.reshape(2, mesh_grid.size//2).T\n def gridwise_summation(ngrid):\n _, subset = self.getPointSubset(ngrid*c)\n return np.sum(subset)\n \n sa = np.array([gridwise_summation(ngrid) for ngrid in ngrids])\n sa = np.log(sa.reshape( self.kgrid[\"Nx\"]//c, self.kgrid[\"Ny\"]//c ).T)\n return sa\n \n \n def ngrid2pos(self, ngrid):\n return np.array([ \n np.array(self.kgrid[\"x\"]).T[ngrid[0], ngrid[1]], \n np.array(self.kgrid[\"y\"]).T[ngrid[0], ngrid[1]]])\n \n def pos2ngrid(self, pos):\n pos_array = np.array([np.array(self.kgrid[\"x\"]).T, np.array(self.kgrid[\"y\"]).T])\n dist = np.linalg.norm(pos_array - pos[:, np.newaxis, np.newaxis], axis=0)\n ngrid = np.unravel_index( np.argmin(dist), dist.shape)\n return ngrid \n\n def draw_input(self):\n \n points = np.where(self.sensor_map>0)\n\n fig = plt.figure(figsize=(16,12))\n\n ax = plt.subplot(121)\n image = ax.imshow(self.medium_c, cmap='gray')\n ax.axis(\"image\")\n plt.scatter( points[0], points[1], s=3, c='blue')\n plt.title(\"sound speed\")\n\n divider = make_axes_locatable(ax)\n ax_cb = divider.new_horizontal(size=\"2%\", pad=0.05)\n fig.add_axes(ax_cb) \n plt.colorbar(image, cax = ax_cb) \n\n ax = plt.subplot(122)\n image = ax.imshow(self.medium_d, cmap='gray')\n ax.axis(\"image\")\n plt.scatter( points[0], points[1], s=3, c='blue')\n plt.title(\"density\")\n\n divider = make_axes_locatable(ax)\n ax_cb = divider.new_horizontal(size=\"2%\", pad=0.05)\n fig.add_axes(ax_cb)\n plt.colorbar(image, cax = ax_cb) \n\n plt.show()\n \n def debug():\n pass","sub_path":"pyusct/rfdata.py","file_name":"rfdata.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109445067","text":"import os\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'music_project.settings')\r\n\r\nimport django\r\ndjango.setup()\r\n\r\n#-------------------------------------------------------------------------------\r\nimport requests\r\nimport random\r\nfrom musicapp.models import UserProfile, Artist, Album, Song, Comment, Rating\r\n\r\ndef populate():\r\n\r\n # Add artists, albums and songs to the database\r\n searchRequest(name=\"ironmaiden\", conn=\"artist\")\r\n searchRequest(name=\"metallica\", conn=\"artist\")\r\n searchRequest(name=\"megadeth\", conn=\"artist\")\r\n searchRequest(name=\"acdc\", conn=\"artist\")\r\n searchRequest(name=\"mastodon\", conn=\"artist\")\r\n\r\n # Add rating and comment to all element\r\n print(\"Add rating and one comment to each artist\")\r\n for a in Artist.objects.filter():\r\n addComment(\"artist\", a.ArtistSlug)\r\n addRating( \"artist\", a.ArtistSlug)\r\n\r\n print(\"Add rating and one comment to each album\")\r\n for a in Album.objects.filter():\r\n 
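# note added: filter() with no arguments behaves like Album.objects.all() here\r\n        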
addComment(\"album\", a.Artist.ArtistSlug, a.AlbumSlug)\r\n addRating( \"album\", a.Artist.ArtistSlug, a.AlbumSlug)\r\n\r\n print(\"Add rating and one comment to each song\")\r\n for s in Song.objects.filter():\r\n addComment(\"song\", s.Artist.ArtistSlug, s.Album.AlbumSlug, s.SongSlug)\r\n addRating( \"song\", s.Artist.ArtistSlug, s.Album.AlbumSlug, s.SongSlug)\r\n\r\ndef searchRequest(name=\"\", conn=\"\", artist=\"\", album=\"\"):\r\n\r\n if conn == \"\":\r\n searchRequest(name, conn=\"artist\")\r\n searchRequest(name, conn=\"album\")\r\n searchRequest(name, conn=\"track\")\r\n\r\n # Send request to search information\r\n result = requests.get(\"https://api.deezer.com/search/\" + conn + \"?\", params={'q':name})\r\n\r\n # Check if the HTTP response is OK\r\n if result.status_code == 200:\r\n result = result.json()\r\n\r\n # Don't need to populate to much\r\n if(result['total'] > 25):\r\n nbr_results = 25\r\n else:\r\n nbr_results = result['total']\r\n\r\n # Browse the different element in the JSON answer\r\n for i in range(len(result['data'])):\r\n data = result['data'][i]\r\n\r\n # If an artist field is found, search his album\r\n if data['type'] == \"artist\":\r\n\r\n print(data['name'], \",\", data['type'])\r\n\r\n # Create a new artist\r\n artistDB = Artist()\r\n artistDB.Name = data['name']\r\n artistDB.PictureURL = data['picture_medium']\r\n artistDB.ArtistDeezerID = data['id']\r\n\r\n # If the artist is not already in the DB, save it\r\n try:\r\n artistDB = Artist.objects.get(Name=artistDB.Name)\r\n except Artist.DoesNotExist:\r\n artistDB.save()\r\n\r\n # Search the albums of the artist\r\n searchRequest(name=data['name'], artist=data['name'], conn=\"album\")\r\n\r\n # Populate the database only with the first artist found\r\n break;\r\n\r\n # If an album field is found, search his tracks\r\n if data['type'] == \"album\" and\\\r\n (data['artist']['name'] == artist or artist == \"\"):\r\n\r\n print(artist, \",\", data['title'], \",\", data['type'])\r\n\r\n artistDB = Artist.objects.get(Name=artist)\r\n\r\n # Create a new album\r\n albumDB = Album()\r\n albumDB.Artist = artistDB\r\n albumDB.Title = data['title']\r\n albumDB.PictureURL = data['cover_medium']\r\n albumDB.AlbumDeezerID = data['id']\r\n albumDB.ArtistDeezerID = data['artist']['id']\r\n\r\n # If the album is not already in the DB, save it\r\n try:\r\n albumDB = Album.objects.get(Title=albumDB.Title,\r\n Artist=artistDB)\r\n except Album.DoesNotExist:\r\n albumDB.save()\r\n\r\n # Search the songs of the artist\r\n searchRequest(name=data['title'], conn=\"track\", artist=artist,\r\n album=data['title'])\r\n\r\n # Populate the database only with the four first albums found\r\n if i > 3:\r\n break;\r\n\r\n if data['type'] == \"track\" and\\\r\n (data['artist']['name'] == artist or artist == \"\") and\\\r\n (data['album']['title'] == album or album == \"\"):\r\n\r\n print(artist, \",\", album, \",\", data['title'], \",\", data['type'])\r\n\r\n artistDB = Artist.objects.get(Name=artist)\r\n albumDB = Album.objects.get(Title=album, Artist=artistDB)\r\n\r\n # Create a new song\r\n songDB = Song()\r\n songDB.Album = albumDB\r\n songDB.Artist = artistDB\r\n songDB.Title = data['title']\r\n songDB.PictureURL = data['album']['cover_medium']\r\n songDB.PreviewURL = data['preview']\r\n songDB.AlbumDeezerID = data['album']['id']\r\n songDB.ArtistDeezerID = data['artist']['id']\r\n songDB.SongDeezerID = data['id']\r\n\r\n # If the song is not already in the DB, save it\r\n try:\r\n songDB = Song.objects.get(Title=songDB.Title,\r\n 
Album=albumDB,\r\n                                      Artist=artistDB)\r\n            except Song.DoesNotExist:\r\n                songDB.save()\r\n\r\ndef addComment(page, artist, album=\"\", song=\"\"):\r\n\r\n    com = Comment.objects.create(Username = \"populator\",\r\n                                 Content = \"populator left a comment\",\r\n                                 Artist = artist,\r\n                                 Album = album,\r\n                                 Song = song,\r\n                                 Comment_page = page)\r\n\r\n    com.save()\r\n\r\ndef addRating(page, artist, album=\"\", song=\"\"):\r\n\r\n    rate = Rating.objects.create(Username = \"populator\",\r\n                                 Artist = artist,\r\n                                 Album = album,\r\n                                 Song = song,\r\n                                 RatingValue = random.randint(0, 5),\r\n                                 Rating_page = page)\r\n    rate.save()\r\n\r\n# Start execution here!\r\nif __name__ == '__main__':\r\n    print(\"Starting Musicapp population script...\")\r\n    populate()\r\n","sub_path":"population_script.py","file_name":"population_script.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"486633447","text":"# -*- coding: utf-8 -*-\n\n#\n# Licensed Materials - Property of esse.io\n#\n# (C) Copyright esse.io. 2015 All Rights Reserved\n#\n# Author: frank (frank@esse.io)\n#\n#\n\nfrom ws.db import api as db_api\nfrom ws.context import g_context\n\n\ndef addrcode_create(addrcode_info):\n    '''\n    create addrcode resource\n    @param addrcode_info: required addrcode information for registration, dict\n    @return: api return, dict without resource name\n    '''\n    # 1) call db api to insert addrcode information into db\n    ret = db_api.addrcode_create(g_context, addrcode_info)\n    addrcode_info = {\n        'code': ret['code'],\n        'address': ret['address']\n    }\n\n    return addrcode_info\n\ndef addrcode_list(code, level):\n    '''\n    list all addrcodes\n    '''\n    results = []\n    rets = db_api.get_addrcode_list_by_level(g_context, code, level)\n\n    if rets:\n        for ret in rets:\n            ret = ret._as_dict()\n            del ret['created_at']\n            del ret['deleted_at']\n            del ret['updated_at']\n            results.append(ret)\n\n    return {'addrcode': results}\n\ndef addrcode_detail(code):\n    ret = db_api.get_addrcode_by_code(g_context, code)\n    return dict(ret)\n\ndef generate_addrcode_levels(params):\n    '''\n    generate the province/city/county addrcode fields from a county-level code\n    '''\n    if 'addrcode' in params:\n        addrcode = params['addrcode']\n        addrcode_county = addrcode\n        ret = addrcode_detail(addrcode_county)\n        if len(ret) != 0:\n            params['addrcode_county'] = ret['address']\n            params['addrcode_county_code'] = ret['code']\n        addrcode_city = addrcode[0:4]\n        addrcode_city = addrcode_city + '00'\n        ret = addrcode_detail(addrcode_city)\n        if len(ret) != 0:\n            params['addrcode_city'] = ret['address']\n            params['addrcode_city_code'] = ret['code']\n        addrcode_province = addrcode[0:2]\n        addrcode_province = addrcode_province + '0000'\n        ret = addrcode_detail(addrcode_province)\n        if len(ret) != 0:\n            params['addrcode_province'] = ret['address']\n            params['addrcode_province_code'] = ret['code']\n","sub_path":"ws/resources/addrcode.py","file_name":"addrcode.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"284846051","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('create_view/', views.create_view, name='create_view'),\n    path('list_view/', views.list_view, name='list_view'),\n    path('detail_view/', views.detail_view, name='detail_view'),\n    path('update_view/', views.update_view, name='update_view'),\n    path('delete_view/', views.delete_view, name='delete_view'),\n]\n","sub_path":"CRUD_Project/CRUD_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"38578102","text":"#!/anaconda3/bin/python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# For 3D graphs\nfrom mpl_toolkits.mplot3d import Axes3D, axes3d\n\n# Line \n# Scatter\n# Histogram\n# Bar\n# 3D Graph (with an add-on library)\n\nXarray2d = np.random.random_sample( (4,5)) * 100\nXarray3d = np.random.random_sample( (3,4,5)) * 100\nYarray2d = 2*Xarray2d**2-3*Xarray2d+1\nYarray3d = 2*Xarray3d**2-3*Xarray3d+1\nprint(Xarray2d.shape, Yarray2d.shape)\nprint(Xarray2d)\nprint(Yarray2d)\n#print(\"3D\")\n#print(Xarray3d)\n#print(Yarray3d)\n\n# Start plotting\n\n# 1. Line\nplt.plot(Xarray2d.T, Yarray2d.T)\nplt.xlabel('X random values')\nplt.ylabel('Y function (of X) values')\nplt.title('Line Graph')\n# ??? HOW TO WORK THE LEGEND FOR ARRAYS ????\n#plt.legend(('label1', 'label2', 'label3', 'label4', 'label5'))\nplt.show()\n\n# 2. Scatter\nplt.scatter(Xarray2d.T, Yarray2d.T)\nplt.xlabel('X random values')\nplt.ylabel('Y function (of X) values')\nplt.title('Scatter Graph')\n# ??? HOW TO WORK THE LEGEND FOR ARRAYS ????\n#plt.legend(('label1', 'label2', 'label3', 'label4', 'label5'))\nplt.show()\n\n# 3. Histogram\n# for col in Xarray.axis()\nplt.hist(Xarray2d.T[0])\nplt.xlabel('X random values')\nplt.ylabel('Y function (of X) values')\nplt.title('Histogram Graph')\n# ??? HOW TO WORK THE LEGEND FOR ARRAYS ????\n#plt.legend((),('label1', 'label2', 'label3', 'label4', 'label5'))\nplt.show()\n\n# 4. Bar Graph\nplt.bar(Xarray2d.T[0], Yarray2d.T[0])\nplt.xlabel('X random values')\nplt.ylabel('Y function (of X) values')\nplt.title('Bar Graph')\n# ??? HOW TO WORK THE LEGEND FOR ARRAYS ????\n#plt.legend(('label1', 'label2', 'label3', 'label4', 'label5'))\nplt.show()\n\n# 5. Pie Graph\nplt.pie(Yarray2d[0])\nplt.title('Pie Graph')\n# ??? 
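(a hedged sketch follows)\n# NOTE (added; not from the original author): plt.plot returns one Line2D per\n# column, so one option for array data is, e.g.:\n#   lines = plt.plot(Xarray2d.T, Yarray2d.T)\n#   plt.legend(lines, ['series %d' % i for i in range(len(lines))])\n# For a pie chart, labels can also be passed directly, e.g.\n#   plt.pie(Yarray2d[0], labels=['part %d' % i for i in range(len(Yarray2d[0]))])\n# ??? 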
HOW TO WORK THE LEGEND FOR ARRAYS ????\n#plt.legend((),('label1', 'label2', 'label3', 'label4', 'label5'))\nplt.show()\n\n\n# Something Else\nx = np.arange(0., 5., 0.2)\nprint(x)\n# red dashes, blue squares and green triangles\nplt.plot(x, x, 'ro', label='Red O')\nplt.plot(x, x**2, 'bs', label='Blue Square') \nplt.plot(x, x**3, 'g^', label='Green Triangle')\nplt.xlabel(\"Range of X\")\nplt.ylabel(\"Range of Y (function of X)\")\nplt.title(\"Interesting Plot\")\nplt.legend()\nplt.show()\n\n\n","sub_path":"Ghana_trainig/python/.ipynb_checkpoints/script7-checkpoint.py","file_name":"script7-checkpoint.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"353025822","text":"import argparse\n\nfrom common.base.PhoneSet import PhoneSet\nfrom common.base.BatchFile import load_batch\nfrom common.base.LexiconManager import LexiconManager\nfrom common.decoding.BestPath import BestPath\nfrom common.decoding.FillerManager import FillerManager\nfrom common.decoding.HypothesisLattice import HypothesisLattice\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('phonetic_symbol_set', help='')\nparser.add_argument('pronunciation_lexicon', help='')\nparser.add_argument('batch', help='')\nparser.add_argument('--hyp', dest='hypotheses_file', help='')\nparser.add_argument('--hypf', dest='hypotheses_format', choices=['trn', 'ctm'], default='trn', help='')\nparser.add_argument('--ip', dest='insertion_penalty', type=float, help='')\nparser.add_argument('--ipf', dest='insertion_penalty_file', help='')\nparser.add_argument('--ams', dest='acoustic_model_scale_factor', type=float, default=0.0, help='')\nparser.add_argument('--lms', dest='language_model_scale_factor', type=float, default=0.0, help='')\nparser.add_argument('--res', dest='rescoring_method', choices=['likelihood', 'pp'], default='likelihood', help='')\nargs = parser.parse_args()\n\n\ndef rescore(phonetic_symbol_set, pronunciation_lexicon, batch, hypotheses_file, hypotheses_format, insertion_penalty,\n            insertion_penalty_file, acoustic_model_scale_factor, language_model_scale_factor, rescoring_method):\n    # get the rescoring method\n    phone_set = PhoneSet(phonetic_symbol_set)\n    lexicon_manager = LexiconManager(pronunciation_lexicon, phone_set)\n\n    if insertion_penalty:\n        # global insertion penalty\n        lexicon_manager.attach_lex_unit_penalties(insertion_penalty, insertion_penalty)\n\n    # insertion penalty for fillers\n    if insertion_penalty_file:\n        filler_manager = FillerManager(insertion_penalty_file)\n        filler_manager.attach_insertion_penalty_fillers(lexicon_manager)\n\n    if hypotheses_format == 'trn':\n        with open(hypotheses_file, 'w') as outfile:\n            for input_lattice, utterance_id in load_batch(batch, 2):\n                hypothesis_lattice = HypothesisLattice(phone_set, lexicon_manager, input_lattice)\n\n                # attach insertion penalties\n                if insertion_penalty:\n                    hypothesis_lattice.attach_insertion_penalty(lexicon_manager)\n\n                # likelihood based rescoring: set scaling factors\n                if rescoring_method == 'likelihood':\n                    hypothesis_lattice.set_scaling_factors(acoustic_model_scale_factor, language_model_scale_factor)\n\n                # lattice rescoring\n                best_path = hypothesis_lattice.rescore(rescoring_method)\n                if best_path:\n                    best_path.write(outfile.get_stream(), utterance_id)\n\n    elif hypotheses_format == 'ctm':\n        for input_lattice, utterance_id, hypothesis in load_batch(batch, 3):\n            hypothesis_lattice = HypothesisLattice(phone_set, lexicon_manager, input_lattice)\n\n            # attach 
insertion penalties\n            if insertion_penalty:\n                hypothesis_lattice.attach_insertion_penalty(lexicon_manager)\n\n            # likelihood based rescoring: set scaling factors\n            if rescoring_method == 'likelihood':\n                hypothesis_lattice.set_scaling_factors(acoustic_model_scale_factor, language_model_scale_factor)\n\n            # lattice rescoring\n            best_path = hypothesis_lattice.rescore(rescoring_method)\n            if best_path:\n                with open(hypothesis, 'w') as outfile:\n                    best_path.write(outfile.get_stream(), utterance_id, utterance_id, 0.0, False, True, True)\n\n\nrescore(args.phonetic_symbol_set, args.pronunciation_lexicon, args.batch, args.hypotheses_file,\n        args.hypotheses_format, args.insertion_penalty, args.insertion_penalty_file,\n        args.acoustic_model_scale_factor, args.language_model_scale_factor, args.rescoring_method)","sub_path":"tools/lattice_editor/lattice_rescore.py","file_name":"lattice_rescore.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"35838602","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport rospy\nimport numpy as np\nfrom xycar_msgs.msg import xycar_motor\nfrom stanley_follower import StanleyController\nfrom bump import Bumper\nfrom Stop import Stop\n\nimport time\n\nclass Xycar(object):\n\n    def __init__(self) :\n        self.rate = rospy.Rate(10)\n        self.pub = rospy.Publisher('xycar_motor', xycar_motor, queue_size=1)\n        self.msg = xycar_motor()\n\n        self.stanley_follower = StanleyController()\n        self.stop = Stop()\n        self.bump = Bumper()\n    \n    \n    def control(self) :\n        \n        self.stanley_follower.Control()\n        self.stop.Detect()\n        self.bump.BumpAction()\n\n        # Tracking (Stanley method) provides the baseline speed and steering angle.\n        self.msg.speed = self.stanley_follower.speed\n        self.msg.angle = self.stanley_follower.angle\n        \n        # If there is no speed bump, bump_speed is 0 and has no effect;\n        # when the IMU sensor detects a bump, bump_speed changes to values such as 15 or -25,\n        # which affects the speed, and the steering angle is set to 0.\n        self.msg.speed += self.bump.bump_speed\n        if self.bump.bump_speed != 0:\n            self.msg.angle = 0\n        \n        # Stop for 2 seconds when a stop line is detected\n        if self.stop.check == True :\n            print(\"stop line\")\n            time.sleep(2)\n            self.msg.speed = 20\n            self.msg.angle = 0\n            self.pub.publish(self.msg)\n            self.stop.check = False\n\n\n        self.pub.publish(self.msg)\n        \n        self.rate.sleep()\n","sub_path":"final/src/xycar_lidar_test.py","file_name":"xycar_lidar_test.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"416290670","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/pygp/demo/demo_gpr_shiftx.py\n# Compiled at: 2013-04-10 06:45:39\n__doc__ = '\\nApplication Example of GP regression\\n====================================\\n\\nThis Example shows the Squared Exponential CF\\n(:py:class:`covar.se.SEARDCF`) preprocessed by shiftCF(:py:class`covar.combinators.ShiftCF) and combined with noise\\n:py:class:`covar.noise.NoiseISOCF` by summing them up\\n(using :py:class:`covar.combinators.SumCF`).\\n'\nfrom pygp.covar import se, noise, combinators\nfrom pygp.priors import lnpriors\nfrom pygp.plot import gpr_plot\nimport logging as LG, numpy.random as random, pylab as PL, scipy as SP\nfrom pygp.gp.gp_base import GP\nfrom pygp.optimize.optimize_base import opt_hyper\n\ndef run_demo():\n    LG.basicConfig(level=LG.INFO)\n    PL.figure()\n    random.seed(1)\n    n_replicates 
= 4\n xmin = 1\n xmax = 2.5 * SP.pi\n x1_time_steps = 10\n x2_time_steps = 20\n x1 = SP.zeros(x1_time_steps * n_replicates)\n x2 = SP.zeros(x2_time_steps * n_replicates)\n for i in xrange(n_replicates):\n x1[(i * x1_time_steps):((i + 1) * x1_time_steps)] = SP.linspace(xmin, xmax, x1_time_steps)\n x2[(i * x2_time_steps):((i + 1) * x2_time_steps)] = SP.linspace(xmin, xmax, x2_time_steps)\n\n C = 2\n sigma1 = 0.15\n sigma2 = 0.15\n n_noises = 1\n b = 0\n y1 = b * x1 + C + 1 * SP.sin(x1)\n y1 += sigma1 * random.randn(y1.shape[0])\n y1 -= y1.mean()\n y2 = b * x2 + C + 1 * SP.sin(x2)\n y2 += sigma2 * random.randn(y2.shape[0])\n y2 -= y2.mean()\n for i in xrange(n_replicates):\n x1[i * x1_time_steps:(i + 1) * x1_time_steps] += 0.7 + i / 2.0\n x2[i * x2_time_steps:(i + 1) * x2_time_steps] -= 0.7 + i / 2.0\n\n x1 = x1[:, SP.newaxis]\n x2 = x2[:, SP.newaxis]\n x = SP.concatenate((x1, x2), axis=0)\n y = SP.concatenate((y1, y2), axis=0)\n X = SP.linspace(xmin - n_replicates, xmax + n_replicates, 100 * n_replicates)[:, SP.newaxis]\n dim = 1\n replicate_indices = []\n for i, xi in enumerate((x1, x2)):\n for rep in SP.arange(i * n_replicates, (i + 1) * n_replicates):\n replicate_indices.extend(SP.repeat(rep, len(xi) / n_replicates))\n\n replicate_indices = SP.array(replicate_indices)\n n_replicates = len(SP.unique(replicate_indices))\n logthetaCOVAR = [\n 1, 1]\n logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))\n logthetaCOVAR.extend([sigma1])\n logthetaCOVAR = SP.log(logthetaCOVAR)\n hyperparams = {'covar': logthetaCOVAR}\n SECF = se.SqexpCFARD(dim)\n noiseCF = noise.NoiseCFISO()\n shiftCF = combinators.ShiftCF(SECF, replicate_indices)\n CovFun = combinators.SumCF((shiftCF, noiseCF))\n covar_priors = []\n covar_priors.append([lnpriors.lnGammaExp, [1, 2]])\n for i in range(dim):\n covar_priors.append([lnpriors.lnGammaExp, [1, 1]])\n\n for i in range(n_replicates):\n covar_priors.append([lnpriors.lnGauss, [0, 0.5]])\n\n for i in range(n_noises):\n covar_priors.append([lnpriors.lnGammaExp, [1, 1]])\n\n covar_priors = SP.array(covar_priors)\n priors = {'covar': covar_priors}\n Ifilter = {'covar': SP.ones(n_replicates + 3)}\n gpr = GP(CovFun, x=x, y=y)\n opt_model_params = opt_hyper(gpr, hyperparams, priors=priors, gradcheck=True, Ifilter=Ifilter)[0]\n M, S_glu = gpr.predict(opt_model_params, X)\n T = opt_model_params['covar'][2:2 + n_replicates]\n PL.subplot(212)\n gpr_plot.plot_sausage(X, M, SP.sqrt(S_glu), format_line=dict(alpha=1, color='g', lw=2, ls='-'))\n gpr_plot.plot_training_data(x, y, shift=T, replicate_indices=replicate_indices, draw_arrows=2)\n PL.suptitle('Example for GPTimeShift with simulated data', fontsize=23)\n PL.title('Regression including time shift')\n PL.xlabel('x')\n PL.ylabel('y')\n ylim = PL.ylim()\n gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)\n priors = {'covar': covar_priors[[0, 1, -1]]}\n hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}\n opt_model_params = opt_hyper(gpr, hyperparams, priors=priors, gradcheck=True)[0]\n PL.subplot(211)\n M, S_glu = gpr.predict(opt_model_params, X)\n gpr_plot.plot_sausage(X, M, SP.sqrt(S_glu), format_line=dict(alpha=1, color='g', lw=2, ls='-'))\n gpr_plot.plot_training_data(x, y, replicate_indices=replicate_indices)\n PL.title('Regression without time shift')\n PL.xlabel('x')\n PL.ylabel('y')\n PL.ylim(ylim)\n PL.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.8, wspace=0.4, hspace=0.4)\n PL.show()\n\n\nif __name__ == '__main__':\n 
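# note added: run_demo() fits the GP twice -- with the per-replicate ShiftCF\n    # time-shift covariance and without it -- and plots both fits for comparison\n    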
run_demo()","sub_path":"pycfiles/pygpar-1.0.tar/demo_gpr_shiftx.py","file_name":"demo_gpr_shiftx.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"322966492","text":"\"\"\"\nmodel_grid_utils.py\n(C) Zachary R. Claytor\nInstitute for Astronomy\nUniversity of Hawaiʻi\n2019 July 1\n\nPython utilities designed to interact with output model grids from the Yale\nRotating stellar Evolution Code (YREC, Demarque et al. 2008). The grid of\nmodels that accompany this software were produced by van Saders & \nPinsonneault (2013) and updated by Claytor et al. (2019, in prep) using Castelli & \nKurucz (2004) model atmospheres, which tabulate opacity information for various\nmetallicities and allow for alpha-enhancement. The rotation code within YREC\nassumes a stellar-wind-driven braking law, and specific details on braking\nparameters can be found in the file `braking_law_parameters.txt`.\n\nThese models span the following mass, metallicity, and alpha-enhancement\n(grid parameters can be changed in `config.py`):\n\n | min | max | step\n -----------------------------\n M/Msun | 0.3 | 2.0 | 0.01\n [M/H] | -1.0 | 0.5 | 0.5\n [alpha/M] | 0.0 | 0.4 | 0.4\n -----------------------------\n\nAccording to Castelli & Kurucz (2004), [M/H] is log10(Z/Zsun), and \nalpha includes O, Ne, Mg, Si, S, Ar, Ca, and Ti.\n\nThe YREC output files have the format `met_###_alpha_###.out`, where ###\nspecifies the metallicity and alpha-enhancement for the models therein. The\nformatting is such that met_-050_alpha_040.out corresponds to the set of tracks\nwith [M/H] = -0.5 and [alpha/M] = 0.4. Any given `.out` file contains an\nevolution track for every mass in the grid. More details on these `.out` files\ncan be found in the files themselves.\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\n\nfrom .config import mass_grid, met_grid, alpha_grid\nfrom .config import model_path, column_labels\nfrom .config import eep_path, primary_eep_indices\n\n\ndef pickle_tracks(modelfile, columnfile=model_path+\"column_labels.txt\"):\n \"\"\"\n Takes YREC output files for a set of evolution tracks and converts\n them to pandas DataFrames, then saves them to pickles in the same\n directory as the model files.\n Each YREC output file will yield a set of pickle files, one for each\n evolutionary track contained in the model file.\n\n PARAMETERS\n ----------\n `modelfile`: a string containing the full path to the desired model file\n\n `columnfile`: a string containing the full path to the file listing the\n column names to be assigned in the output DataFrame. 
Each\n                   line in this file should contain a single label, and each\n                   column in the output file must have a corresponding label.\n                   Any columns that the user does not wish to be saved in the \n                   pickled track should have a `#` somewhere on the label line\n                   in the columnfile.\n\n    RETURNS nothing.\n    \"\"\"\n\n    with open(columnfile, \"r\") as cf:\n        # Read column labels, but we will use only labels with no '#'.\n        column_labels = np.asarray([line.strip() for line in cf.readlines()])\n        masked = np.asarray([\"#\" in label for label in column_labels])\n\n    with open(modelfile, \"r\") as f:\n        header = f.readline()\n        # Header format: ' NUMBER OF TRACKS XYZ ...'\n        # Each file contains `ntracks` evolutionary tracks, each with\n        # `nsteps` steps\n        ntracks = int(header[18:21])\n        nsteps = np.zeros(ntracks, dtype=int)\n        # initial mass and period also specified in preamble\n        mass_init = np.zeros(ntracks, dtype=float)\n        period_init = np.zeros(ntracks, dtype=float)\n\n        # read preamble. first column is an unnecessary index.\n        for i in range(ntracks):\n            line = f.readline().split()\n            _, nsteps[i], mass_init[i], period_init[i] = line\n\n        \n        # read the column label line, but don't use it\n        dummy_labels = f.readline()\n\n        # begin reading tracks\n        for i in range(ntracks):\n            # put together output filename\n            mass_str = \"_mass_%s.pkl\" %_to_string(mass_init[i])\n            out_fname = modelfile.replace(\".out\", mass_str)\n\n            track = np.zeros((nsteps[i], len(column_labels)))\n            for j in range(nsteps[i]):\n                track[j] = f.readline().split()\n\n            # put track into DataFrame, leaving out unwanted columns, then save.\n            df_track = pd.DataFrame(track[:,~masked], columns=column_labels[~masked])\n            df_track.to_pickle(out_fname)\n\n\ndef get_full_track(mass, met, alpha, labels=None, \n                   read_path=model_path, return_fname=False):\n    \"\"\"\n    Obtains the desired stellar evolutionary track from the corresponding\n    pickle file\n\n    PARAMETERS\n    ----------\n    `mass`: (float) the mass of the star on the desired track, \n            in solar mass units\n\n    `met`: (float) the metallicity ([M/H]) of the star on the desired track\n\n    `alpha`: (float) the alpha-enhancement ([alpha/M]) of the star on \n             the desired track\n\n    `labels`: (list of str) the column labels for desired stellar parameters.\n              Default is None, which returns all parameters.\n\n    `read_path`: a string containing the path to the directory containing \n                 the model pickle files. Default is \"models/\".\n\n    `return_fname`: if True, returns the name of the file being read. This\n                    is mostly for convenience when converting to Equivalent-\n                    Evolutionary-Point (EEP) based tracks, where the filename\n                    is different only by the \"eep\" prefix. 
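(For example -- filenames shown for illustration --\n                    \"met_-050_alpha_040_mass_132.pkl\" would become\n                    \"eep_met_-050_alpha_040_mass_132.pkl\".)\n                    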
Default is False.\n\n RETURNS\n -------\n `track`: a pandas DataFrame containing the specified evolutionary track\n\n `fname`: (optional) the name of the file containing the desired track\n \"\"\"\n \n # convert input values to strings as they appear in filenames\n mass_str = _to_string(mass)\n met_str = _to_string(met)\n alpha_str = _to_string(alpha)\n\n fname = \"met_%s_alpha_%s_mass_%s.pkl\" %(met_str, alpha_str, mass_str)\n track = pd.read_pickle(read_path+fname)\n if labels is not None:\n track = track[labels]\n\n if return_fname:\n return track, fname\n return track\n\n\ndef _to_string(val):\n \"\"\"\n Converts a given float (`val`) of mass, metallicity, or alpha \n enhancement to a string (`my_str`) formatted for the model filename.\n For example, a metallicity [M/H] = -0.5 corresponds to the string \n \"-050\", and the mass 1.32 corresponds to the string \"132\".\n \"\"\"\n if val < 0:\n my_str = \"-\"\n else:\n my_str = \"\"\n\n my_str += \"%03.f\" %abs(100*val)\n return my_str\n \n\ndef _pickle_series(save_path=model_path):\n \"\"\"\n Pickles all the models in the grid with specified metallicities\n and alpha enhancements.\n \"\"\"\n metallicities = [_to_string(met) for met in met_grid]\n alphas = [_to_string(alf) for alf in alpha_grid]\n\n n_total = len(metallicities)*len(alphas)\n with tqdm(total=n_total) as pbar:\n for met in metallicities:\n for alf in alphas:\n fname = save_path + \"met_%s_alpha_%s.out\" %(met, alf)\n pickle_tracks(fname)\n pbar.update()\n\n\ndef _pickle_pool(save_path=model_path):\n \"\"\"\n Pickles all the models in the grid with specified metallicities\n and alpha enhancements.\n \"\"\"\n metallicities = [_to_string(met) for met in met_grid]\n alphas = [_to_string(alf) for alf in alpha_grid]\n\n fnames = []\n for met in metallicities:\n for alf in alphas:\n fnames.append(save_path + \"met_%s_alpha_%s.out\" % (met, alf))\n\n print(\"Pickling evolution tracks...\")\n with Pool() as pool:\n with tqdm(total=len(fnames)) as pbar:\n for i, _ in enumerate(pool.imap_unordered(pickle_tracks, fnames)):\n pbar.update()\n\n\ndef pickle_all_tracks(use_pool=False):\n \"\"\"\n Wrapper for functions to pickle evolution tracks.\n Allows user to pickle in series or in parallel using multiprocessing.Pool.\n \"\"\"\n if use_pool:\n _pickle_pool()\n else:\n _pickle_series()\n\n\ndef get_eep_track(mass, met, alpha, labels=\"all\",\n re_index=None):\n \"\"\"\n Given mass, metallicity, and alpha-enhancement, we read and return an\n Equivalent-Evolutionary-Phase- (EEP) based track from file with desired\n column labels.\n\n User can optionally reindex the EEP-based track. 
Indices outside\n    the current range will be set to NaN.\n    \"\"\"\n    met_str = _to_string(met)\n    alpha_str = _to_string(alpha)\n    mass_str = _to_string(mass)\n    fname = \"eep_met_%s_alpha_%s_mass_%s.pkl\" %(met_str, alpha_str, mass_str)\n\n    # eep_path is defined in config.py\n    try:\n        if labels == \"all\":\n            eep_track = pd.read_pickle(eep_path+fname)\n        else:\n            eep_track = pd.read_pickle(eep_path+fname)[labels]\n        if re_index is not None:\n            eep_track = eep_track.reindex(range(re_index))\n        return eep_track\n    except FileNotFoundError:\n        return np.nan\n\n\ndef _import_model_grid(labels=\"all\"):\n    \"\"\"Gets the grid of all EEP-based evolution tracks; return as nested list.\n    \"\"\"\n\n    re_index = primary_eep_indices[-1]+1\n    grid_tracks = [[[get_eep_track(m, z, a, labels, re_index=re_index) \n                        for a in alpha_grid]\n                        for z in met_grid]\n                        for m in mass_grid]\n    return grid_tracks\n\n\nif __name__ == \"__main__\":\n    pickle_all_tracks(use_pool=True)\n","sub_path":"kiauhoku/model_grid_utils.py","file_name":"model_grid_utils.py","file_ext":"py","file_size_in_byte":9437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"579595924","text":"from utils.custom_exceptions import UserAlreadyExist\nfrom datetime import datetime\n\nclass SampleUserService:\n    \"\"\" Business Layer\n\n    Attributes:\n        sample_user_dao: the TestUserDao class\n\n    Author: 홍길동\n\n    History:\n        2020-20-20(홍길동): initial creation\n        2020-20-21(홍길동): first revision\n        2020-20-22(홍길동): second revision\n    \"\"\"\n\n    def __init__(self, sample_user_dao):\n        self.sample_user_dao = sample_user_dao\n\n    def get_sample_user_service(self, connection, data):\n        \"\"\"Looks up the user with the given id\n\n        Args:\n            connection: database connection object\n            data      : dict object passed in from the View\n\n        Author: 홍길동\n\n        Returns:\n            return [{'id': 12, 'name': '홍길동', 'gender': 'male', 'age': '18'}]\n\n        Raises:\n            400, {'message': 'key error', 'errorMessage': 'key_error'}: invalid key value\n\n        History:\n            2020-20-20(홍길동): initial creation\n            2020-20-21(홍길동): first revision\n            2020-20-22(홍길동): second revision\n        \"\"\"\n\n        try:\n            user_id = data['user_id']\n            return self.sample_user_dao.get_dao(connection, user_id)\n\n        except KeyError:\n            raise KeyError('key_error')\n\n    def post_sample_user_service(self, connection, data):\n        \"\"\"POST method: create a user\n\n        Args:\n            connection: database connection object\n            data      : dict object passed in from the View\n\n        Author: 홍길동\n\n        Returns:\n            return (): returns an empty value\n\n        Raises:\n            400, {'message': 'key error', 'errorMessage': 'key_error'} : invalid key value\n            400, {'message': 'user already exist', 'errorMessage': 'already_exist'}: duplicate user exists\n\n        History:\n            2020-20-20(홍길동): initial creation\n            2020-20-21(홍길동): first revision\n            2020-20-22(홍길동): second revision\n        \"\"\"\n\n        try:\n            # duplicate check\n            username = self.sample_user_dao.get_username(connection, data)\n\n            if username:\n                raise UserAlreadyExist('already_exist')\n\n            return self.sample_user_dao.post_dao(connection, data)\n\n        except KeyError:\n            raise KeyError('key_error')\n\n    def patch_sample_user_service(self, connection, data):\n        \"\"\"PATCH method: update user information\n\n        Args:\n            connection: database connection object\n            data      : json object passed in from the front end\n\n        Author: 홍길동\n\n        Returns:\n            return (): returns an empty value\n\n        Raises:\n            400, {'message': 'key error', 'errorMessage': 'key_error'}: invalid key value\n\n        History:\n            2020-20-20(홍길동): initial creation\n            2020-20-21(홍길동): first revision\n            2020-20-22(홍길동): second revision\n        \"\"\"\n\n        try:\n            return self.sample_user_dao.patch_dao(connection, data)\n\n        except KeyError:\n            raise 
KeyError('key_error')\n","sub_path":"service/sample_user_service.py","file_name":"sample_user_service.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"182063530","text":"from math import exp\n\n\ndef simple_backprop(x, y, z):\n    \"\"\"Performs a simple backprop using f(x,y,z) = (x+y)z\"\"\"\n    # f(x,y,z) = (x+y)z\n\n    # forward pass\n    q = x + y\n    f = q * z\n\n    # backward pass\n    dfdz = q\n    dfdq = z\n    dqdx = 1.0\n    dqdy = 1.0\n    dfdx = dfdq * dqdx\n    dfdy = dfdq * dqdy\n\n    return dfdz, dfdx, dfdy\n\n\ndef sigmoid_backpropagation(w, x):\n    \"\"\"Performs a simple backpropagation for a 2-dimensional neuron using the sigmoid function\n    f(w,x) = 1 / (1 + e^-(w0*x0 + w1*x1 + w2))\"\"\"\n    # forward\n    dot = w[0] * x[0] + w[1] * x[1] + w[2]\n    f = 1 / (1 + exp(-dot))\n\n    # backward\n    ddot = (1 - f) * f\n    dw0 = ddot * x[0]\n    dw1 = ddot * x[1]\n    dw2 = ddot\n    dx0 = ddot * w[0]\n    dx1 = ddot * w[1]\n    return [dw0, dw1, dw2], [dx0, dx1]\n\n\nprint(simple_backprop(-2.0, 5.0, -4.0))\nprint(sigmoid_backpropagation([2, -3, -3], [-1, -2]))\n","sub_path":"backprop.py","file_name":"backprop.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"278954259","text":"#coding: utf-8\n\n#######################################\n# TESTING PURPOSE ONLY MODELS!! #\n# DO NOT ADD THE APP TO INSTALLED_APPS#\n#######################################\n\nfrom django.db import models\n\nGENDER_CH = [('M', 'male'), ('F', 'female')]\n\nclass Person(models.Model):\n    gender = models.CharField(max_length=1, choices=GENDER_CH)\n    happy = models.BooleanField(default=True)\n    name = models.CharField(max_length=30)\n    age = models.IntegerField()\n    bio = models.TextField()\n    birthday = models.DateField()\n    appointment = models.DateTimeField()\n\n    #backward compatibility with Django 1.1\n    try:\n        wanted_games_qtd = models.BigIntegerField()\n    except AttributeError:\n        wanted_games_qtd = models.IntegerField()\n\nclass Dog(models.Model):\n    owner = models.ForeignKey('Person')\n    breed = models.CharField(max_length=50)\n\nclass DummyIntModel(models.Model):\n    int_field = models.IntegerField()\n    small_int_field = models.SmallIntegerField()\n\n    #backward compatibility with Django 1.1\n    try:\n        big_int_field = models.BigIntegerField()\n    except AttributeError:\n        big_int_field = models.IntegerField()\n\nclass DummyPositiveIntModel(models.Model):\n    positive_small_int_field = models.PositiveSmallIntegerField()\n    positive_int_field = models.PositiveIntegerField()\n\nclass DummyNumbersModel(models.Model):\n    float_field = models.FloatField(null=True)\n\nclass DummyDecimalModel(models.Model):\n    decimal_field = models.DecimalField(max_digits=5, decimal_places=2)\n","sub_path":"model_mommy/tests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"448605091","text":"# !/usr/bin/env python\n# -*- coding: utf8 -*-\n# need ChromeDriver v2.37 for Chrome 64\nimport os , re\nimport time\nimport chromedriver_binary # Adds chromedriver binary to path; with this the page can be opened automatically\nfrom selenium import webdriver\ndef get_title_year_yinyong(pmid):\n    driver = webdriver.Chrome()\n    driver.get('http://xueshu.baidu.com/')\n    time.sleep(3)\n    # how elements are located\n    # find the search box (\"百度一下\")\n    driver.find_element_by_id(\"kw\").send_keys(pmid)\n    # click the \"百度一下\" search button\n    driver.find_element_by_id(\"su\").click()\n    # maximize the window\n    
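# note added: the fixed time.sleep() calls above assume the page loads in time;\n    # selenium's WebDriverWait would be a more robust alternative\n    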
#driver.maximize_window()\n    # take a screenshot of the page\n    #driver.save_screenshot(\"./baidu.png\")\n    # driver: get the html string\n    #print(driver.page_source) # content\n    #print(driver.current_url) # url\n    # open the first link\n    links = driver.find_elements_by_xpath(\"//h3[@class='t c_font']\") # use xpath to find the h3 elements on the page\n    for link in links:\n        if link.find_element_by_xpath(\"a\"):\n            t = link.find_element_by_xpath(\"a\")\n            time.sleep(1)\n            t.click()\n            break\n    # move the handle to the current page\n    handles = driver.window_handles\n    # switch the handle to the newly opened window\n    driver.switch_to.window(handles[1])\n    datasourse = driver.page_source.split(u'') # split on this marker\n    #com_ru_all = re.compile(r'publish_text.*paper_source',re.S)\n    #content = com_ru_all.search(datasourse[1])\n    allcontent = re.compile(r'title=\\\"\\《(.*)\\》\\\".*?(\\d+)<\\/span>.*?sc_cited\\'\\}\\\">.*?(\\d+).*?paper_src_wr',re.S)\n    zazhititle = allcontent.search(datasourse[1]).group(1).lstrip()\n    year = allcontent.search(datasourse[1]).group(2)\n    yinyong = allcontent.search(datasourse[1]).group(3)\n    # quit the browser\n    time.sleep(3)\n    driver.quit()\n    return zazhititle,year,yinyong\n\n#print(zazhititle,\"\\n\",year,\"\\n\",yinyong)\n\n#compile_rule = re.compile(r'被引量:\\xa0\\n.*(\\d+).*\\n',re.S)\n#yinyong = compile_rule.search(datasourse[1]).group(1) # parentheses capture in python; group(1) corresponds to $1 in Perl\n\"\"\"This is how the citation count is obtained. The hard part to match is 被引量:\\xa0\\n; inspecting everything with findall first revealed that the ':  ' after it is read as \\xa0 \"\"\"\n#print(yinyong)\n#link.click()\n\n\n","sub_path":"bioinformation/serach_by_baiduxueshu.py","file_name":"serach_by_baiduxueshu.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"214813940","text":"#!/usr/bin/python3\n\nimport signal\nimport time\nimport sys\nfrom sk import ExchangeObject, DistributedMonitor\nimport random\n\ndef ctrl_c(signal, frame):\n    print('Stopping...')\n    ds.stop_zmq()\n    sys.exit(0)\n\nsignal.signal(signal.SIGINT, ctrl_c)\n\ntoken = sys.argv[1]\nif token == \"1\":\n    token = True\nelse:\n    token = False\nmy_id = sys.argv[2]\nworkers = sys.argv[2:]\n\nds = DistributedMonitor(my_id, token, workers)\nds.run_zmq()\n\n\ni = 1 # product counter\nwhile True:\n    prod = ds.acquire()\n    if not prod:\n        prod = [0]\n    else:\n        tmp = prod[-1] + 1\n        prod.append(tmp)\n\n    print(\"Produces...\", prod)\n    i+=1\n    time.sleep(random.randint(1,3))\n    ds.release(prod)\n\n    if (i >= 20):\n    \tprint(\"Made:\", i)\n    \tds.stop_zmq()\n    \tsys.exit(0)\n\n    time.sleep(random.randint(1,3))\n","sub_path":"Experiment_9_TokenBased/Product.py","file_name":"Product.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"569147040","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 17 09:36:51 2019\r\n\r\n@author: FartherSkies\r\n\"\"\"\r\n\r\n# Polynomial Regression\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('Position_Salaries.csv')\r\nX = dataset.iloc[:, 1:2].values\r\ny = dataset.iloc[:, 2].values\r\n\r\n# Splitting the dataset into the Training set and Test set\r\n\"\"\"from sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\r\n\r\n# Feature Scaling\r\n\"\"\"from sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nX_train = sc_X.fit_transform(X_train)\r\nX_test = sc_X.transform(X_test)\"\"\"\r\n\r\n# Fitting Linear Regression to the dataset\r\nfrom sklearn.linear_model 
import LinearRegression\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(X, y)\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures (degree=4)\r\nX_poly = poly_reg.fit_transform (X)\r\n\r\nlin_reg_2 = LinearRegression()\r\nlin_reg_2.fit(X_poly, y)\r\n\r\nplt.scatter (X, y, color='green')\r\nplt.plot (X, lin_reg.predict(X), color='red')\r\nplt.plot (X, lin_reg_2.predict(X_poly), color='blue')\r\n\r\nplt.xlabel ('Position Level')\r\nplt.ylabel ('Salary')\r\nplt.show()\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nprint ('Linear: '+ str (mean_squared_error(y, lin_reg.predict(X))) )\r\nprint ('Poly: '+ str (mean_squared_error(y, lin_reg_2.predict(X_poly))) )\r\n\r\nprint (str (mean_squared_error(y, lin_reg.predict(X)) >\r\n            mean_squared_error(y, lin_reg_2.predict(X_poly))))\r\n\r\n'''final prediction'''\r\n\r\nyears = [[6.5]]\r\n\r\nlin_reg.predict (years)\r\nlin_reg_2.predict (poly_reg.fit_transform(years))","sub_path":"Udemy/Machine Leartning A-Z/Polynomial/my_polynomial_regression.py","file_name":"my_polynomial_regression.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"83208918","text":"from math import sqrt\n\ndef div_propri(n):\n    l=[]\n    for d in range(2,int(sqrt(n))+1): # consider the numbers from 2 (skipping 1 because we only want proper divisors) up to and including sqrt(n)\n        if n%d==0:\n            l.append(d)\n            if d**2!=n:\n                l.append(int(n/d))\n    return l # return ends the function\n\ndef n_primi(ls):\n    for n in ls[:]:\n        l=div_propri(n) # run the div_propri function defined above\n        if l!=[]:\n            ls.remove(n)\n    return ls\n\ndef modi(ls,k):\n    num_primi=n_primi(ls[:]) # new list to work on, matching the one produced by the function above\n    for n in ls[:]: # copy of the list so the original list is not modified\n        l=div_propri(n) \n        if len(l)!=k:\n            ls.remove(n)\n    return num_primi\n\n","sub_path":"students/1793487/homework01/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"261174975","text":"from datetime import date, timedelta\n\nbegin = date(year=2021, month=10, day=1)\nend = date(year=2021, month=10, day=10)\nfile_path_list = list()\nfile_path_f = \"s3://mcc-operation-logs/operation-log-{}/\"\n\ncurrent = begin\nwhile current <= end:\n    file_path_list.append(file_path_f.format(current))\n    current = current + timedelta(days=1)\n\nprint(file_path_list)\n\n","sub_path":"pyspark/temp_new.py","file_name":"temp_new.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"148095815","text":"import math\n\n# Calculates the distance in meters and the angle in degrees from the current location to a given location\nclass RelativPositionCalculator(object):\n\n    def __init__(self, current_latitude, current_longitude):\n        self.current_latitude = current_latitude\n        self.current_longitude = current_longitude\n\n    def calculate_relative_position(self, target_longitude, target_latitude):\n        longitude = self.current_longitude\n        latitude = self.current_latitude\n        dlong = (target_longitude-longitude) / 180 * math.pi\n        lat1rad= latitude / 180 * math.pi\n        lat2rad= target_latitude / 180 * math.pi\n        \n        targetDistance = 6378.388 * math.acos(math.sin(latitude / 180 * math.pi) * math.sin(target_latitude / 180 * math.pi) + math.cos(latitude / 
180 * math.pi) * math.cos(target_latitude / 180 * math.pi) * math.cos(target_longitude / 180 * math.pi - longitude / 180 * math.pi))\n        \n        y = math.sin(dlong) * math.cos(lat2rad)\n        x = math.cos(lat1rad)* math.sin(lat2rad) - math.sin(lat1rad)* math.cos(lat2rad) * math.cos(dlong)\n\n        targetAngle = math.atan2(y, x) * 180 / math.pi\n        \n        if targetAngle < 0:\n        \ttargetAngle = (180 + (180 + targetAngle))\n        \n        bearing = int(targetAngle)\n        target_distance_meters = int(targetDistance * 1000)\n\n        return RelativePosition(target_distance_meters, bearing)\n    \nclass RelativePosition(object):\n    def __init__(self, distance, bearing):\n        self.distance = distance\n        self.bearing = bearing\n\n# Calculates the x and y pixel position of a given geo coordinate. \n# It needs two known reference points for that.\nclass GeoPositionToPixelConverter(object):\n    def __init__(self, ref_point1, ref_point2):\n\n        x_offset = min(ref_point1.x, ref_point2.x)\n        y_offset = min(ref_point1.y, ref_point2.y)\n\n        south = math.radians(min(ref_point1.latitude, ref_point2.latitude))\n        north = math.radians(max(ref_point1.latitude, ref_point2.latitude))\n        west = math.radians(min(ref_point1.longitude, ref_point2.longitude))\n        east = math.radians(max(ref_point1.longitude, ref_point2.longitude))\n\n        height = abs(ref_point1.y - ref_point2.y)\n        width = abs(ref_point1.x - ref_point2.x)\n\n        ymin = self.mercat(south)\n        ymax = self.mercat(north)\n\n        xFactor = width/(east-west)\n        yFactor = height/(ymax - ymin)\n\n        self.xFactor = xFactor\n        self.yFactor = yFactor\n        self.west = west\n        self.x_offset = x_offset\n        self.y_offset = y_offset\n        self.ymax = ymax\n\n    # mercator projection\n    def mercat(self, lat):\n        return math.log(math.tan(lat/2 + math.pi/4))\n\n    def convert(self, latitude, longitude):\n        \n        latitude_rad = math.radians(latitude)\n        longitude_rad = math.radians(longitude)\n\n        targetX = (longitude_rad - self.west) * self.xFactor + self.x_offset\n        targetY = (self.ymax - self.mercat(latitude_rad)) * self.yFactor + self.y_offset\n\n        return Point(targetX, targetY)\n\n\nclass ReferencePoint(object):\n    def __init__(self, x, y, latitude, longitude):\n        self.x = x\n        self.y = y\n        self.longitude = longitude\n        self.latitude = latitude\n\nclass Point(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n\n","sub_path":"positioning.py","file_name":"positioning.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"512220888","text":"import os\nimport subprocess\nimport sys\n\ndef main():\n    if len(sys.argv) != 2:\n        print('Please specify directory')\n    else:\n        startDir = sys.argv[1]\n        workingDir = '%s/Pictures' % startDir\n        #arr = os.listdir(workingDir)\n        #print(arr)\n        \n        for file in os.listdir(workingDir):\n            if file.endswith(\".png\"):\n                #pngFile = os.path.join(workingDir, file)\n                subprocess.call(['magick',file,'-trim','+repage',file], cwd=workingDir)\n        #scriptName = 'resize-%s.bat' % startDir\n        #subprocess.call([scriptName], cwd=startDir)\n    \n    srcDir='tale-string-searching-part1/Pictures'\n    dstDir='tale-string-searching-part1'\n    \n    dd = dict([('abab-automaton.png', 440),\n               ('aabab-automaton.png',500),\n               ('prefix-functions1.png',500),\n               ('prefix-functions2.png',500)])\n    \n    for key in dd:\n        subprocess.call(['magick','convert', '%s/%s' % (srcDir,key), '-resize', '%d' % dd[key], '%s/%s' % (dstDir,key) ]) \n    \n    subprocess.call(['pandoc','-t','revealjs','-s',\n\t\t '-o','content.html','content.md','--slide-level=2',\n\t\t 
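# note added: '-t revealjs' renders the markdown as an HTML slide deck; the\n\t\t # options below point it at a local reveal.js checkout and enable MathJax\n\t\t 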
'-V','revealjs-url=../../reveal.js','--metadata', 'pagetitle=\"Uzdevumi\"',\n \t\t'--mathjax=https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML',\n\t\t '-V','theme=white'], cwd=startDir)\n\n\nif __name__=='__main__':\n main()\n\n","sub_path":"src/site/algorithms/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501917037","text":"import datetime\nimport logging\n\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom crawler.items import VideoItem\nfrom crawler.modules import url_helper, print_helper\nfrom crawler.modules.third_party_js import third_party_js\nfrom crawler.modules.video_player_validators import other_validators\nfrom crawler.modules.link_validator import link_validator\nfrom crawler.modules.video_player_validators.ooyala_validator import OoyalaValidator\nfrom crawler.modules.video_player_validators.vimeo_validator import VimeoValidator\n\n\nclass VideoPlayerSpider(scrapy.Spider):\n name = \"video_player_spider\"\n start_time = None\n end_time = None\n\n def __init__(self, *args, **kwargs):\n super(VideoPlayerSpider, self).__init__()\n self.start_time = datetime.datetime.now() # Keep track of start time\n assert kwargs and \"start_urls\" in kwargs, \"[start_urls] param must not be empty and separated by comma!\"\n self.start_urls = kwargs[\"start_urls\"].split(\",\") # maybe ast.literal_eval ?\n self.domain_size = {}\n self.allowed_domains = set()\n for url in self.start_urls:\n domain = url_helper.extract_domain(url)\n self.domain_size[domain] = 0\n self.allowed_domains.add(domain)\n\n self.max_depth = int(kwargs[\"max_depth\"]) if \"max_depth\" in kwargs else 0\n assert self.max_depth >= 0, \\\n \"[max_depth] param must be non-negative integer\"\n\n self.name = str(kwargs[\"name\"]) if \"name\" in kwargs else \"video_player_spider\"\n\n if \"stats_path\" in kwargs:\n self.stats_path = kwargs[\"stats_path\"]\n else:\n self.stats_path = \"\"\n self.total_size = 0\n\n self.crawled_count = 0\n print_helper.print_line_with_wrapper(\"Crawler %s initialized with MAX_DEPTH: %d START_URLS: %d\"\n % (self.name, self.max_depth, len(self.start_urls)), \"-\")\n\n def parse(self, response):\n url = response.url\n # print(url)\n response_domain = url_helper.extract_domain(url)\n\n self.crawled_count += 1\n self.total_size += 1\n self.domain_size[response_domain] += 1\n if self.crawled_count == 20:\n self.crawled_count = 0\n print(\"Spider %s - Crawled size : %d !\" % (self.name, self.total_size))\n\n youtube_player_count = other_validators.valid_youtube_players(response)\n brightcove_player_count = other_validators.valid_brightcove_players(response)\n\n vimeo_state = VimeoValidator().parse_response(response)\n ooyala_state = OoyalaValidator().parse_response(response)\n item = VideoItem()\n item[\"vendor\"] = {}\n item[\"player_type\"] = {}\n item[\"link\"] = url\n found_player = False\n\n # Begin video player validations\n if vimeo_state and vimeo_state.player_exist:\n found_player = True\n item[\"player_type\"][\"vimeo\"] = vimeo_state.player_type\n # item[\"video_ids\"] = vimeo_state.video_ids # We may not be interested in what are the exact video ids\n item[\"vendor\"][\"vimeo\"] = 0\n for tp in vimeo_state.player_type:\n item[\"vendor\"][\"vimeo\"] += vimeo_state.player_type[tp]\n print_helper.print_line_with_wrapper(\" Vimeo player found on link: \" + url + \" \", \"*\")\n\n if ooyala_state and 
ooyala_state.player_exist:\n            found_player = True\n            item[\"player_type\"][\"ooyala\"] = ooyala_state.player_type\n            item[\"vendor\"][\"ooyala\"] = 0\n            for tp in ooyala_state.player_type:\n                item[\"vendor\"][\"ooyala\"] += ooyala_state.player_type[tp]\n            print_helper.print_line_with_wrapper(\" Ooyala player found on link: \" + url + \" \", \"*\")\n\n        if youtube_player_count:\n            found_player = True\n            item[\"vendor\"][\"youtube\"] = youtube_player_count\n            # item[\"language\"] = \"en\"  # place holder until we have a function to quickly determine the major lang\n            print_helper.print_line_with_wrapper(\" YouTube player found on link: \" + url + \" \", \"*\")\n\n        if brightcove_player_count:\n            found_player = True\n            item[\"vendor\"][\"brightcove\"] = brightcove_player_count\n            print_helper.print_line_with_wrapper(\" Brightcove player found on link: \" + url + \" \", \"*\")\n\n        if found_player:\n            yield item\n\n        # we do not use the allow_domain from LinkExtractor to keep consistency with our domain middleware\n        links = LinkExtractor().extract_links(response)\n        next_urls = set()\n        for link in links:\n            url = link.url\n            if link_validator.is_valid_link(url):\n                # and not link_validator.is_static_file(page):\n                next_urls.add(url)\n\n        scripts = set(response.xpath(\"//script//@src\").extract())\n        for script in scripts:\n            if not third_party_js.is_3rd_party_js_link(script):\n                next_urls.add(script)\n\n        for url in next_urls:\n            request = scrapy.Request(response.urljoin(url), callback=self.parse)\n            yield request\n\n    def closed(self, reason):\n        import json\n        if self.stats_path != \"\":\n            self.end_time = datetime.datetime.now()\n            stats = {\n                \"start_urls\": self.start_urls,\n                \"domain_size\": self.domain_size,\n                \"max_depth\": self.max_depth,\n                \"name\": self.name,\n                \"total_size\": self.total_size,\n                \"total_time\": (self.end_time - self.start_time).total_seconds()\n            }\n            with open(self.stats_path, 'w') as outfile:\n                json.dump(stats, outfile)\n            print(\"Spider: %s wrote out stats file: %s successfully!\" % (self.name, self.stats_path))\n","sub_path":"crawler/spiders/video_player_spider.py","file_name":"video_player_spider.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"159281718","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\na=float(input(\"enter the temperature you want to convert :\"))\nb=input(\" is it in °C ? 
yes/no :\")\nif b==\"yes\":\n print(a*1.8+32)\nelse:\n print((a-32)/1.8)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Week 1/Pracise1-c.py","file_name":"Pracise1-c.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73204924","text":"import pandas as pd\nimport numpy as np\nimport os\n\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\n\nfrom tensorflow.keras.losses import MSE\nfrom tensorflow.keras.activations import relu\n\n\ndef create_input_and_output_data_FCNN(df):\n Y =();\n ECFP = ();\n data = df\n \n for ind in data.index:\n if not data[\"ECFP\"][ind] == np.inf:\n x = list(data[\"ECFP\"][ind])\n x = np.array([int(z) for z in x])\n y = np.array([float(data[\"log10_Km\"][ind])])\n if (len(x) == 1024 and not pd.isnull(y[0])):\n ECFP = ECFP + (x,);\n Y = Y + (y,);\n\n ECFP = np.array(ECFP)\n Y = np.array(Y)\n return([ECFP,Y])\n\n\ndef build_model(learning_rate=0.001, decay =10e-6, momentum=0.9, l2_parameter= 0.1): \n model = models.Sequential()\n model.add(layers.Dense(units = 256,\n kernel_regularizer=regularizers.l2(l2_parameter),\n kernel_initializer = initializers.TruncatedNormal(\n mean=0.0, stddev= np.sqrt(2./ 1024), seed=None),\n activation='relu', input_shape=(1024,)))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(units= 64,\n kernel_regularizer=regularizers.l2(l2_parameter),\n kernel_initializer = initializers.TruncatedNormal(\n mean=0.0, stddev = np.sqrt(2./ 256), seed=None),\n activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(units= 16,\n kernel_regularizer=regularizers.l2(l2_parameter),\n kernel_initializer = initializers.TruncatedNormal(\n mean=0.0, stddev = np.sqrt(2./ 128), seed=None),\n activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(1, kernel_regularizer=regularizers.l2(l2_parameter),\n kernel_initializer = initializers.TruncatedNormal(\n mean=0.0, stddev = np.sqrt(2./ 64), seed=None)))\n model.compile(optimizer=optimizers.SGD(lr=learning_rate, decay= decay, momentum=0.9, nesterov=True),\n loss='mse', metrics=['mse'])\n return model","sub_path":"notebooks_and_code/additional_code/build_FCNN_and_load_ECFPs.py","file_name":"build_FCNN_and_load_ECFPs.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249931080","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n\n# Create your models here.\n\nclass CnUser(AbstractUser):\n birthday = models.DateField(verbose_name=\"出生日期\", null=True, blank=True)\n gender = models.CharField(choices=((\"male\", \"男\"), (\"female\", \"女\")), default=\"male\", max_length=6,verbose_name=\"性别\")\n phone = models.CharField(max_length=11, null=True, blank=True,verbose_name=\"手机号\")\n avatar = models.ImageField(upload_to=\"media/avatar\", default=\"media/avatar/default_user.png\", max_length=100, verbose_name = \"头像\")\n rank = models.CharField(max_length=10, verbose_name=\"会员等级\", default=\"青铜会员\")\n\n class Meta:\n verbose_name = \"用户详情\"\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.username\n\n def toDict(self):\n json = {}\n for f in self._meta.concrete_fields:\n if f.name==\"avatar\":\n continue\n json[f.name]=f.value_from_object(self)\n return 
json","sub_path":"apps/userAdmin/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106026340","text":"#!/usr/bin/env python3\n# sliding_window.py\n\nimport re\nimport sys\n\ndef sliding_window(k, string):\n '''Returns a list of all k-mers in the given string'''\n dna = []\n end = len(string) - k + 1\n for start in range(0, end):\n dna.append(string[start:start + k])\n return dna\n\ndef gc_content(dna):\n '''Returns [0,1], the fraction of GC's in the given string'''\n dna = dna.lower()\n\n #Count the number of g's and c's\n gc=0\n for nucleotide in dna:\n if nucleotide in ['g', 'c']:\n gc += 1\n return gc/len(dna)\n\n\nif __name__ == \"__main__\":\n\n #Check to make sure there are at least two arguments\n arg_count = len(sys.argv) - 1\n if arg_count < 2:\n raise Exception(\"This script requires 2 arguments: a kmer size and then a string\")\n\n k = int(sys.argv[1])\n string = sys.argv[2]\n\n #Print each k-mer together with its GC fraction\n for kmer in sliding_window(k, string):\n print(\"{}\\t{:.2}\".format(kmer, gc_content(kmer)))\n\n","sub_path":"module1/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88465047","text":"# Disable some pylint warnings: no-self-use, missing-docstring\n# pylint: disable=R0201, C0111\n\nimport logging\nimport os\nimport pickle\nimport random\n\nfrom unittest import TestCase\nimport nose\n\nimport archinfo\nimport angr\nfrom angr.analyses.reaching_definitions import LiveDefinitions\n\nLOGGER = logging.getLogger('test_reachingdefinitions')\n\nTESTS_LOCATION = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '..', '..', 'binaries', 'tests'\n)\n\n\nclass ReachingDefinitionAnalysisTest(TestCase):\n def _run_reaching_definition_analysis(self, project, func, result_path):\n tmp_kb = angr.KnowledgeBase(project)\n reaching_definition = project.analyses.ReachingDefinitions(\n func, init_func=True, kb=tmp_kb, observe_all=True\n )\n\n unsorted_result = map(\n lambda x: {'key': x[0],\\\n 'register_definitions': x[1].register_definitions,\\\n 'stack_definitions': x[1].stack_definitions,\\\n 'memory_definitions': x[1].memory_definitions},\n reaching_definition.observed_results.items()\n )\n result = list(sorted(\n unsorted_result,\n key=lambda x: x['key']\n ))\n\n with open(result_path, 'rb') as result_file:\n expected_result = pickle.load(result_file)\n\n nose.tools.assert_list_equal(result, expected_result)\n\n\n def test_reaching_definition_analysis(self):\n def _binary_path(binary_name):\n return os.path.join(TESTS_LOCATION, 'x86_64', binary_name)\n def _result_path(binary_name):\n return os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'reachingdefinitions_results',\n 'x86_64',\n binary_name + '.pickle'\n )\n\n binaries_and_results = list(map(\n lambda binary: (_binary_path(binary), _result_path(binary)),\n ['all', 'fauxware', 'loop']\n ))\n\n for binary, result_path in binaries_and_results:\n project = angr.Project(binary, load_options={'auto_load_libs': False})\n cfg = project.analyses.CFGFast()\n\n self._run_reaching_definition_analysis(project, cfg.kb.functions['main'], result_path)\n\n\nclass LiveDefinitionsTest(TestCase):\n def test_initializing_live_definitions_for_ppc_without_rtoc_value_should_raise_an_error(self):\n arch = archinfo.arch_ppc64.ArchPPC64()\n 
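# Added illustration (not part of the original test): nose's assert_raises forwards\n # the remaining arguments to the callable, so the check below is equivalent to\n # expecting this hypothetical direct call to raise ValueError:\n # LiveDefinitions(arch=arch, init_func=True) # no rtoc_value supplied for PPC64\n 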
nose.tools.assert_raises(ValueError, LiveDefinitions, arch=arch, init_func=True)\n\n\n def test_initializing_live_definitions_for_ppc_with_rtoc_value(self):\n arch = archinfo.arch_ppc64.ArchPPC64()\n rtoc_value = random.randint(0, 0xffffffffffffffff)\n live_definition = LiveDefinitions(arch=arch, init_func=True, rtoc_value=rtoc_value)\n\n rtoc_offset = arch.registers['rtoc'][0]\n rtoc_definition = next(iter(\n live_definition.register_definitions.get_objects_by_offset(rtoc_offset)\n ))\n rtoc_definition_value = rtoc_definition.data.get_first_element()\n\n nose.tools.assert_equals(rtoc_definition_value, rtoc_value)\n\n\nif __name__ == '__main__':\n LOGGER.setLevel(logging.DEBUG)\n logging.getLogger('angr.analyses.reaching_definitions').setLevel(logging.DEBUG)\n\n nose.core.runmodule()\n","sub_path":"tests/test_reachingdefinitions.py","file_name":"test_reachingdefinitions.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98299804","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nfrom datetime import datetime\n\n# define target\nif len(sys.argv) == 2:\n\n\t#transform hostname to IPv4\n\ttarget = socket.gethostbyname(sys.argv[1])\n\t\nelse:\n\tprint(\"Invalid amount of arguments.\")\n\tprint(\"Syntax: python3 scanner.py [target]\")\n\tsys.exit()\n\t\n# create console drawing\nprint(\"-\" * 50)\nprint(\"Scanning target \" + target)\nprint(\"Time started: \" + str(datetime.now()))\nprint(\"-\" * 50)\n\ntry:\n\tfor port in range(50, 85):\n\n\t\t# socket.AF_INET is IPv4 and socket.SOCK_STREAM is port\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tsocket.setdefaulttimeout(1)\n\n\t\t# connect to our target and loop through the port range\n\t\tresult = s.connect_ex((target, port))\n\t\t\n\t\t# if the port is open connect_ex returns 0\n\t\t# otherwise it returns a non-zero error code\n\t\tif result == 0:\n\t\t\tprint(\"Port {} is open\".format(port))\n\t\ts.close()\n\n# ctrl+c exits the script\nexcept KeyboardInterrupt:\n\tprint(\"\\nStopping script.\")\n\tsys.exit()\n\n# if the host name cannot be resolved\nexcept socket.gaierror:\n\tprint(\"Hostname could not be resolved.\")\n\tsys.exit()\n\n# if we can't connect to the target IP\nexcept socket.error:\n\tprint(\"Couldn't connect to server.\")\n\tsys.exit()\n","sub_path":"badportscanner.py","file_name":"badportscanner.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617351032","text":"class MagicDictionary:\n\n def __init__(self):\n self.myDict = {}\n\n def buildDict(self, dict):\n for st in dict:\n self.myDict[st] = 1\n\n def search(self, word):\n for i in range(len(word)):\n part1 = word[:i]\n part2 = word[i+1:]\n for char in \"abcdefghijklmnopqrstuvwxyz\":\n newWord = part1 + char + part2\n if newWord != word and newWord in self.myDict:\n return True\n return False","sub_path":"LC/676_Implement_Magic_Dictionary.py","file_name":"676_Implement_Magic_Dictionary.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82915555","text":"import unittest\n\nfrom scraper_toolkit import exceptions\nfrom scraper_toolkit.components.PageFetcher import PageFetcher\nfrom scraper_toolkit.components.Parser import Parser\nfrom scraper_toolkit.components.Selector import Selector\n\nQUOTES_URL = 'http://quotes.toscrape.com/'\nBOOKS_URL = 'http://books.toscrape.com/'\n\nQUOTES_HTML = 
PageFetcher(QUOTES_URL).get_html()\n\n\nclass TestParser(unittest.TestCase):\n\n def setUp(self):\n self.quotes_parser = Parser(QUOTES_HTML)\n\n def test_get_quote_text_from_loaded_selector(self):\n self.quotes_parser.add_selector(selector='div.quote > span.text', name='quote_text', attribute='text')\n self.quotes_parser.parse()\n\n expected = '“The world as we have created it is a process of our thinking. ' \\\n 'It cannot be changed without changing our thinking.”'\n actual = self.quotes_parser.parsed[0]['quote_text']\n\n self.assertEqual(expected, actual)\n\n def test_get_fourth_quote_attributes_from_loaded_selectors(self):\n self.quotes_parser.add_selector(selector='div.quote > span.text', name='quote_text', attribute='text')\n self.quotes_parser.add_selector(selector='div.quote > span > small', name='author', attribute='text')\n self.quotes_parser.parse()\n\n expected_quote = '“The person, be it gentleman or lady, who has not pleasure in a good novel, ' \\\n 'must be intolerably stupid.”'\n expected_author = 'Jane Austen'\n\n actual_quote = self.quotes_parser.parsed[3]['quote_text']\n actual_author = self.quotes_parser.parsed[3]['author']\n\n self.assertEqual(expected_quote, actual_quote)\n self.assertEqual(expected_author, actual_author)\n\n def test_missing_parsed_attribute_raises_InvalidAttributeError(self):\n self.quotes_parser.add_selector(selector='div.quote > span.text', name='missing', attribute='missing_attr')\n\n with self.assertRaises(exceptions.InvalidAttributeError):\n self.quotes_parser.parse()\n\n def test_third_quote_has_all_five_tags(self):\n self.quotes_parser.add_selector(selector='div.quote > div > meta.keywords', name='tags', attribute='content',\n post_processing=lambda x: x.split(','))\n self.quotes_parser.parse()\n\n expected = ['inspirational', 'life', 'live', 'miracle', 'miracles']\n actual = self.quotes_parser.parsed[2]['tags']\n\n self.assertEqual(expected, actual)\n\n def test_add_selector_as_selector_object(self):\n tags_selector = Selector(selector_str='div.quote > div > meta.keywords', name='tags', attribute='content',\n post_processing=lambda x: x.split(','))\n self.quotes_parser.add_selector(tags_selector)\n self.quotes_parser.parse()\n\n expected = ['inspirational', 'life', 'live', 'miracle', 'miracles']\n actual = self.quotes_parser.parsed[2]['tags']\n\n self.assertEqual(expected, actual)\n","sub_path":"tests/Test_Parser.py","file_name":"Test_Parser.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241954444","text":"# -*- encoding: utf-8 -*-\nclass Solution:\n #@param A and B: sorted integer array A and B.\n #@return: A new sorted integer array\n def mergeSortedArray(self, A, B):\n # write your code here\n if A is None and B is None:\n return None\n if A is None:\n return B\n if B is None:\n return A\n\n i = 0\n j = 0\n result = []\n while i < len(A) and j < len(B):\n if A[i] <= B[j]:\n result.append(A[i])\n i += 1\n else:\n result.append(B[j])\n j += 1\n if j >= len(B):\n result.extend(A[i:])\n if i >= len(A):\n result.extend(B[j:])\n return result","sub_path":"lintcode/Array/6 merge 2 sorted array.py","file_name":"6 merge 2 sorted array.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444207547","text":"import os\nimport sys\nimport inspect\nimport json\nimport pandas as pd\n\ncurrentdir = 
os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, \"{}/src\".format(parentdir))\n\n\nclass MockIG:\n \"\"\"\n Mock broker interface class\n \"\"\"\n\n def __init__(self, mockFilepath, mockPricesFilepath):\n self.mockFilepath = mockFilepath\n self.mockPricesFilepath = mockPricesFilepath\n pass\n\n def get_market_info(self, epic_id):\n # Read mock file\n try:\n with open(self.mockFilepath, \"r\") as file:\n mock = json.load(file)\n except IOError:\n exit()\n return mock\n\n def get_prices(self, epic_id, interval, data_range):\n # Read mock file\n try:\n with open(self.mockPricesFilepath, \"r\") as file:\n mock = json.load(file)\n except IOError:\n exit()\n return mock\n\n def macd_dataframe(self, epic, interval):\n data = [i + 1 for i in range(100)]\n px = pd.DataFrame({\"close\": data})\n px[\"26_ema\"] = pd.DataFrame.ewm(px[\"close\"], span=26).mean()\n px[\"12_ema\"] = pd.DataFrame.ewm(px[\"close\"], span=12).mean()\n px[\"MACD\"] = px[\"12_ema\"] - px[\"26_ema\"]\n px[\"MACD_Signal\"] = px[\"MACD\"].rolling(9).mean()\n px[\"MACD_Hist\"] = px[\"MACD\"] - px[\"MACD_Signal\"]\n return px\n\n\nclass MockAV:\n \"\"\"\n Mock AlphaVantage interface class\n \"\"\"\n\n def __init__(self, mockFilepath):\n self.mockFilepath = mockFilepath\n\n def macdext(self, marketId, interval):\n # Read mock file\n try:\n with open(self.mockFilepath, \"r\") as file:\n mock = json.load(file)\n px = pd.DataFrame.from_dict(\n mock[\"Technical Analysis: MACDEXT\"], orient=\"index\", dtype=float\n )\n px.index = range(len(px))\n except IOError:\n exit()\n return px\n\n def weekly(self, marketId):\n # Read mock file\n try:\n with open(self.mockFilepath, \"r\") as file:\n mock = json.load(file)\n px = pd.DataFrame.from_dict(\n mock[\"Weekly Time Series\"], orient=\"index\", dtype=float\n )\n except IOError:\n exit()\n return px\n\n\nclass MockBroker:\n \"\"\"\n Mock Broker class\n \"\"\"\n\n def __init__(self, config, services):\n self.ig = services[\"ig_index\"]\n self.av = services[\"alpha_vantage\"]\n self.use_av_api = config[\"alpha_vantage\"][\"enable\"]\n self.mock_watchlist = [\n {\"epic\": \"EPIC1\"},\n {\"epic\": \"EPIC2\"},\n {\"epic\": \"EPIC3\"},\n {\"epic\": \"EPIC4\"},\n ]\n\n def get_market_info(self, epic):\n data = {}\n info = self.ig.get_market_info(epic)\n data[\"market_id\"] = epic\n data[\"bid\"] = info[\"snapshot\"][\"bid\"]\n data[\"offer\"] = info[\"snapshot\"][\"offer\"]\n data[\"stop_distance_min\"] = info[\"dealingRules\"][\n \"minNormalStopOrLimitDistance\"\n ][\"value\"]\n data[\"epic\"] = epic\n data[\"name\"] = epic\n data[\"high\"] = info[\"snapshot\"][\"high\"]\n data[\"low\"] = info[\"snapshot\"][\"low\"]\n return data\n\n def get_prices(self, epic, market_id, interval, range):\n data = {\"high\": [], \"low\": [], \"close\": [], \"volume\": []}\n prices = self.ig.get_prices(epic, interval, range)\n for i in prices[\"prices\"]:\n if i[\"highPrice\"][\"bid\"] is not None:\n data[\"high\"].append(i[\"highPrice\"][\"bid\"])\n if i[\"lowPrice\"][\"bid\"] is not None:\n data[\"low\"].append(i[\"lowPrice\"][\"bid\"])\n if i[\"closePrice\"][\"bid\"] is not None:\n data[\"close\"].append(i[\"closePrice\"][\"bid\"])\n if isinstance(i[\"lastTradedVolume\"], int):\n data[\"volume\"].append(int(i[\"lastTradedVolume\"]))\n return data\n\n def macd_dataframe(self, epic, market_id, interval):\n if self.use_av_api:\n return self.av.macdext(market_id, interval)\n else:\n return self.ig.macd_dataframe(epic, None)\n 
return None\n\n def weekly(self, market_id):\n return self.av.weekly(market_id)\n\n def get_market_from_watchlist(self, watchlist_name):\n return self.mock_watchlist\n","sub_path":"test/common/MockComponents.py","file_name":"MockComponents.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"636428935","text":"# -*- coding: utf-8 -*-\n#files for beta test 1 - time tracker\nimport sys\nimport time\nimport datetime\nimport connection\n\nfrom PySide import QtCore, QtGui, QtSql\nfrom timeTraceUI import Ui_MainWindow\nfrom taskpopup import SortedDict, ChargeCodeCatalog\nfrom cachedtable import TableEditor\n\n\nclass timers(QtGui.QMainWindow):\n def __init__(self, database, tableName, parent=None):\n super(timers, self).__init__(parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n# self.tl = ChargeCodeCatalog()\n self.timeCodes = TableEditor(database, tableName)\n\n #self.c2 = 0\n #currentTime2 = 0\n\n self.clock1 = StartStopClock()\n self.clock2 = StartStopClock()\n self.clock3 = StartStopClock()\n\n self.lcdTime = QtCore.QTimer()\n self.lcdTime.start(1000)\n\n #self.deltatime = self.clock1.lcdElapsedTimer.elapsed()\n #self.clock1delta = ElapsedTime()\n #self.clock1delta.elapsedTime(self.deltatime)\n\n #buttons\n QtCore.QObject.connect(self.ui.stop, QtCore.SIGNAL(\"clicked()\"), self.clock1.stop_clock)\n QtCore.QObject.connect(self.ui.stop, QtCore.SIGNAL(\"clicked()\"), self.clock3.stop_clock)\n QtCore.QObject.connect(self.ui.start, QtCore.SIGNAL(\"clicked()\"), self.clock1.start_clock)\n QtCore.QObject.connect(self.ui.start, QtCore.SIGNAL(\"clicked()\"), self.clock3.start_clock)\n QtCore.QObject.connect(self.ui.stop_2, QtCore.SIGNAL(\"clicked()\"), self.clock2.stop_clock)\n QtCore.QObject.connect(self.ui.stop_2, QtCore.SIGNAL(\"clicked()\"), self.clock3.start_clock)\n QtCore.QObject.connect(self.ui.start_2, QtCore.SIGNAL(\"clicked()\"), self.clock2.start_clock)\n QtCore.QObject.connect(self.ui.start_2, QtCore.SIGNAL(\"clicked()\"), self.clock3.stop_clock)\n QtCore.QObject.connect(self.ui.stop_3, QtCore.SIGNAL(\"clicked()\"), self.clock3.stop_clock)\n QtCore.QObject.connect(self.ui.start_3, QtCore.SIGNAL(\"clicked()\"), self.clock3.start_clock)\n QtCore.QObject.connect(self.ui.start_3, QtCore.SIGNAL(\"clicked()\"), self.clock2.stop_clock)\n QtCore.QObject.connect(self.ui.actionSelect_Task, QtCore.SIGNAL(\"triggered()\"), self.timeCodes.show)\n\n QtCore.QObject.connect(self.clock1.lcdTimer, QtCore.SIGNAL(\"timeout()\"), self.updtTime)\n QtCore.QObject.connect(self.clock2.lcdTimer, QtCore.SIGNAL(\"timeout()\"), self.updtTime2)\n QtCore.QObject.connect(self.clock3.lcdTimer, QtCore.SIGNAL(\"timeout()\"), self.updtTime3)\n QtCore.QObject.connect(self.lcdTime, QtCore.SIGNAL(\"timeout()\"), self.updtTime4)\n\n QtCore.QMetaObject.connectSlotsByName(self)\n\n def select_task(self):\n self.tl.show()\n\n def single(self):\n \"\"\"\n run singleShot timer after button push\n \"\"\"\n self.stimer.singleShot(1000, self.singleUpdate)\n\n def updtTime(self):\n currentTime1 = datetime.timedelta(seconds=self.clock1.lcdElapsedTimer.elapsed()/1000)\n self.ui.lcdNumber.display(str(currentTime1))\n\n def updtTime2(self):\n if self.clock2.c2 == 0:\n currentTime2 = datetime.timedelta(seconds=self.clock2.lcdElapsedTimer.elapsed()/1000)\n else:\n currentTime2 = datetime.timedelta(seconds=(self.clock2.lcdElapsedTimer.elapsed() + sum(self.clock2.timelist))/1000)\n\n self.ui.lcdNumber_2.display(str(currentTime2))\n\n 
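# Added sketch (not in the original file): the elapsed-time pattern used by the\n # updtTime* slots below is, conceptually,\n # total_ms = clock.lcdElapsedTimer.elapsed() + sum(clock.timelist)\n # where StartStopClock.stop_clock() appends each finished interval to timelist.\n 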
def updtTime3(self):\n if self.clock3.c2 == 0:\n currentTime3 = datetime.timedelta(seconds=self.clock3.lcdElapsedTimer.elapsed()/1000)\n else:\n currentTime3 = datetime.timedelta(seconds=(self.clock3.lcdElapsedTimer.elapsed() + sum(self.clock3.timelist))/1000)\n\n self.ui.lcdNumber_3.display(str(currentTime3))\n\n def updtTime4(self):\n currentTime = QtCore.QDateTime.currentDateTime().toString('hh:mm')\n self.ui.lcdNumber_4.display(str(currentTime))\n\nclass ElapsedTime(object):\n def elapsedTime(self,deltaTime):\n self.delT = datetime.timedelta(seconds=deltaTime)\n #return self.delT\n #self.lcdDisplay.display(str(self.delT))\n\n\nclass StartStopClock(object):\n def __init__(self):\n self.lcdTimer = QtCore.QTimer()\n self.lcdElapsedTimer = QtCore.QElapsedTimer()\n self.c2 = 0\n self.timelist = []\n\n def start_clock(self):\n self.lcdElapsedTimer.start()\n self.lcdTimer.start(1000)\n\n def stop_clock(self):\n self.c2 = 1\n self.timelist.append(self.lcdElapsedTimer.elapsed())\n self.lcdTimer.stop()\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n# if not connection.createConnection():\n# sys.exit(1)\n\n# model = QtSql.QSqlTableModel()\n# db = connection.initializeModel(model)\n\n# view1 = connection.createView(\"Charge Code Table Model\", model)\n # view1.show()\n\n db, tableName = connection.createConnection()\n\n myapp = timers(db, tableName)\n myapp.show()\n sys.exit(app.exec_())\n\n#PySide.QtSql.QSqlTableModel.insertRecord(row, record) record is a QSqlRecord thing\n#PySide.QtSql.QSqlRecord(other)","sub_path":"timeTrace.py","file_name":"timeTrace.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32929529","text":"import pyperclip\n\nclass Exporter:\n def __init__(self, textName):\n self.CLIPBOARD_OPTION = \"clipboard\"\n self.FILE_OPTION = \"file\"\n self.EXPORT_OPTIONS = [self.CLIPBOARD_OPTION, self.FILE_OPTION]\n\n self.filename = None\n self.exportTo = None\n self.textName = textName\n\n def addArgumentsToParser(self, parser):\n parser.add_argument('--export-to', required=True, help=\" or \".join([\"'\"+option+\"'\" for option in self.EXPORT_OPTIONS]))\n parser.add_argument('--file-name', help='filename if you choose \"%s\" for exporting' % self.FILE_OPTION)\n\n\n def parseArguments(self, args):\n self.exportTo = args.export_to\n self.filename = None\n\n if self.exportTo not in self.EXPORT_OPTIONS:\n print(\"option has to be in \"+str(self.EXPORT_OPTIONS))\n return False\n\n if self.exportTo == self.FILE_OPTION:\n if args.file_name != None:\n self.filename = args.file_name\n else:\n print(\"you have to specify a filename using --file-name\")\n return False\n return True\n\n\n def exportText(self, text):\n if self.exportTo == self.FILE_OPTION:\n try:\n with open(self.filename, 'w') as file:\n file.write(text)\n except Exception as e:\n print(\"Could not print to file\")\n print(e)\n print(\"Press enter to print %s here:\" % self.textName)\n raw_input()\n print(text)\n\n elif self.exportTo == self.CLIPBOARD_OPTION:\n pyperclip.copy(text)\n print(\"%s is in your clipboard now\" % self.textName)","sub_path":"Exporter.py","file_name":"Exporter.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596920659","text":"import Task\nfrom Bio.PDB.PDBParser import PDBParser\nfrom Bio.PDB.NeighborSearch import NeighborSearch\nfrom Bio.PDB.vectors import Vector\nfrom Bio.PDB.Selection 
import unfold_entities\n\nclass PdbParser(Task.Task):\n def __init__(self):\n self.name = \"pdb parsing\"\n def help(self, pre: str, mult: int):\n print(pre*mult + \"[file]\")\n print(pre*(mult+1) + \"'file' is a path to the pdb data file\")\n print()\n print(pre*(mult+1) + \"-s --show\")\n print(pre*(mult+2) + \"displays all residues and their atoms\")\n print(pre*(mult+1) + \"-a --atoms\")\n print(pre*(mult+2) + \"displays all atoms\")\n print(pre*(mult+1) + \"-r --residues\")\n print(pre*(mult+2) + \"displays all residues\")\n print(pre*(mult+1) + \"-q --quantities\")\n print(pre*(mult+2) + \"displays quantity of models, chains, residues and atoms\")\n print(pre*(mult+1) + \"-w --width\")\n print(pre*(mult+2) + \"displays the distance of the two far most atoms\")\n print(pre*(mult+1) + \"-la --ligand-atoms [ligand_name] [radius]\")\n print(pre*(mult+2) + \"displays all atoms in radius around ligand given by it's tri-letter name\")\n print(pre*(mult+1) + \"-lr --ligand-residues [ligand_name] [radius]\")\n print(pre*(mult+2) + \"displays all residues in radius around ligand given by it's tri-letter name\")\n print(pre*(mult+1) + \"-gl --get-ligands\")\n print(pre*(mult+2) + \"displays all ligands present in the structure\")\n def averageVector(self, resi):\n result = Vector(0,0,0)\n for v in resi:\n result += v.get_vector()\n result /= len(resi)\n return result.get_array()\n def getClose(self, center_name: str, structure, radius: float, layer: str, predicate, toPrint):\n resi = None\n for m in structure:\n for c in m:\n for r in c:\n if r.get_full_id()[3][0] == center_name:\n resi = r\n if resi != None:\n ns = NeighborSearch(unfold_entities(structure, 'A'))\n center = self.averageVector(resi)\n atoms = ns.search(center, radius, layer)\n atoms = [i for i in atoms if predicate(i, resi)]\n return Task.Result(atoms, Task.Result.printList_advanced(toPrint))\n else:\n print(\"ligand of name \" + center_name[2:] + \" not found in given file\")\n return None\n def run(self, params):\n if len(params) < 2:\n print(\"wrong number of parameters:\")\n Task.Task.help(self)\n else:\n if not Task.isFile(params[0]):\n print(\"one of given paths is not a path\")\n return None\n structure = PDBParser().get_structure(\"s\", params[0])\n if params[1] == \"-s\" or params[1] == \"--show\":\n return Task.Result(structure, PdbParser.showStructure) \n elif params[1] == \"-a\" or params[1] == \"--atoms\":\n r = \"\"\n for atom in structure.get_atoms():\n r = r + atom.get_name() + \" \"\n return Task.Result(r)\n elif params[1] == \"-r\" or params[1] == \"--residues\":\n r = \"\"\n for model in structure:\n for resi in model.get_residues():\n r = r + resi.get_resname() + \" \"\n return Task.Result(r)\n elif params[1] == \"-q\" or params[1] == \"--quantities\":\n m = len(structure)\n c = 0\n r = 0\n a = 0\n for model in structure:\n c += len(model)\n for chain in model:\n r += len(chain)\n for resi in chain:\n a += len(resi)\n return Task.Result((m,c,r,a), PdbParser.showQuantities)\n elif params[1] == \"-w\" or params[1] == \"--width\":\n m = 0\n for a1 in structure.get_atoms():\n for a2 in structure.get_atoms():\n m = max(m, a1 - a2)\n return Task.Result(m)\n elif params[1] == \"-la\" or params[1] == \"--ligand-atoms\":\n m = 0\n if len(params) < 4:\n Task.Task.help(self)\n else:\n name = \"H_\" + params[2]\n return self.getClose(\n name, structure,\n float(params[3]),\n 'A',\n lambda a,r: a.get_parent() != r,\n lambda a: a.get_name() + \"\\t\" + str(a.get_coord()))\n elif params[1] == \"-lr\" or params[1] == 
\"--ligand-residues\":\n m = 0\n if len(params) < 4:\n Task.Task.help(self)\n else:\n name = \"H_\" + params[2]\n return self.getClose(\n name, \n structure, \n float(params[3]),\n 'R',\n lambda r1,r2: r1 != r2,\n lambda r: r.get_resname() + \"\\t\" + str(self.averageVector(r)))\n elif params[1] == \"-gl\" or params[1] == \"--get-ligands\":\n ligs = []\n for m in structure:\n for c in m:\n for r in c:\n if r.get_full_id()[3][0][0:2] == \"H_\":\n ligs.append(r)\n return Task.Result(ligs, Task.Result.printList_advanced(\n lambda l: l.get_resname() + \"\\t\" + str(self.averageVector(l))))\n else:\n print(\"argument \" + params[1] + \" is not a valid argument:\")\n Task.Task.help(self)\n\n def showQuantities(quadr):\n m,c,r,a = quadr\n d = \"\\t\"\n print(\"m\" + d + \"c\" + d + \"r\" + d + \"a\")\n print(str(m) + d + str(c) + d + str(r) + d + str(a))\n def showStructure(structure):\n for model in structure:\n for chain in model:\n for resi in chain:\n print(resi.get_resname())\n r = \"\\t\"\n for atom in resi:\n r = r + atom.get_name() + \" \"\n print(r)\n print(\"--------------------------\")","sub_path":"PdbParsing.py","file_name":"PdbParsing.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"515377970","text":"from turtle import Turtle, xcor\n\nclass Snake:\n negative_x_limit_not_achieved=True\n positive_x_limit_not_achieved=False\n\n def __init__(self):\n self.snakebody=[]\n x_axis=0\n for _ in range(3):\n snakebody=Turtle()\n snakebody.shape(\"square\")\n snakebody.color(\"white\")\n snakebody.penup()\n snakebody.goto(x_axis,0)\n snakebody.setheading\n x_axis-=20\n self.snakebody.append(snakebody)\n self.snakehead=self.snakebody[0]\n \n def move(self):\n for i in range(len(self.snakebody)-1,0,-1):\n self.snakebody[i].goto(self.snakebody[i-1].xcor(),self.snakebody[i-1].ycor())\n self.snakehead.forward(20)\n \n def extendsnake(self):\n snakebody=Turtle()\n snakebody.shape(\"square\")\n snakebody.color(\"white\")\n snakebody.penup()\n snakebody.setheading\n snakebody.goto(self.snakebody[-1].position()) \n self.snakebody.append(snakebody)\n\n def up(self):\n if not (self.snakehead.heading ()== 270):\n self.snakehead.setheading(90) \n \n \n def down(self):\n if not (self.snakehead.heading() == 90):\n self.snakehead.setheading(270)\n \n\n \n def left(self):\n \n if not (self.snakehead.heading() == 0):\n self.snakehead.setheading(180) \n\n \n def right(self):\n \n if not (self.snakehead.heading()== 180):\n self.snakehead.setheading(0) \n","sub_path":"day 21/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"511052961","text":"#!/usr/bin/python\n# coding=utf-8\n################################################################################\n\nfrom test import CollectorTestCase\nfrom test import get_collector_config\nfrom test import unittest\nfrom mock import Mock\nfrom mock import patch\nfrom contextlib import nested\n\nfrom diamond.collector import Collector\nfrom diskusage import DiskUsageCollector\n\n################################################################################\n\n\nclass TestDiskUsageCollector(CollectorTestCase):\n def setUp(self):\n config = get_collector_config('DiskUsageCollector', {\n 'interval': 10,\n 'sector_size': '512',\n 'byte_unit': 'kilobyte'\n })\n\n self.collector = DiskUsageCollector(config, None)\n\n @patch('os.access', 
Mock(return_value=True))\n def test_get_disk_statistics(self):\n with nested(\n patch('__builtin__.open', Mock(\n return_value=self.getFixture('diskstats')))):\n\n result = self.collector.get_disk_statistics()\n\n open.assert_called_once_with('/proc/diskstats')\n\n self.assertEqual(\n sorted(result.keys()),\n [(8, 0), (8, 1), (8, 16), (8, 17), (8, 32),\n (8, 33), (8, 48), (8, 49), (9, 0)])\n\n return result\n\n @patch('os.access', Mock(return_value=True))\n @patch.object(Collector, 'publish')\n def test_should_work_with_real_data(self, publish_mock):\n\n with nested(\n patch('__builtin__.open', Mock(\n return_value=self.getFixture('proc_diskstats_1'))),\n patch('time.time', Mock(return_value=10))):\n self.collector.collect()\n\n self.assertPublishedMany(publish_mock, {})\n\n with nested(\n patch('__builtin__.open', Mock(\n return_value=self.getFixture('proc_diskstats_2'))),\n patch('time.time', Mock(return_value=20))):\n self.collector.collect()\n\n metrics = {\n 'sda.average_queue_length': 0.0,\n 'sda.average_request_size_kilobyte': 10.6,\n 'sda.await': 0.0,\n 'sda.concurrent_io': 0.0,\n 'sda.io': 3.0,\n 'sda.io_in_progress': 0.0,\n 'sda.io_milliseconds': 0.0,\n 'sda.io_milliseconds_weighted': 0.0,\n 'sda.iops': 0.3,\n 'sda.read_kilobyte_per_second': 0.0,\n 'sda.read_requests_merged_per_second': 0.0,\n 'sda.reads': 0.0,\n 'sda.reads_kilobyte': 0.0,\n 'sda.reads_merged': 0.0,\n 'sda.reads_milliseconds': 0.0,\n 'sda.reads_per_second': 0.0,\n 'sda.service_time': 0.0,\n 'sda.util_percentage': 0.0,\n 'sda.write_kilobyte_per_second': 3.2,\n 'sda.write_requests_merged_per_second': 0.5,\n 'sda.writes': 3.0,\n 'sda.writes_kilobyte': 32.0,\n 'sda.writes_merged': 5.0,\n 'sda.writes_milliseconds': 0.0,\n 'sda.writes_per_second': 0.3,\n\n 'sdb.average_queue_length': 495700.0,\n 'sdb.average_request_size_kilobyte': 6.3,\n 'sdb.await': 0.8,\n 'sdb.concurrent_io': 0.5,\n 'sdb.io': 9214.0,\n 'sdb.io_in_progress': 0.0,\n 'sdb.io_milliseconds': 4957.0,\n 'sdb.io_milliseconds_weighted': 7492.0,\n 'sdb.iops': 921.4,\n 'sdb.read_kilobyte_per_second': 1862.4,\n 'sdb.read_requests_merged_per_second': 0.0,\n 'sdb.reads': 1164.0,\n 'sdb.reads_kilobyte': 18624.0,\n 'sdb.reads_merged': 0.0,\n 'sdb.reads_milliseconds': 7163.0,\n 'sdb.reads_per_second': 116.4,\n 'sdb.service_time': 0.5,\n 'sdb.util_percentage': 495.7,\n 'sdb.write_kilobyte_per_second': 3914.3,\n 'sdb.write_requests_merged_per_second': 201.7,\n 'sdb.writes': 8050.0,\n 'sdb.writes_kilobyte': 39143.0,\n 'sdb.writes_merged': 2017.0,\n 'sdb.writes_milliseconds': 337.0,\n 'sdb.writes_per_second': 805.0,\n }\n\n self.setDocExample(self.collector.__class__.__name__, metrics)\n self.assertPublishedMany(publish_mock, metrics)\n\n################################################################################\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"src/collectors/diskusage/test/testdiskusage.py","file_name":"testdiskusage.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165872269","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\nCreated on 2011-8-24\r\nMain purpose:\r\n General configuration of the logging setup used in the program\r\n Log handling\r\n'''\r\n\r\nimport logging\r\n\r\nimport logging.handlers\r\n\r\nimport os\r\n\r\n# base_dir='D:\\Program Files\\Python_Workspace\\devpos_simple\\logs'\r\n# logs_dir = os.path.join(base_dir, \"logs\")\r\n# file_name=logs_dir+'\\log.txt'\r\n\r\ndef log(log_file):\r\n# LEVELS = {'NOSET': logging.NOTSET,\r\n# 'DEBUG': logging.DEBUG,\r\n# 'INFO': logging.INFO,\r\n# 'WARNING': logging.WARNING,\r\n# 'ERROR': logging.ERROR,\r\n# 'CRITICAL': logging.CRITICAL}\r\n \r\n #set up logging to file\r\n \r\n #logging.basicConfig(level = logging.NOTSET,\r\n # format = \"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\"\r\n # )\r\n \r\n ## filename = \"./log.txt\",\r\n \r\n ## filemode = \"w\")\r\n \r\n # create logs file folder\r\n logs_dir=os.path.dirname(log_file)\r\n if os.path.exists(logs_dir) and os.path.isdir(logs_dir):\r\n pass\r\n else:\r\n os.mkdir(logs_dir)\r\n \r\n # define a rotating file handler\r\n #size-based rollover: split by size as we write, 50MB per file\r\n #rotatingFileHandler = logging.handlers.RotatingFileHandler(filename =file_name,maxBytes = 1024 * 1024 * 50,backupCount = 5)\r\n #date-based rollover\r\n ''' filename log file name prefix\r\n when time unit for rotating the log file\r\n 'S' Seconds\r\n 'M' Minutes\r\n 'H' Hours\r\n 'D' Days\r\n 'W0'-'W6' Weekday (0=Monday)\r\n 'midnight' Roll over at midnight\r\n interval rotation interval: wait N 'when' units, then recreate the file automatically\r\n backupCount maximum number of log files to keep; beyond this limit the oldest file is deleted; the default 0 means no limit.\r\n delay delay file creation until emit() is called for the first time\r\n atTime create the log file at the given time (datetime.time format).\r\n '''\r\n rotatingFileHandler = logging.handlers.TimedRotatingFileHandler(log_file,when='D',interval=1,backupCount=40)\r\n \r\n formatter = logging.Formatter(\"%(asctime)s %(name)-12s %(levelname)-8s %(message)-8s %(filename)8s %(funcName)8s %(lineno)8d\")\r\n \r\n rotatingFileHandler.setFormatter(formatter)\r\n rotatingFileHandler.setLevel(logging.INFO)\r\n \r\n logging.getLogger(\"\").addHandler(rotatingFileHandler)\r\n \r\n #define a handler which writes messages to sys\r\n #console output\r\n console = logging.StreamHandler()\r\n \r\n console.setLevel(logging.INFO)\r\n \r\n #set a format which is simple for console use\r\n \r\n formatter = logging.Formatter(\"%(asctime)s %(name)-12s %(levelname)-8s %(message)-8s %(filename)8s %(funcName)8s %(lineno)8d\")\r\n #formatter = logging.Formatter(\"%(name)s: %(levelname)s %(message)s\")\r\n \r\n #tell the handler to use this format\r\n \r\n console.setFormatter(formatter)\r\n \r\n #add the handler to the root logger\r\n \r\n logging.getLogger(\"\").addHandler(console)\r\n \r\n # set initial log level\r\n logger = logging.getLogger(\"\")\r\n logger.setLevel(logging.NOTSET) \r\n return logger\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# msg = \"this is just a test\"\r\n# log=log()\r\n# log.info(msg)\r\n# log.error(msg)\r\n# log.debug(msg)\r\n# #log.WARNING(msg)\r\n# #log.NOTSET(msg)","sub_path":"loggingclass.py","file_name":"loggingclass.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174970915","text":"import matplotlib.pyplot as plt\n\nimport os\nimport numpy as np\n\nfrom skimage import io\n\nfrom sklearn.decomposition import PCA\n\nimport glob\n\n\ndata = os.listdir(\"image_samples_resized\")\n\nnewlist = []\nfor photos in data:\n if photos.endswith(\".jpg\"):\n newlist.append(photos)\n \nimg = []\nfor d in newlist:\n img.append(io.imread(\"image_samples_resized/\" +d).mean(axis=2).flatten())\n \nimg = np.array(img)\n\nm,n = 209,247\n\nsklearn_pca = PCA(n_components=30, random_state=4)\nY_sklearn = sklearn_pca.fit_transform(img)\n\nimport pandas as pd\ndf = pd.DataFrame()\n\ndf[\"image\"] = newlist\n\nfrom scipy.cluster.hierarchy import dendrogram, linkage \nfrom scipy.cluster.hierarchy import fcluster\n\nlinked = linkage(Y_sklearn, 'ward')\n\nmax_d = 32000\nclusters = fcluster(linked, max_d, criterion='distance')\n \ndf['cluster'] = clusters\n\nlabelList = ['' for i in 
range(0,138)]\n\nplt.figure(figsize=(10, 7)) \ndendrogram(linked, \n orientation='right',\n labels = labelList,\n distance_sort='descending',\n show_leaf_counts=True,\n show_contracted=True,\n color_threshold=32000)\nplt.xlabel('Distance', fontsize=24)\nplt.xticks(fontsize = 18)\nplt.tight_layout() # fixes margins\n\nplt.axvline(x=32000) #plot vertical line\n\nplt.show()\n\nprint(df.sort_values(by= ['cluster','image'], ascending=True))\n\ndef cluster_lister(df, column):\n \n cluster_list = []\n k = df[column].max()\n x = df[column].min()\n \n k=k+1\n \n for i in range(x,k):\n \n cluster_i= df[df[column]==i].sort_values(by= 'image', ascending=True)\n cluster_i_list = cluster_i['image'].tolist()\n cluster_list.append(cluster_i_list)\n \n return cluster_list\n \ndef image_lister(cluster_list):\n \n image_list = []\n k = len(cluster_list)\n \n for i in range(k):\n img_i = []\n for image in cluster_list[i]:\n img_i.append(io.imread(\"image_samples_resized/\" +image))\n \n image_list.append(img_i)\n \n return image_list\n \ndef show_images(images, cols = 1, titles = None):\n \"\"\"Display a list of images in a single figure with matplotlib.\n \n Parameters\n ---------\n images: List of np.arrays compatible with plt.imshow.\n \n cols (Default = 1): Number of columns in figure (number of rows is \n set to np.ceil(n_images/float(cols))).\n \n titles: List of titles corresponding to each image. Must have\n the same length as titles.\n \"\"\"\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()\n \ndef image_cluster_viewer(image_list, cluster_list):\n \n k = len(image_list)\n \n for i in range(k):\n show_images(image_list[i], cols = 3, titles = cluster_list[i])\n\n \ncluster_list = cluster_lister(df, 'cluster')\nimage_list = image_lister(cluster_list)\nimage_cluster_viewer(image_list, cluster_list)\n\n\n\n","sub_path":"full_image_analysis.py","file_name":"full_image_analysis.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450541832","text":"from torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom PIL import Image\nfrom os.path import join\nfrom os import listdir\n\n\nclass DatasetFromfolder(Dataset):\n def __init__(self, dir_mnist):\n super(DatasetFromfolder, self).__init__()\n\n self.filelist = []\n self.lenlist = []\n self.lensum = 0\n for i in range(10):\n idir = join(dir_mnist, str(i))\n filelist_tmp = [join(idir, x) for x in listdir(idir)]\n self.filelist.append((filelist_tmp, i))\n self.lenlist.append(len(filelist_tmp))\n self.lensum = self.lensum + len(filelist_tmp)\n\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def __getitem__(self, index):\n\n c, cindex = self._findlabelfromindex(index)\n clist, label = self.filelist[c]\n resultimage = self.transform(Image.open(clist[cindex]).convert('L'))\n return resultimage, label\n\n def __len__(self):\n return self.lensum\n\n def _findlabelfromindex(self, index):\n label = 0\n indexsum = 0\n\n for i in range(10):\n indexsum += self.lenlist[i]\n if index < indexsum:\n label = i\n break\n\n classindex = index 
- indexsum\n\n\n return label, classindex\n\n","sub_path":"7월16일 gan 알고리즘/MNIST_gan(강사.ver)/Data_loader.py","file_name":"Data_loader.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62118612","text":"\"\"\"\n Test Data for LucidChart.com\n\"\"\"\n# URLs for LucidChart.com\nbaseURL = 'https://www.lucidchart.com/'\nloginURL = '/users/login'\n\n# Login Credentials\nusername = 'neovaqa@gmail.com'\npassword = 'neova123'\ninvalid_username = 'invaliduser'\ninvalid_password = 'invalidpasswd'\n\n\n\"\"\"\n Object Repository for LucidChart.com\n All CSSLocators are stored in this file.\n\"\"\"\n# Login page\nusername_input_locator=\"#username\"\npassword_input_locator=\"#password\"\nlogin_button_locator=\".right.button\"\nlogout_link_locator=\".tab.dem>a\"\nmessage_locator=\"#messagebar\"\n\n# Content Container\nrow_locator = \".wrapper\"\nname_file_folder_locator = \".name.link\"\nusername_viewpanel_locator = \".wrapper .col2\"\nselected_row_locator = \".selected .wrapper\"\n\n# Tree Component Container\ncreate_button_locator=\"#document-create-button\"\nimport_button_locator=\"#document-import-button\"\nnew_document_locator=\".menu-body.noScrollBars>div:nth-child(1)>div\"\nnew_folder_locator=\".menu-body.noScrollBars>div:nth-child(2)>div\"\n\n#Import File Dialog Locator\nimport_file_dialog_locator = \"[class='dialog-overlay modal'][style*='display: block;']\"\nupload_file_input_locator = \"#visio-import-form #import-choose-file-button\"\nimport_vision_file_button_locator = \"#visio-import-form #import-file-button\"\n\n# Create New Folder popup locator\nok_button_locator = \".button.btn.highlight\"\nfolder_popup_locator = '.dialog.non-ajax.ui-draggable.prompt'\nenter_folder_name_locator = \".input-text-textbox>input\"\n\n# Create New File Popup locator\nenter_file_name_locator = \".dialog.non-ajax.ui-draggable.prompt .input-text-textbox>input\"\nok_file_button_locator = \".dialog.non-ajax.ui-draggable.prompt .button.btn.highlight\"\nfile_popup_locator = \".dialog-container .dialog.non-ajax.ui-draggable.prompt\"\nstart_drawing_button_locator = \"#newdocdialog_startdrawing\"\n\n# Sharing Pane Locator\nshare_plus_button_locator=\".inverse.icon-13.icon-13-plus\"\nshare_panel_locator=\".file-details>div:nth-child(11).pane\"\n\n# Invitation Panel Locator\ninvitation_panel_locator=\".file-details>div:nth-child(14).pane\"\n\n# View\nlist_view_locator=\".icon-21.icon-21-list-view\"\nlist_view_selected_locator=\".list-view.button.selected\"\ngrid_view_locator=\".icon-21.icon-21-grid-view\"\ngrid_view_selected_locator=\".grid-view.button.selected\"\n\n#Share Popup Dialog locators\nemail_adresses_input_locator = \".left.fullwidth\"\nsend_invitation_button_locator = \"[value='Send Invitation']\"\n\n# Rename File Locators\nrename_button_locator = \"#rename-document-button\"\n\n# Delete File Locators\ndelete_button_locator = \"#delete-document-button\"\n\n# Selected Folder Locators\nselected_foldername_locator = \".breadcrumbs .selected\"\nselected_foldername_leftpanel_locator = \".folder.expanded .selected\"\n\n#Delete Folder Locator\ndelete_folder_button_locator = \".file-details-container>div>div:nth-child(5)>span:nth-child(2)>span\"\n\n#Rename Folder Locator\nrename_folder_button_locator = '.file-details-container>div>div:nth-child(5)>span:nth-child(1)>span'\n\n# Edit file Locators\npage_menu_locator = \"#page-menu-bar-button>span\"\nsubmenu_dialog_locator = \"[class='menu dropdown']:not([style*='display: 
none;'])>.menu-body.noScrollBars\"\nsubmenu_duplicate_page_locator = \"[class='menu-body noScrollBars']>div:nth-child(2)\"\nadded_duplicate_page_heading_locator = \"#tabs-page-1\"\nclose_page_button_locator = \".icon-13.icon-13-close\"\ndelete_confirm_popup_locator = \"[style='display: block;'][class='dialog-overlay modal']\"\n\n# Test Data\nFOLDER_NAME = \"NeovaTest1\"\nFILE_NAME=\"Neova1\" # File name used for the script, Share File, View File and Edit File\nSHARE_FOLDER_NAME=\"Test1\"\nEMAIL_ID=\"neovaqa1@gmail.com\"\nCREATE_FILE_NAME = \"test\"\nFOLDERPATH=\"C:\\\\FileUpload\\\\\"\nUPLOAD_FILE_NAME=\"ImportVsd\"\nDELETE_FILE_NAME = 'test'\nRENAME_FILE_NAME_NEW = 'Renamed_test2'\nRENAME_FILE_NAME_OLD = 'Test2'\nDELETE_FOLDER_NAME = 'TestFolder'\nRENAME_FOLDER_NAME_OLD = \"Test_folder\"\nRENAME_FOLDER_NAME_NEW = \"Renamed_Test_Folder\"\n\n\n","sub_path":"WIndows/nsUI/apps/LucidchartConfig.py","file_name":"LucidchartConfig.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646337710","text":"import cocos\nimport pyglet\nfrom cocos.actions import *\n\ncDirector = cocos.director.director\ncScene = cocos.scene\ncText = cocos.text\ncLayer = cocos.layer\ncSprite = cocos.sprite\n\nclass HelloWorld(cLayer.Layer):\n is_event_handler = True\n\n def __init__(self):\n super(HelloWorld, self).__init__()\n\n self.txtKey = cText.Label(\n '',\n font_name='Times New Roman',\n font_size=32,\n anchor_x='center',\n anchor_y='center'\n )\n\n self.txtKey.position = 320, 240\n\n self.keys_pressed = set()\n self.update_text()\n self.add(self.txtKey)\n\n\n def on_key_press(self, key, modifiers):\n self.keys_pressed.add(key)\n self.update_text()\n\n\n def on_key_release(self, key, modifiers):\n self.keys_pressed.remove(key)\n self.update_text()\n\n\n def update_text(self):\n key_names = [pyglet.window.key.symbol_string(k) for k in self.keys_pressed]\n self.txtKey.element.text = ','.join(key_names)\n\n\ncDirector.init()\nhello_layer = HelloWorld()\n\nmain_scene = cScene.Scene(hello_layer)\n\ncDirector.run(main_scene)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"636254736","text":"import sys\nfrom vcfLine import vcfLine\n\n\n\n\n\n\nif __name__ == \"__main__\":\n vcf_in,out_loc = sys.argv[1],sys.argv[2]\n \n\n outFile = open(out_loc,\"w\")\n \n with open(vcf_in) as vcfFile:\n for line in vcfFile:\n vcf_line = vcfLine(line)\n if vcf_line.isDataLine:\n outFile.write(vcf_line.chrom + \" \" + vcf_line.pos + \"\\n\")\n\n vcfFile.close()\n outFile.close()\n\n\n\n\n\n\n\n\n","sub_path":"generatePosFile.py","file_name":"generatePosFile.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190361248","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\nUse an ANN model for facial expression recognition (sentiment analysis: smiling / not smiling) and save the model\r\n\r\nUsage:\r\npython /home/reed/Desktop/code/oldcare/imageclassifierwithann.py\r\n'''\r\n\r\n# import packages\r\nfrom oldcare.preprocessing import SimplePreprocessor\r\nfrom oldcare.datasets import SimpleDatasetLoader\r\nfrom imutils import paths\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense\r\nfrom keras.optimizers import SGD\r\nfrom keras.optimizers import Adam\r\nfrom keras.optimizers import Adagrad\r\nfrom keras.utils import to_categorical\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# global variables\r\ndataset_path = '/home/reed/Desktop/code/oldcare/images/emotion'# image data: smiling and not smiling\r\naccuracy_plot_path = '/home/reed/Desktop/code/oldcare/plots/ann/accuracy64000140adagrad.png'# accuracy curve plot\r\nloss_plot_path = '/home/reed/Desktop/code/oldcare/plots/ann/loss64000140adagrad.png'# loss curve plot\r\noutput_model_path = '/home/reed/Desktop/code/oldcare/models/ann/emotion_ann64000140adagrad.hdf5'# where the model is stored\r\n\r\n# global constants\r\nTARGET_IMAGE_WIDTH = 28\r\nTARGET_IMAGE_HEIGHT = 28\r\n# learning rate\r\nLR = 0.001\r\n# LR = 0.0015\r\n# batch size\r\nBATCH_SIZE = 64\r\n# BATCH_SIZE = 128\r\n# number of training epochs\r\nEPOCHS = 40\r\n\r\n################################################\r\n# Part 1: data preprocessing\r\n\r\n# initialize the image preprocessor and datasetloader\r\nsp = SimplePreprocessor(TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT)# preprocess images: resize\r\nsdl = SimpleDatasetLoader(preprocessors=[sp])\r\n\r\n# Load images\r\nprint(\"[INFO] loading images...\")\r\nimage_paths = list(paths.list_images(dataset_path)) # path included\r\n(X, y) = sdl.load(image_paths, verbose=500, grayscale = True)\r\n\r\n# Flatten (reshape the data matrix)\r\n# convert from (13164,TARGET_IMAGE_WIDTH,TARGET_IMAGE_HEIGHT)\r\n#into (13164,TARGET_IMAGE_WIDTH*TARGET_IMAGE_HEIGHT)\r\nX = X.reshape((X.shape[0], TARGET_IMAGE_WIDTH*TARGET_IMAGE_HEIGHT))\r\nX = X.astype(\"float\") / 255.0 # feature scaling, a very important step\r\n\r\n# Show some information on memory consumption of the images\r\nprint(\"[INFO] features matrix: {:.1f}MB\"\r\n .format(X.nbytes / (1024 * 1024.0)))\r\n\r\n# Label encoder\r\nle = LabelEncoder()\r\ny = to_categorical(le.fit_transform(y), 2)\r\nprint(le.classes_)\r\n\r\n# split the dataset; the test set takes 25%\r\n(X_train, X_test, y_train, y_test) = train_test_split(X, y,test_size=0.25,random_state=42)\r\n\r\n################################################\r\n# Part 2: build and train the model\r\n\r\n# build the model\r\n# ANN model\r\nmodel = Sequential()\r\nmodel.add(Dense(1024,input_shape=(TARGET_IMAGE_WIDTH * TARGET_IMAGE_HEIGHT,),activation=\"relu\"))# use relu as the activation function\r\nmodel.add(Dense(512, activation=\"relu\"))\r\nmodel.add(Dense(2, activation=\"softmax\"))# fully connected layer with 2 nodes for the two classes: smiling, not smiling\r\n# train the model\r\nprint(\"[INFO] training model...\")\r\n# optimizer choice\r\nsgd = SGD(LR)\r\nadamModel = Adam(LR)\r\nadagradModel = Adagrad(LR)\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=adagradModel,metrics=[\"accuracy\"])\r\nH = model.fit(X_train, y_train, validation_data=(X_test, y_test),epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)\r\n\r\n\r\n################################################\r\n# Part 3: evaluate the model\r\n\r\n# plot the accuracy curve\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\nplt.plot(np.arange(1, EPOCHS+1), H.history[\"acc\"], label=\"train_acc\")\r\nplt.plot(np.arange(1, EPOCHS+1), H.history[\"val_acc\"],label=\"val_acc\")\r\nplt.title(\"Training Accuracy\")\r\nplt.xlabel(\"Epoch #\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.legend()\r\nplt.savefig(accuracy_plot_path)\r\n\r\n# plot the loss curve\r\nplt.style.use(\"ggplot\")\r\nplt.figure()\r\nplt.plot(np.arange(1, EPOCHS+1),H.history[\"loss\"],label=\"train_loss\")\r\nplt.plot(np.arange(1,EPOCHS+1),H.history[\"val_loss\"],label=\"val_loss\")\r\nplt.title(\"Training Loss\")\r\nplt.xlabel(\"Epoch #\")\r\nplt.ylabel(\"Loss\")\r\nplt.legend()\r\nplt.savefig(loss_plot_path)\r\n\r\n# print the classification report\r\nlabel_names = le.classes_.tolist()\r\nprint(\"[INFO] evaluating model...\")\r\npredictions = model.predict(X_test, 
batch_size=BATCH_SIZE)\r\nprint(classification_report(y_test.argmax(axis=1),predictions.argmax(axis=1), target_names=label_names))\r\n\r\n\r\n################################################\r\n# Part 4: save the model\r\nmodel.save(output_model_path)\r\n","sub_path":"imageclassifierwithann.py","file_name":"imageclassifierwithann.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404404655","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport socket, time\nfrom datetime import datetime\nfrom PySide import QtCore, QtGui\n\n\n\ndef downloadFileFromServer(host, port, dir, progressBar, frame, timeLabel, rttLabel):\n\t\"\"\"\n\tCreates an AF_INET socket (supports IPv4 protocols) of type SOCK_STREAM,\n\twhich provides sequencing, reliability and full-duplex communication (TCP characteristics)\n\t::.. For more info: $man socket\n\t\"\"\"\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t# connect the socket to the address formed by the IP and port defined in the tuple\n\t# this address must be a server in listen state\n\ttry:\n\t\tsock.connect((str(host), int(port)))\n\texcept socket.error as serr:\n\t\tmsgBox = QtGui.QMessageBox()\n\t\tmsgBox.setIcon(QtGui.QMessageBox.Critical)\n\t\tmsgBox.setText(\"Please make sure if the host and port are correct.\")\n\t\tmsgBox.setWindowTitle('Socket Error')\n\t\tmsgBox.setStandardButtons(QtGui.QMessageBox.Ok)\n\t\tretal = msgBox.exec_()\n\t\treturn\n\n\t# handshake defined in my protocol\n\tsock.send('HELO')\n\ttimeLabel.setText(\"Connection established\")\n\t\"\"\"\n\tAfter receiving the handshake above, the server knows it must send a 1KB packet\n\tcontaining two pieces of information: the size of the file to be sent and its name.\n\tAny excess data is already part of the file content and must be added to the\n\tclient's progress.\"\"\"\n\tsizercv = sock.recv(1024)\n\t#I defined that '' serves as a content separator in this packet\n\tpresize = str.split(sizercv, '')\n\tsize = presize[0]\t#file size\n\tfilename = presize[1] #file name\n\tinitial_data = presize[2] #initial file content that came in the first packet\n\n\t#create the file with the same name on the client side, in the chosen folder\n\tnewfile = dir + \"/\" + filename\n\tf = open(newfile, 'wb') #open/create with binary write permission\n\tf.write(initial_data) #write the content that came as excess in the first packet\n\ttotal = len(initial_data) #add this to the download progress\n\ttime0 = time.time()\n\tbegin_time = time.time()\n\twhile True:\n\t\tcontent = sock.recv(1024) #receive a packet\n\t\trtt = time.time() - time0; time0 = time.time()\n\t\trtt = rtt*1000000\n\t\trttMsg = \"RTT = %.2f microseconds.\" % rtt\n\t\trttLabel.setText(rttMsg)\n\t\t# timeLabel.setText(unicode(i))\n\t\ttotal += len(content) #add the received packet to the download progress\n\t\t#compute the progress percentage based on the total file size\n\t\tcurr_progress = (total/float(size))*100\n\t\tcurrent_time = time.time()\n\t\texpect_time = ((current_time - begin_time) * (100.00/curr_progress)) - (current_time - begin_time)\n\t\ttime_msg = \"%.2f seconds until finish download.\" % expect_time\n\t\ttimeLabel.setText(time_msg)\n\t\tprogressBar.setValue(int(curr_progress)) #show this on the progressBar\n\t\tframe.show() # refresh the frame\n\t\tif not content:\n\t\t\tbreak\t# if no more content is received, we reached the end, exit the loop\n\t\tf.write(content) # write the content to the file created on the client side\n\tf.close() # close the file buffer\n\tsock.close() # close the connection\n\t\n\ttimeLabel.setText(\"Download successfully completed\")\n\t_msg = \"Download successfully completed\"\n\trttLabel.setText(\"RTT = 0.0 microseconds\")\n\tprogressBar.setValue(0)\n\t\n\t","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251211920","text":"from tensorflow import config\n\nfrom typing import List\nfrom tensorflow.keras import Model\nimport tensorflow\nfrom tensorflow import expand_dims, squeeze\nfrom tensorflow.compat.v1.keras.layers import CuDNNLSTM\nfrom tensorflow.keras.layers import Input, Lambda, LSTM, Bidirectional, Dense, ReLU, \\\n TimeDistributed, BatchNormalization, Dropout, ZeroPadding2D, Conv2D, Reshape\n\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Optimizer, SGD, Adam\nfrom tensorflow.keras.utils import multi_gpu_model # added missing import: used in get_compiled_model (available in tf.keras up to TF 2.3)\n\nfrom functools import partial\nfrom tensorflow.keras import backend as K\n\nfrom tensorflow.keras.callbacks import Callback, TerminateOnNaN, LearningRateScheduler, ReduceLROnPlateau, History\nimport importlib\nimport callbacks\nimportlib.reload(callbacks)\nfrom callbacks import ResultKeeper, CustomModelCheckpoint, CustomTensorBoard, CustomEarlyStopping\n\ndef get_model(gpus):\n input_dim = 80\n is_gpu = len(gpus) > 0\n output_dim = 28\n context = 7\n units = 1024\n dropouts = [0.1, .1, 0]\n #random_state = 1\n\n #np.random.seed(1)\n #tensorflow.random.set_seed(random_state)\n input_tensor = Input([None, input_dim], name='X') # Define input tensor [time, features]\n x = Lambda(expand_dims, arguments=dict(axis=-1))(input_tensor) # Add 4th dim (channel)\n x = ZeroPadding2D(padding=(context, 0))(x) # Fill zeros around time dimension\n receptive_field = (2*context + 1, input_dim) # Take into account fore/back-ward context\n x = Conv2D(filters=units, kernel_size=receptive_field)(x) # Convolve signal in time dim\n x = Lambda(squeeze, arguments=dict(axis=2))(x) # Squeeze into 3rd dim array\n x = ReLU(max_value=20)(x) # Add non-linearity\n x = Dropout(rate=dropouts[0])(x) # Use dropout as regularization\n\n x = TimeDistributed(Dense(units))(x) # 2nd and 3rd FC layers do a feature\n x = ReLU(max_value=20)(x) # extraction based on the context\n x = Dropout(rate=dropouts[1])(x)\n\n x = TimeDistributed(Dense(units))(x)\n x = ReLU(max_value=20)(x)\n x = Dropout(rate=dropouts[2])(x)\n\n x = Bidirectional(CuDNNLSTM(units, return_sequences=True) if is_gpu else # LSTMs handle long dependencies\n LSTM(units, return_sequences=True, ),\n merge_mode='sum')(x)\n\n output_tensor = TimeDistributed(Dense(output_dim, activation='softmax'))(x) # Return at each time step prob along characters\n\n model = Model(inputs=input_tensor, outputs=output_tensor)\n return model\n\ndef ctc_loss(y, y_hat):\n print(\"calculating loss\")\n def get_length(tensor):\n lengths = tf.reduce_sum(tf.ones_like(tensor), 1)\n return tf.reshape(tf.cast(lengths, tf.int32), [-1, 1])\n\n\n sequence_length = get_length(tf.reduce_max(y_hat, 2))\n label_length = get_length(y)\n ret = tf.keras.backend.ctc_batch_cost(y, y_hat, sequence_length, label_length)\n print(ret)\n return ret\n\ndef get_optimizer():\n optimizer = Adam(\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-07,\n amsgrad=False,\n name=\"Adam\",\n )\n\n return optimizer\n\ndef get_compiled_model():\n list_physical_devices = config.list_physical_devices\n 
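# Added note (illustrative, not from the original file): ctc_loss above relies on\n # K.ctc_batch_cost(y_true, y_pred, input_length, label_length), where y_true holds\n # integer label sequences and y_pred per-timestep softmax probabilities.\n 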
model_dir = \"models/\"\n gpus = list_physical_devices(\"GPU\")\n model = get_model(gpus)\n loss = ctc_loss\n optimizer = get_optimizer()\n gpus_num = len(gpus)\n compiled_model = multi_gpu_model(model, gpus_num) if gpus_num > 1 else model\n y = Input(name='y', shape=[None], dtype='int32')\n compiled_model.compile(optimizer, loss, target_tensors=[y])\n\n return compiled_model\n\ndef batch_tensorflow_decode(y_hat, decoder, alphabet):\n \"\"\" Enable to batch decode using tensorflow decoder. \"\"\"\n labels, = decoder([y_hat])\n return alphabet.get_batch_transcripts(labels)\n\ndef get_decoder(output_tensor, alphabet):\n def get_length(tensor):\n lengths = tf.reduce_sum(tf.ones_like(tensor), 1)\n return tf.cast(lengths, tf.int32)\n\n sequence_length = get_length(tf.reduce_max(output_tensor, 2))\n top_k_decoded, _ = K.ctc_decode(output_tensor, sequence_length, greedy=False, beam_width=64)\n print(top_k_decoded[0])\n decoder = K.function([output_tensor], [top_k_decoded[0]])\n return partial(batch_tensorflow_decode, alphabet=alphabet, decoder=decoder)\n\n\ndef get_callbacks():\n callbacks = []\n callbacks.append(TerminateOnNaN())\n callbacks.append(ResultKeeper(\"results.bin\"))\n callbacks.append(CustomModelCheckpoint('checkpoints'))\n callbacks.append(CustomTensorBoard('tensorboard'))\n callbacks.append(CustomEarlyStopping(mini_targets={5: 200, 10:100}, monitor=\"val_loss\", patience=3))\n #lr_decay = lambda epoch, lr: lr / np.power(.1, epoch)\n #callbacks.append(LearningRateScheduler(lr_decay, verbose= 1))\n return callbacks\n\n\n","sub_path":"flask/Model/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616055377","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nmaze = [\n\t['x','x','x','x','x','x','x','x'],\n\t['x','_','_','_','x','_','_','_'],\n\t['x','_','x','_','x','_','x','x'],\n\t['x','_','x','_','x','_','_','x'],\n\t['x','_','x','_','x','x','_','x'],\n\t['_','_','_','_','_','_','_','x'],\n\t['x','x','x','x','x','x','x','x']\n]\nstartY,startX = 5,0\nendY,endX = 1,len(maze[1])-1\n\ndef printMaze(maze):\n\tfor line in maze:\n\t\tprint(''.join(line))\n\ndef inBounds(maze,x,y):\n\tif(x<0 or y<0): return False\n\tif(y >= len(maze)): return False\n\treturn x < len(maze[y])\n\ndef canWalk(maze,x,y):\n\treturn inBounds(maze,x,y) and maze[y][x]=='_'\n\ndef getMoves():\n\tleft = [ 0,-1]\n\tright = [ 0, 1]\n\tdown = [ 1, 0]\n\tup = [-1, 0]\n\treturn [up,left,right,down]\n\ndef copyMaze(maze):\n\treturn [[cell for cell in row] for row in maze ]\n\ndef solve(maze,currentX,currentY,endY,endX,counter=0):\n\tif(currentX == endX and currentY == endY):\n\t\treturn [counter,maze]\n\tbest = None\n\tbestCounter = -1\n\tfor move in getMoves():\n\t\tdeltaY,deltaX = move[0],move[1]\n\t\ty = currentY + deltaY\n\t\tx = currentX + deltaX\n\t\tif(canWalk(maze,x,y)):\n\t\t\tcpy = copyMaze(maze)\n\t\t\tcpy[y][x] = 'O'\n\t\t\tresult = solve(cpy,x,y,endY,endX,counter+1)\n\t\t\tif(result[1] is not None):\n\t\t\t\tcpyCounter = result[0]\n\t\t\t\tif(bestCounter < 0 or bestCounter > cpyCounter):\n\t\t\t\t\tbest = result[1]\n\t\t\t\t\tbestCounter = cpyCounter\n\treturn [bestCounter,best]\n\nprintMaze(maze)\nprint('')\nmaze[startY][startX] = 'O'\nresult = solve(maze,startX,startY,endY,endX)\nif(result[1] is not None):\n\tprintMaze(result[1])\nelse:\n\tprint('Pas de solutions')\n\n","sub_path":"Algorithmique/Récursivité/Retour sur 
traces/MazeBestSolution.py","file_name":"MazeBestSolution.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"101228856","text":"#\n# SConscript for cifparse-obj-v7.0\n# Created: Aug 22, 2006 - Jdw\n#\n# Updated: Mar 30, 2011 jdw - Clone environment\n## \nimport sys, os.path\nImport('env')\nenv=env.Clone()\n#\n#if (len(env.subst('$MYDEBUG')) > 0):\n#\tdict = env.Dictionary()\n#\tfor k,v in dict.items():\n#\t\tprint k, \" = \", str(v)\n#\n#print \"SConscript - in cifparse-obj-v7.0\"\nenv1 = env.Clone()\nmyOs = sys.platform\n\nenv1.Replace(LEXFLAGS = '-Cfr -L -Pcifparser_')\nenv1.Replace(YACCFLAGS = '-d -v -l -p cifparser_')\ns1 = env1.CFile(target = 'src/CifParser.c', source='src/CifParser.y')\n#\n#print s1[0]\n#print s1[1]\n# \ndst1= os.path.join(str(env.subst('$MY_INCLUDE_INSTALL_PATH')),'CifParser.h')\nif (myOs == \"darwin\"):\n# src1 = str(s1[0]) + '.h'\t\n src1 = 'src/CifParser.H'\nelif (myOs == \"linux\"):\n# src1 = 'src/CifParser.H'\n src1 = 'src/CifParser.h'\nelse:\n src1 = 'src/CifParser.h'\n#\n#print src1\n#print dst1\n#\nmv1 = Move(File(dst1).abspath,File(src1).abspath)\nenv1.AddPostAction(s1, mv1)\n#\ns2 = env1.CFile(target = 'src/CifScanner.c', source='src/CifScanner.l')\n#\n#\nenv2 = env.Clone()\nenv2.Replace(LEXFLAGS = '-Cfr -L -Pdicparser_')\nenv2.Replace(YACCFLAGS = '-d -v -l -p dicparser_')\ns3 = env2.CFile(target = 'src/DICParser.c', source='src/DICParser.y')\ndst3= os.path.join(str(env.subst('$MY_INCLUDE_INSTALL_PATH')),'DICParser.h')\nif (myOs == \"darwin\"):\n# src3 = str(s3[0]) + '.h'\n src3 = 'src/DICParser.H'\nelif (myOs == \"linux\"):\n# src3 = 'src/DICParser.H'\n src3 = 'src/DICParser.h'\nelse:\n src3 = 'src/DICParser.h'\n#\nmv3 = Move(File(dst3).abspath,File(src3).abspath)\nenv2.AddPostAction(s3, mv3)\n#\ns4 = env2.CFile(target = 'src/DICScanner.c', source='src/DICScanner.l')\n#\n#\nlibName = 'cifparse-obj'\nlibSrcList =['src/CifFileReadDef.C',\n\t\t\t 'src/CifParser.c',\n\t\t\t 'src/CifScanner.c',\n\t\t\t 'src/CifScannerBase.C',\n\t\t\t 'src/CifParserBase.C',\n\t\t\t 'src/DICScanner.c',\n\t\t\t 'src/DICScannerBase.C',\n\t\t\t 'src/DICParser.c',\n\t\t\t 'src/DICParserBase.C']\n\n#libObjList = [s.replace('.C','.o') for s in libSrcList]\nlibObjList = []\nfor s in libSrcList:\n\tif s.endswith('.c'):\n\t\tlibObjList.append(s.replace('.c','.o'))\n\telif s.endswith('.C'):\n\t\tlibObjList.append(s.replace('.C','.o'))\n\telse:\n\t\tpass\n\n\nlibIncList =['include/CifFileReadDef.h',\n\t\t\t 'include/CifParserBase.h',\n\t\t\t 'include/CifParserInt.h',\n\t\t\t 'include/CifScannerBase.h',\n\t\t\t 'include/CifScannerInt.h',\n\t\t\t 'include/DICParserBase.h',\n\t\t\t 'include/DICParserInt.h',\n\t\t\t 'include/DICScannerBase.h',\n\t\t\t 'include/DICScannerInt.h']\n\nmyLib=env.Library(libName,libSrcList)\n#\nenv.Install(env.subst('$MY_LIB_INSTALL_PATH'),myLib)\nenv.Alias('install-lib',env.subst('$MY_LIB_INSTALL_PATH'))\n#\nenv.Install(env.subst('$MY_INCLUDE_INSTALL_PATH'),libIncList)\nenv.Alias('install-include',env.subst('$MY_INCLUDE_INSTALL_PATH'))\n#\nenv.Install(env.subst('$MY_OBJ_INSTALL_PATH'),libObjList)\nenv.Alias('install-obj',env.subst('$MY_OBJ_INSTALL_PATH'))\n#\nenv.Default('install-include','install-obj','install-lib')\n#\n","sub_path":"SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342528635","text":"import os.path as path\nfrom sys import 
path as syspath\n\nsyspath1 = __file__[:-12]\nsyspath.append(syspath1)\nsyspath2 = __file__[:-22]\nsyspath.append(syspath2)\n\nimport sys, time, os, math\nfrom noise import pnoise2, snoise2\nimport numpy as np\nimport tmxlib\nfrom Terrain.SandWater import SandWater\nfrom Terrain.GrassWater import GrassWater\nfrom Terrain.GrassSand import GrassSand\n\ndef setup(octaves, freq, x_offset, y_offset):\n array = []\n octaves = int(octaves)\n freq = int(freq) * octaves\n \n for y in range(259):\n temp_array = []\n for x in range(259):\n## temp_array.append(int(pnoise2((x*int(coord[0])) / freq,\n## (y*int(coords[1])) / freq,\n## octaves) * 196 + 128.0))\n x_pos = x + (x_offset * 256)\n y_pos = y + (y_offset * 256)\n #print(x_pos - x, y_pos - y)\n noise_level = pnoise2(x_pos / freq, y_pos / freq, octaves) * 196 + 128.0\n temp_array.append(noise_level)\n array.append(temp_array)\n\n total = 0\n for y in range(len(array)):\n for x in array[y]:\n total += x\n\n highest = 0\n for y in range(len(array)):\n for x in array[y]:\n if x > highest:\n highest = x\n \n #print(\"[INFO] Highest Mark: \" + str(int(highest)))\n #print(\"[INFO] Average Mark Height: \" + str(int(total / (len(array) * len(array[0])))))\n\n np_array = np.array(array)\n\n filename = os.path.join(__file__[:-12], 'key.tmx')\n key = tmxlib.Map.open(filename)\n string = open(os.path.join(__file__[:-12], 'maze.tmx'), 'rb').read()\n map = tmxlib.Map.load(string)\n\n layer = map.layers['Ground']\n layer_dict = layer.to_dict()\n layer_dict['width'], layer_dict['height'] = 256, 256\n\n map_array = np.array(layer_dict['data'])\n\n old_dimensions = int(math.sqrt(map_array.size))\n\n map_array = map_array.reshape(old_dimensions, old_dimensions)\n \n key = key.layers['Ground']\n\n #print(\"[INFO] Maps Loaded\")\n key_pieces = []\n\n\n key_pieces.append(42) # Make my helper image work right\n for i in range(0, 768): # 768 is arbitrarily large.\n try: # We won't get this many tiles\n key_pieces.append(key[i, 0].gid)\n except IndexError:\n break\n \n return array, map_array, key_pieces, map\n\nclass Terraform:\n def __init__(self, map_array, layer, key_pieces, map):\n self.map_array = map_array\n self.layer = layer\n self.key_pieces = key_pieces\n self.map = map\n \n def generate(self):\n for x in range(len(self.map_array)):\n for y in range(len(self.map_array[0])):\n self.layer = SandWater.edges(self, x, y)\n self.layer = GrassWater.edges(self, x, y)\n self.layer = GrassSand.edges(self, x, y)\n pass\n print(\"[INFO] Edges Terraformed\")\n\n for x in range(len(self.map_array)):\n for y in range(len(self.map_array[0])):\n self.layer = SandWater.t(self, x, y)\n self.layer = GrassWater.t(self, x, y)\n self.layer = GrassSand.t(self, x, y)\n pass\n print(\"[INFO] Ts Removed\") \n\n for y in range(len(self.map_array)):\n for x in range(len(self.map_array[0])):\n self.layer = SandWater.corners(self, x, y)\n self.layer = GrassWater.corners(self, x, y)\n self.layer = GrassSand.corners(self, x, y)\n \n pass\n print(\"[INFO] Corners Added\")\n\n for y in range(len(self.map_array)):\n for x in range(len(self.map_array[0])):\n self.layer = SandWater.steps(self, x, y)\n self.layer = GrassWater.steps(self, x, y)\n self.layer = GrassSand.steps(self, x, y) \n pass\n print(\"[INFO] Steps Smoothed\")\n map.width, map.height = 260, 260\n list_array = self.layer.flatten().tolist()\n layer_dict = map.layers['Ground'].to_dict()\n layer_dict['data'] = list_array\n layer = map.layers['Ground'].from_dict(layer_dict, map)\n\n return self.shrink(layer)\n\n def shrink(self, 
layer):\n layer_dict = layer.to_dict()\n layer_dict['width'], layer_dict['height'] = 256, 256\n\n array = np.array(layer_dict['data'])\n\n old_dimensions = int(math.sqrt(array.size))\n\n array = array.reshape(old_dimensions, old_dimensions)\n list_array = np.array(array[2:-2, 2:-2]).flatten().tolist()\n #list_array = np.array(array[2:, 2:]).flatten().tolist()\n\n\n self.map.width, self.map.height = old_dimensions-4, old_dimensions-4\n layer_dict['data'] = list_array\n self.layer = layer.from_dict(layer_dict, map)\n\n #print(\"[INFO] Map Shrunk\")\n return self.layer\n \n##def generate_map(output, octaves, freq):\ncoords = [0, 0]\noutput = sys.argv[1]\noctaves = int(sys.argv[2])\nfreq = float(sys.argv[3])\nx, y = int(sys.argv[4]), int(sys.argv[5])\narray, layer, key_pieces, map = setup(octaves, freq, x, y)\nfor y in range(len(array)):\n for x in range(len(array[0])):\n if array[y][x] < 133:\n layer[y, x] = key_pieces[5] # Grass\n## elif array[y][x] < 156:\n## layer[y, x] = key_pieces[14] # Cracks\n## elif array[y][x] < 169:\n## layer[y, x] = key_pieces[5] # Cobbles\n elif array[y][x] < 155:\n layer[y, x] = key_pieces[41] # Sand\n elif array[y][x] < 180:\n layer[y, x] = key_pieces[23] # Water\n elif array[y][x] < 195:\n layer[y, x] = key_pieces[5] # Grass\n \n \nprint(\"[INFO] Map Created\")\n\nlayer = Terraform(array, layer, key_pieces, map).generate()\n\nmap.layers['Ground'] = layer\nmap.save(str(output))\n\nprint(\"[INFO] Map Saved\")\n\n","sub_path":"lib/generate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67053372","text":"#!/usr/bin/env python\n# coding=utf-8\n\n#when indexing, use commit=true&separator=%09&escape=\\&header=true\n\nimport pycurl\nimport json\nimport pprint\nimport time\nimport solr\nimport os.path\nfrom io import BytesIO\n# github_url = 'https://api.postmarkapp.com/email'\n\n# data = json.dumps({\"From\": \"user@example.com\", \"To\": \"receiver@example.com\", \"Subject\": \"Pycurl\", \"TextBody\": \"Some text\"})\n\n# c = pycurl.Curl()\n# c.setopt(pycurl.URL, github_url)\n# c.setopt(pycurl.HTTPHEADER, ['X-Postmark-Server-Token: API_TOKEN_HERE','Accept: application/json'])\n# c.setopt(pycurl.POST, 1)\n# c.setopt(pycurl.POSTFIELDS, data)\n# c.perform()\n\nport=8983\nsolr_jobs_base = 'http://localhost:8983/solr/jobdescriptions'\nsolr_resumes_base = 'http://localhost:8983/solr/resumes'\n\n\nquery = '-job_fips:[* TO *]'\ns = solr.Solr(solr_jobs_base)\n#(q, fields=None, highlight=None, score=True, sort=None, sort_order=\"asc\", **params)\nr = s.select(query, rows=10000)\n\n\ncols = {'zip' : 1, 'city' : 2, 'state' : 4, 'county': 5}\nfips_cols = {'state': 0, 'state_ansi':1, 'county_ansi': 2, 'county_name':3}\n\n# loads a file into a dictionary\ndef load_file_dict(filename, cols, delim=\"\\t\", comment=\"\"):\n\tlines = {}\n\tf = filename\n\twith open(f,'rU') as f:\n\t\tfor l in f:\n\t\t\tif l[0] == comment: continue\n\t\t\tdata = l.split(delim)\n\t\t\tline = []\n\t\t\tlines[data[cols['zip']]] = (data[cols['city']] , data[cols['state']], \"\" + data[cols['county']].replace(\" County\", \"\").replace(\"(CA)\", \"Census Area\").replace(\"\\xc3\\xb1a An\", \"n\") + \" County\")\n\treturn lines\n\n# loads a file into a dictionary\ndef load_file_dict_fips(filename, cols, delim=\",\", comment=\"\"):\n\tlines = {}\n\tf = filename\n\twith open(f,'rU') as f:\n\t\tfor l in f:\n\t\t\tif l[0] == comment: continue\n\t\t\tdata = 
l.split(delim)\n\t\t\tline = []\n\t\t\tlines[(data[cols['county_name']] , data[cols['state']])] = \"\" + data[cols['state_ansi']] + data[cols['county_ansi']]\n\treturn lines\n\nprint(os.path.abspath('../setup/geonames/usa_zip.tsv'))\n\nzips = load_file_dict(os.path.abspath('../setup/geonames/usa_zip.tsv'), cols)\nfips = load_file_dict_fips(os.path.abspath('../setup/geonames/usa_fips.csv'), fips_cols, delim=\",\", comment=\"State\")\n#print(zips)\n#print(fips)\n\nprint(zips['88011'])\n\n\nnew = []\nprint(r.numFound)\nfor job_id in r.results:\n\tdefault = 'a'\n\t#print(type(job_id['job_postal'][0]))\n\t#print(job_id.get('job_postal', default))\n\t#print('%s' % (type(zips[job_id['job_postal'][0]])))\n\tline = {}\n\t#print(job_id)\n\tif (job_id.get('job_postal', default) != \"a\"):\n\t\tprint(job_id['job_postal'])\n\t\tif (zips.get(job_id['job_postal'],default) != \"a\"):\n\t\t\tif (zips.get(job_id['job_postal'],default) != (job_id.get('job_city', default), job_id.get('job_state',default), job_id.get('job_county',default))):\n\t\t\t\t# try:\n\t\t\t\t# \tprint('Postal: %s, Old: %s %s New: %s' % (job_id['job_postal'] , job_id['job_city'], job_id['job_state'], zips[job_id['job_postal']]))\n\t\t\t\t# except KeyError, e:\n\t\t\t\t# \tprint('Postal: %s, New: %s %s %s' % (job_id['job_postal'] , zips[job_id['job_postal']], zips[job_id['job_postal']][1] , zips[job_id['job_postal']][2]))\n\t\t\t\t#line['job_id'] = job_id['job_id']\n\t\t\t\t#line['job_city'] = {\"set\", zips[job_id['job_postal'][0]][0]}\n\t\t\t\t# line['job_city'] = zips[job_id['job_postal']][0]\n\t\t\t\t# line['job_state'] = zips[job_id['job_postal']][1]\n\t\t\t\t# line['job_county'] = zips[job_id['job_postal']][2]\n\t\t\t\t# line['job_fips'] = fips.get((line['job_county'], line['job_state']), \"\")\n\t\t\t\t# print('FIPS: %s' % (line['job_fips']))\n\t\t\t\tjob_id['job_city'] = zips[job_id['job_postal']][0]\n\t\t\t\tjob_id['job_state'] = zips[job_id['job_postal']][1]\n\t\t\t\tjob_id['job_county'] = zips[job_id['job_postal']][2]\n\t\t\t\tjob_id['job_fips'] = fips.get((job_id['job_county'], job_id['job_state']), \"\")\n\t\t\t\tjob_id.pop('score')\n\t\t\t\tprint('FIPS: %s' % (job_id['job_fips']))\n\t\t\t\t#print(job_id)\n\t\t\t\ta = s.add(job_id, commit=False)\n\t\t\t\t#print(a)\n\n\nc = s.commit(wait_flush=True, wait_searcher=True)\nprint(c)\nd = s.close()\nprint(d)\nprint(r.numFound)\n\n# solr_url = 'http://localhost:8983/solr/resumes/dataimport?optimize=false&indent=true&clean=false&commit=true&verbose=false&command=full-import&debug=false&wt=json'\n# base_url = 'http://localhost:8983/solr/resumes/dataimport?'\n# status_command = base_url + 'indent=true&command=status&wt=json'\n# steps = range(0,7500000,20000)\n# for idx in range(0,len(steps)-1):\n# \tresponse = {\"status\" : 'busy'}\n# \t#pprint.pprint(response)\n# \twhile response[\"status\"] != \"idle\":\n# \t\ttime.sleep(2)\n# \t\tdata = BytesIO()\n# \t\tc = pycurl.Curl()\n# \t\tc.setopt(pycurl.URL, status_command)\n# \t\tc.setopt(pycurl.WRITEFUNCTION, data.write)\n# \t\tc.perform()\n# \t\tresponse = json.loads(data.getvalue())\n# \tprint('Progress: %s' % (response[\"statusMessages\"]))\n# \tprint('doing %d to %d' % (steps[idx], (steps[idx+1])))\n# \tidx2 = idx + 1\n# \tcommand = solr_url + '&minrun=' + `steps[idx]` + '&maxrun=' + `steps[idx2]`\n# \tprint(command)\n# \tdata = BytesIO()\n\n# \tc = pycurl.Curl()\n# \tc.setopt(pycurl.URL, command)\n# \tc.setopt(pycurl.WRITEFUNCTION, data.write)\n# \tc.perform()\n# \tresponse = json.loads(data.getvalue())\n# 
\t","sub_path":"index_solr/update_locations.py","file_name":"update_locations.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506221652","text":"'''\nGiven an integer array, find three numbers whose product is maximum and output the maximum product.\n'''\nclass Solution:\n def maximumProduct(self, nums):\n if len(nums) == 3:\n return nums[-1]*nums[-2]*nums[-3]\n nums.sort()\n product = 0\n negetive = []\n positive = []\n for i in range(len(nums)):\n if nums[i] < 0:\n negetive.append(nums[i])\n else:\n positive.append(nums[i])\n if len(negetive) > 1:\n product = negetive[0] * negetive[1]\n if 0 2:\n # Return a 2d tile unless chosen channel has RGB\n needed_axes = 2 + int(tile.shape[2] > 1)\n tile = np.squeeze(tile, axis=tuple(range(needed_axes, len(tile.shape))))\n\n return tile\n\nclass Opener:\n\n def __init__(self, path):\n self.warning = ''\n self.path = path\n self.reader = None\n self.tilesize = 1024\n self.ext = check_ext(path)\n self.default_dtype = np.uint16\n\n if self.ext == '.ome.tif' or self.ext == '.ome.tiff':\n self.reader = 'tifffile'\n self.io = TiffFile(self.path)\n self.ome_version = self._get_ome_version()\n if self.ome_version == 5:\n self.io = TiffFile(self.path, is_ome=False)\n self.group = zarr.open(self.io.series[0].aszarr())\n # Treat non-pyramids as groups of one array\n if isinstance(self.group, zarr.core.Array):\n root = zarr.group()\n root[0] = self.group\n self.group = root\n print(\"OME \", self.ome_version)\n num_channels = self.get_shape()[0]\n dimensions = self.io.series[0].get_axes()\n self.wrapper = ZarrWrapper(self.group, dimensions)\n\n tile_0 = self.get_tifffile_tile(num_channels, 0, 0, 0, 0, 1024)\n if tile_0 is not None:\n self.default_dtype = tile_0.dtype\n\n if (num_channels == 3 and tile_0.dtype == 'uint8'):\n self.rgba = True\n self.rgba_type = '3 channel'\n elif (num_channels == 1 and tile_0.dtype == 'uint8'):\n self.rgba = True\n self.rgba_type = '1 channel'\n else:\n self.rgba = False\n self.rgba_type = None\n\n print(\"RGB \", self.rgba)\n print(\"RGB type \", self.rgba_type)\n\n elif self.ext == '.svs':\n self.io = OpenSlide(self.path)\n self.dz = DeepZoomGenerator(self.io, tile_size=1024, overlap=0, limit_bounds=True)\n self.reader = 'openslide'\n self.rgba = True\n self.rgba_type = None\n self.default_dtype = np.uint8\n\n print(\"RGB \", self.rgba)\n print(\"RGB type \", self.rgba_type)\n\n else:\n self.reader = None\n\n def _get_ome_version(self):\n try:\n software = self.io.pages[0].tags[305].value\n sub_ifds = self.io.pages[0].tags[330].value\n if \"Faas\" in software or sub_ifds is None:\n return 5\n else:\n return 6\n except Exception as e:\n print(e)\n return 5\n\n def read_metadata(self):\n if self.ext == '.ome.tif' or self.ext == '.ome.tiff':\n try:\n metadata = ome_types.from_tiff(self.path)\n except Exception:\n return None\n\n if not metadata or not metadata.images or not metadata.images[0]:\n return None\n\n return metadata\n\n return None\n\n def load_xml_markers(self):\n metadata = self.read_metadata()\n if not metadata:\n return []\n\n metadata_pixels = metadata.images[0].pixels\n if not metadata_pixels or not metadata_pixels.channels:\n return []\n\n return [c.name for c in metadata_pixels.channels if c.name]\n\n def close(self):\n self.io.close()\n\n def is_rgba(self, rgba_type=None):\n if rgba_type is None:\n return self.rgba\n else:\n return self.rgba and rgba_type == self.rgba_type\n\n def get_level_tiles(self, level, 
tile_size):\n if self.reader == 'tifffile':\n\n # Negative indexing to support shape len 3 or len 2\n ny = int(np.ceil(self.group[level].shape[-2] / tile_size))\n nx = int(np.ceil(self.group[level].shape[-1] / tile_size))\n return (nx, ny)\n elif self.reader == 'openslide':\n reverse_level = self.dz.level_count - 1 - level\n return self.dz.level_tiles[reverse_level]\n\n def get_shape(self):\n def parse_shape(shape):\n if len(shape) >= 3:\n (num_channels, shape_y, shape_x) = shape[-3:]\n else:\n (shape_y, shape_x) = shape\n num_channels = 1\n\n return (num_channels, shape_x, shape_y)\n\n if self.reader == 'tifffile':\n\n (num_channels, shape_x, shape_y) = parse_shape(self.group[0].shape)\n all_levels = [parse_shape(v.shape) for v in self.group.values()]\n num_levels = len([shape for shape in all_levels if max(shape[1:]) > 512])\n return (num_channels, num_levels, shape_x, shape_y)\n\n elif self.reader == 'openslide':\n\n (width, height) = self.io.dimensions\n\n def has_one_tile(counts):\n return max(counts) == 1\n\n small_levels = list(filter(has_one_tile, self.dz.level_tiles))\n level_count = self.dz.level_count - len(small_levels) + 1\n\n return (3, level_count, width, height)\n\n def read_tiles(self, level, channel_number, tx, ty, tilesize):\n ix = tx * tilesize\n iy = ty * tilesize\n\n try:\n tile = self.wrapper[level, ix:ix+tilesize, iy:iy+tilesize, 0, channel_number, 0]\n return tile\n except Exception as e:\n G['logger'].error(e)\n return None\n\n def get_tifffile_tile(self, num_channels, level, tx, ty, channel_number, tilesize=1024):\n\n if self.reader == 'tifffile':\n\n tile = self.read_tiles(level, channel_number, tx, ty, tilesize)\n\n if tile is None:\n return np.zeros((tilesize, tilesize), dtype=self.default_dtype)\n\n return tile\n\n def get_tile(self, num_channels, level, tx, ty, channel_number, fmt=None):\n\n if self.reader == 'tifffile':\n\n if self.is_rgba('3 channel'):\n tile_0 = self.get_tifffile_tile(num_channels, level, tx, ty, 0, 1024)\n tile_1 = self.get_tifffile_tile(num_channels, level, tx, ty, 1, 1024)\n tile_2 = self.get_tifffile_tile(num_channels, level, tx, ty, 2, 1024)\n tile = np.zeros((tile_0.shape[0], tile_0.shape[1], 3), dtype=np.uint8)\n tile[:, :, 0] = tile_0\n tile[:, :, 1] = tile_1\n tile[:, :, 2] = tile_2\n _format = 'I;8'\n else:\n tile = self.get_tifffile_tile(num_channels, level, tx, ty, channel_number, 1024)\n _format = fmt if fmt else 'I;16'\n\n if (_format == 'RGBA' and tile.dtype != np.uint32):\n tile = tile.astype(np.uint32)\n\n if (_format == 'I;16' and tile.dtype != np.uint16):\n if tile.dtype == np.uint8:\n tile = 255 * tile.astype(np.uint16)\n else:\n # TODO: real support for uint32, signed values, and floats\n tile = np.clip(tile, 0, 65535).astype(np.uint16)\n\n return Image.fromarray(tile, _format)\n\n elif self.reader == 'openslide':\n reverse_level = self.dz.level_count - 1 - level\n img = self.dz.get_tile(reverse_level, (tx, ty))\n return img\n\n def generate_mask_tiles(self, filename, mask_params, tile_size, level, tx, ty, should_skip_tiles={}):\n num_channels = self.get_shape()[0]\n tile = self.get_tifffile_tile(num_channels, level, tx, ty, 0, tile_size)\n\n for image_params in mask_params['images']:\n\n output_file = str(image_params['out_path'] / filename)\n if should_skip_tiles.get(output_file, False):\n continue\n\n target = np.zeros(tile.shape + (4,), np.uint8)\n skip_empty_tile = True\n\n for channel in image_params['settings']['channels']:\n rgba_color = [int(255 * i) for i in (colors.to_rgba(channel['color'], 
channel['opacity']))]\n ids = channel['ids']\n\n if len(ids) > 0:\n bool_tile = np.isin(tile, ids)\n # Signal that we must actually save the image\n if not skip_empty_tile or np.any(bool_tile):\n skip_empty_tile = False\n target[bool_tile] = rgba_color\n else:\n # Handle masks that color cells individually\n target = colorize_mask(target, tile, channel['opacity'])\n skip_empty_tile = False\n\n if skip_empty_tile:\n empty_file = get_empty_path(output_file)\n yield {\n 'img': None,\n 'empty_file': empty_file\n }\n else:\n img = Image.frombytes('RGBA', target.T.shape[1:], target.tobytes())\n yield {\n 'img': img,\n 'output_file': output_file\n }\n\n def save_mask_tiles(self, filename, mask_params, logger, tile_size, level, tx, ty):\n\n should_skip_tiles = {}\n\n for image_params in mask_params['images']:\n\n output_file = str(image_params['out_path'] / filename)\n path_exists = os.path.exists(output_file) or os.path.exists(get_empty_path(output_file))\n should_skip = path_exists and image_params.get('is_up_to_date', False)\n should_skip_tiles[output_file] = should_skip\n\n if all(should_skip_tiles.values()):\n logger.warning(f'Not saving tile level {level} ty {ty} tx {tx}')\n logger.warning(f'Every mask {filename} exists with same rendering settings')\n return\n\n if self.reader == 'tifffile':\n mask_tiles = self.generate_mask_tiles(\n filename, mask_params, tile_size, level, tx, ty, should_skip_tiles\n )\n\n for mask_tile in mask_tiles:\n img = mask_tile.get('img', None)\n empty_file = mask_tile.get('empty_file', None)\n output_file = mask_tile.get('output_file', None)\n\n if all([img, output_file]):\n img.save(output_file, compress_level=1)\n elif empty_file is not None:\n if not os.path.exists(empty_file):\n with open(empty_file, 'w'):\n pass\n\n def return_tile(self, output_file, settings, tile_size, level, tx, ty):\n if self.reader == 'tifffile' and self.is_rgba('3 channel'):\n\n num_channels = self.get_shape()[0]\n tile_0 = self.get_tifffile_tile(num_channels, level, tx, ty, 0, tile_size)\n tile_1 = self.get_tifffile_tile(num_channels, level, tx, ty, 1, tile_size)\n tile_2 = self.get_tifffile_tile(num_channels, level, tx, ty, 2, tile_size)\n tile = np.zeros((tile_0.shape[0], tile_0.shape[1], 3), dtype=np.uint8)\n tile[:, :, 0] = tile_0\n tile[:, :, 1] = tile_1\n tile[:, :, 2] = tile_2\n\n return Image.fromarray(tile, 'RGB')\n\n elif self.reader == 'tifffile' and self.is_rgba('1 channel'):\n\n num_channels = self.get_shape()[0]\n tile = self.get_tifffile_tile(num_channels, level, tx, ty, 0, tile_size)\n\n return Image.fromarray(tile, 'RGB')\n\n elif self.reader == 'tifffile':\n target = None\n for i, (marker, color, start, end) in enumerate(zip(\n settings['Channel Number'], settings['Color'],\n settings['Low'], settings['High']\n )):\n num_channels = self.get_shape()[0]\n tile = self.get_tifffile_tile(num_channels, level, tx, ty, int(marker), tile_size)\n\n if (tile.dtype != np.uint16):\n if tile.dtype == np.uint8:\n tile = 255 * tile.astype(np.uint16)\n else:\n tile = tile.astype(np.uint16)\n\n if i == 0 or target is None:\n target = np.zeros(tile.shape + (3,), np.float32)\n\n composite_channel(\n target, tile, colors.to_rgb(color), float(start), float(end)\n )\n\n if target is not None:\n np.clip(target, 0, 1, out=target)\n target_u8 = (target * 255).astype(np.uint8)\n return Image.frombytes('RGB', target.T.shape[1:], target_u8.tobytes())\n\n elif self.reader == 'openslide':\n reverse_level = self.dz.level_count - 1 - level\n return self.dz.get_tile(reverse_level, (tx, ty))\n\n def 
save_tile(self, output_file, settings, tile_size, level, tx, ty):\n img = self.return_tile(output_file, settings, tile_size, level, tx, ty)\n img.save(output_file, quality=85)\n\n\ndef api_error(status, message):\n return jsonify({\n \"error\": message\n }), status\n\n\ndef reset_globals():\n _g = {\n 'logger': logging.getLogger('app'),\n 'import_pool': ThreadPoolExecutor(max_workers=1),\n 'preview_cache': {},\n 'image_openers': {},\n 'mask_openers': {},\n 'save_progress': {},\n 'save_progress_max': {}\n }\n _g['logger'].setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(FORMATTER)\n _g['logger'].addHandler(ch)\n return _g\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder at _MEIPASS\n base_path = sys._MEIPASS\n except AttributeError:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\n\nG = reset_globals()\ntiff_lock = multiprocessing.Lock()\nmask_lock = multiprocessing.Lock()\napp = Flask(__name__,\n static_folder=resource_path('static'),\n static_url_path='')\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\ndef cache_opener(path, opener, key, multi_lock):\n global G\n if isinstance(opener, Opener):\n multi_lock.acquire()\n G[key][path] = opener\n multi_lock.release()\n return True\n return False\n\n\ndef cache_image_opener(path, opener):\n return cache_opener(path, opener, 'image_openers', tiff_lock)\n\n\ndef cache_mask_opener(path, opener):\n return cache_opener(path, opener, 'mask_openers', mask_lock)\n\n\ndef return_opener(path, key):\n if path not in G[key]:\n try:\n opener = Opener(path)\n return opener if opener.reader is not None else None\n except (FileNotFoundError, TiffFileError) as e:\n print(e)\n return None\n else:\n return G[key][path]\n\n\ndef convert_mask(path):\n sys.stdout.reconfigure(line_buffering=True)\n\n ome_path = tif_path_to_ome_path(path)\n if os.path.exists(ome_path):\n return\n\n print(f'Converting {path}')\n tmp_dir = 'minerva_author_tmp_dir'\n tmp_dir = os.path.join(os.path.dirname(path), tmp_dir)\n tmp_path = os.path.join(tmp_dir, 'tmp.tif')\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n if (os.path.exists(tmp_path)):\n os.remove(tmp_path)\n make_ome(\n [pathlib.Path(path)], pathlib.Path(tmp_path),\n is_mask=True, pixel_size=1\n )\n os.rename(tmp_path, ome_path)\n if os.path.exists(tmp_dir) and not len(os.listdir(tmp_dir)):\n os.rmdir(tmp_dir)\n print(f'Done creating {ome_path}')\n\n\ndef open_input_mask(path, convert=False):\n opener = None\n invalid = True\n ext = check_ext(path)\n if ext == '.ome.tif' or ext == '.ome.tiff':\n opener = return_opener(path, 'mask_openers')\n elif ext == '.tif' or ext == '.tiff':\n ome_path = tif_path_to_ome_path(path)\n convertable = os.path.exists(path) and not os.path.exists(ome_path)\n if convert and convertable:\n G['import_pool'].submit(convert_mask, path)\n elif os.path.exists(ome_path):\n opener = return_opener(ome_path, 'mask_openers')\n path = ome_path\n invalid = False\n\n success = cache_mask_opener(path, opener)\n return False if success else invalid\n\n\ndef check_mask_opener(path):\n global G\n opener = None\n ext = check_ext(path)\n\n if ext == '.ome.tif' or ext == '.ome.tiff':\n opener = G['mask_openers'].get(path)\n elif ext == '.tif' or ext == '.tiff':\n ome_path = tif_path_to_ome_path(path)\n opener = G['mask_openers'].get(ome_path)\n\n # Remove invalid openers\n if opener and 
not os.path.exists(opener.path):\n mask_lock.acquire()\n opener.close()\n G['mask_openers'].pop(opener.path, None)\n mask_lock.release()\n return None\n\n return opener\n\n\ndef return_mask_opener(path, convert):\n invalid = True\n if check_mask_opener(path) is None:\n invalid = open_input_mask(path, convert)\n opener = check_mask_opener(path)\n return (invalid, opener)\n\n\ndef return_image_opener(path):\n opener = return_opener(path, 'image_openers')\n success = cache_image_opener(path, opener)\n return (not success, opener)\n\n\ndef nocache(view):\n @wraps(view)\n def no_cache(*args, **kwargs):\n response = make_response(view(*args, **kwargs))\n response.headers['Last-Modified'] = datetime.now()\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n return update_wrapper(no_cache, view)\n\n\ndef load_mask_state_subsets(filename):\n all_mask_states = {}\n path = pathlib.Path(filename)\n if not path.is_file() or path.suffix != '.csv':\n return None\n\n with open(path, encoding='utf-8-sig') as cf:\n state_labels = []\n for row in csv.DictReader(cf):\n if 'CellID' not in row:\n print(f'No CellID found in {filename}')\n break\n try:\n cell_id = int(row.get('CellID', None))\n except TypeError:\n print(f'Cannot parse CellID in {filename}')\n continue\n\n # Determine whether to use State or sequentially numbered State\n if not len(state_labels):\n state_labels = ['State']\n if state_labels[0] not in row:\n state_labels = []\n for i in range(1, 10):\n state_i = f'State{i}'\n if state_i not in row:\n break\n state_labels.append(state_i)\n\n if not len(state_labels):\n print(f'No State headers found in {filename}')\n break\n\n # Load from each State label\n for state_i in state_labels:\n cell_state = row.get(state_i, '')\n if cell_state == '':\n print(f'Empty {state_i} for CellID \"{cell_id}\" in {filename}')\n continue\n\n mask_subsets = all_mask_states.get(state_i, {})\n mask_group = mask_subsets.get(cell_state, set())\n mask_group.add(cell_id)\n\n mask_subsets[cell_state] = mask_group\n all_mask_states[state_i] = mask_subsets\n\n if not len(all_mask_states):\n return None\n\n return {\n state: {\n k: sorted(v) for (k, v) in mask_subsets.items()\n } for (state, mask_subsets) in all_mask_states.items()\n }\n\n\ndef reload_all_mask_state_subsets(masks):\n all_mask_state_subsets = {}\n\n def is_mask_ok(mask):\n return 'map_path' in mask and 'channels' in mask\n\n for mask in masks:\n if is_mask_ok(mask):\n all_mask_state_subsets[mask['map_path']] = {}\n\n for map_path in all_mask_state_subsets:\n mask_state_subsets = load_mask_state_subsets(map_path)\n if mask_state_subsets is not None:\n all_mask_state_subsets[map_path] = mask_state_subsets\n\n for mask in masks:\n if not is_mask_ok(mask):\n continue\n\n mask_state_subsets = all_mask_state_subsets.get(mask['map_path'], {})\n\n # Support version 1.5.0 or lower\n mask_label = mask.get('label')\n default_label = mask.get('original_label')\n default_label = default_label if default_label else mask_label\n\n for chan in mask['channels']:\n state_label = chan.get('state_label', 'State')\n original_label = chan.get('original_label')\n original_label = original_label if original_label else default_label\n chan['ids'] = mask_state_subsets.get(state_label, {}).get(original_label, [])\n chan['original_label'] = original_label\n chan['state_label'] = state_label\n\n return 
masks\n\n\n@app.route('/')\n@nocache\ndef root():\n \"\"\"\n Serves the minerva-author web UI\n \"\"\"\n return app.send_static_file('index.html')\n\n\n@app.route('/story/<session>/', defaults={'path': 'index.html'})\n@app.route('/story/<session>/<path:path>')\n@cross_origin()\n@nocache\ndef out_story(session, path):\n \"\"\"\n Serves any file path in the given story preview\n Args:\n session: unique string identifying save output\n path: any file path in story preview\n Returns: content of any given file\n \"\"\"\n cache_dict = G['preview_cache'].get(session, {})\n path_cache = cache_dict.get(path, None)\n\n not_found = '''\n \n \n \n \n Please restart Minerva Author and\n return here to reload your save file.\n \n \n '''\n\n if path_cache is None:\n response = make_response(not_found, 404)\n response.mimetype = \"text/html\"\n return response\n\n args = path_cache.get('args', [])\n kwargs = path_cache.get('kwargs', {})\n mimetype = path_cache.get('mimetype', None)\n function = path_cache.get('function', lambda: None)\n out_file = function(*args, **kwargs)\n\n try:\n if mimetype:\n return send_file(out_file, mimetype=mimetype)\n else:\n return send_file(out_file)\n except Exception:\n message = f'Unable to preview {session}/{path}'\n return api_error(500, message)\n\n\n@app.route('/api/validate/u32/<key>')\n@cross_origin()\n@nocache\ndef u32_validate(key):\n \"\"\"\n Returns status for given image mask\n Args:\n key: URL-escaped path to mask\n\n Returns: status dict\n invalid: whether the original path does not exist\n ready: whether the ome-tiff version of the path is ready\n path: the ome-tiff version of the path\n \"\"\"\n path = unquote(key)\n\n # Open the input file on the first request only\n (invalid, opener) = return_mask_opener(path, convert=True)\n\n return jsonify({\n \"invalid\": invalid,\n \"ready\": True if isinstance(opener, Opener) else False,\n \"path\": opener.path if isinstance(opener, Opener) else ''\n })\n\n\n@app.route('/api/mask_subsets/<key>')\n@cross_origin()\n@nocache\ndef mask_subsets(key):\n \"\"\"\n Returns the dictionary of mask subsets\n Args:\n key: URL-escaped path to mask group csv file\n\n Returns: Dictionary mapping mask subsets to cell ids\n\n \"\"\"\n path = unquote(key)\n\n if not os.path.exists(path):\n response = make_response('Not found', 404)\n response.mimetype = \"text/plain\"\n return response\n\n mask_state_subsets = load_mask_state_subsets(path)\n if mask_state_subsets is None:\n response = make_response('Not found', 404)\n response.mimetype = \"text/plain\"\n return response\n\n mask_states = []\n mask_subsets = []\n for (mask_state, state_subsets) in mask_state_subsets.items():\n for (k, v) in state_subsets.items():\n mask_states.append(mask_state)\n mask_subsets.append([k, v])\n\n return jsonify({\n 'mask_states': mask_states,\n 'mask_subsets': mask_subsets,\n 'subset_colors': [colorize_integer(v[0]) for [k, v] in mask_subsets]\n })\n\n\n@app.route('/api/u32/<key>/<level>_<x>_<y>.png')\n@cross_origin()\n@nocache\ndef u32_image(key, level, x, y):\n \"\"\"\n Returns a 32-bit tile from given image mask\n Args:\n key: URL-escaped path to mask\n level: Pyramid level\n x: Tile coordinate x\n y: Tile coordinate y\n\n Returns: Tile image in png format\n\n \"\"\"\n img_io = None\n path = unquote(key)\n\n # Open the input file without allowing any conversion\n (invalid, opener) = return_mask_opener(path, convert=False)\n\n if isinstance(opener, Opener):\n img_io = render_tile(opener, int(level), int(x), int(y), 0, 'RGBA')\n\n if img_io is None:\n response = make_response('Not found', 404)\n 
response.mimetype = \"text/plain\"\n return response\n\n return send_file(img_io, mimetype='image/png')\n\n\n@app.route('/api/u16///__.png')\n@cross_origin()\n@nocache\ndef u16_image(key, channel, level, x, y):\n \"\"\"\n Returns a single channel 16-bit tile from the image\n Args:\n key: URL-escaped path to image\n channel: Image channel\n level: Pyramid level\n x: Tile coordinate x\n y: Tile coordinate y\n\n Returns: Tile image in png format\n\n \"\"\"\n img_io = None\n path = unquote(key)\n\n # Open the input file if not already open\n (invalid, opener) = return_image_opener(path)\n\n if opener and not invalid:\n img_io = render_tile(\n opener, int(level),\n int(x), int(y), int(channel)\n )\n\n if img_io is None:\n response = make_response('Not found', 404)\n response.mimetype = \"text/plain\"\n return response\n\n return send_file(img_io, mimetype='image/png')\n\n\ndef make_saved_chan(chan):\n # We consider ids too large to store\n return {k: v for (k, v) in chan.items() if k != 'ids'}\n\n\ndef make_saved_mask(mask):\n new_mask = {k: v for (k, v) in mask.items() if k != 'channels'}\n new_mask['channels'] = list(map(make_saved_chan, mask.get('channels', [])))\n return new_mask\n\n\ndef make_saved_file(data):\n new_copy = {k: v for (k, v) in data.items() if k != 'masks'}\n new_copy['masks'] = list(map(make_saved_mask, data.get('masks', [])))\n return new_copy\n\n\n@app.route('/api/save/', methods=['POST'])\n@cross_origin()\n@nocache\ndef api_save(session):\n \"\"\"\n Saves minerva-author project information in json file.\n Args:\n session: unique string identifying save output\n Returns: OK on success\n\n \"\"\"\n if request.method == 'POST':\n data = request.json\n data = make_saved_file(data)\n\n root_dir = data['root_dir']\n out_name = data['out_name']\n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n\n out_dir, out_yaml, out_dat, out_log = get_story_folders(out_name, root_dir)\n\n saved = load_saved_file(out_dat)[0]\n # Only relegate to autosave if save file exists\n if saved and data.get('is_autosave'):\n # Copy new data to autosave and copy old saved to data\n data['autosave'] = copy_saved_states(data, {})\n data = copy_saved_states(saved, data)\n # Set the autosave timestamp\n data['autosave']['timestamp'] = time.time()\n else:\n # Set the current timestamp\n data['timestamp'] = time.time()\n # Persist old autosaves just in case\n if saved and 'autosave' in saved:\n data['autosave'] = saved['autosave']\n\n with open(out_dat, 'w') as out_file:\n json.dump(data, out_file)\n\n # Make a copy of the visualization csv files\n # for use with save_exhibit_pyramid.py\n copy_vis_csv_files(data['waypoints'], pathlib.Path(out_dat))\n\n return 'OK'\n\n\ndef render_progress_callback(current, maximum, session, key='default'):\n G['save_progress_max'][session] = G['save_progress_max'].get(session, {})\n G['save_progress'][session] = G['save_progress'].get(session, {})\n G['save_progress_max'][session][key] = maximum\n G['save_progress'][session][key] = current\n\n\ndef create_progress_callback(maximum, session='default', key='default'):\n def progress_callback(_current, _maximum=maximum):\n render_progress_callback(_current, _maximum, session, key)\n progress_callback(0)\n return progress_callback\n\n\n@app.route('/api/render//progress', methods=['GET'])\n@cross_origin()\n@nocache\ndef get_render_progress(session):\n \"\"\"\n Returns progress of rendering of tiles (0-100). 
The progress bar in minerva-author-ui uses this endpoint.\n Args:\n session: unique string identifying save output\n Returns: JSON which contains progress and max\n \"\"\"\n\n return jsonify({\n \"progress\": sum(G['save_progress'].get(session, {}).values()),\n \"max\": sum(G['save_progress_max'].get(session, {}).values())\n })\n\n\ndef format_arrow(a):\n return {\n 'Text': a['text'],\n 'HideArrow': a['hide'],\n 'Point': a['position'],\n 'Angle': 60 if a['angle'] == '' else a['angle']\n }\n\n\ndef format_overlay(o):\n return {\n 'x': o[0],\n 'y': o[1],\n 'width': o[2],\n 'height': o[3]\n }\n\n\ndef make_waypoints(d, mask_data, vis_path_dict={}):\n\n for waypoint in d:\n mask_labels = []\n if len(mask_data) > 0:\n wp_masks = waypoint['masks']\n mask_labels = [mask_label_from_index(mask_data, i) for i in wp_masks]\n wp = {\n 'Name': waypoint['name'],\n 'Description': waypoint['text'],\n 'Arrows': list(map(format_arrow, waypoint['arrows'])),\n 'Overlays': list(map(format_overlay, waypoint['overlays'])),\n 'Group': waypoint['group'],\n 'Masks': mask_labels,\n 'ActiveMasks': mask_labels,\n 'Zoom': waypoint['zoom'],\n 'Pan': waypoint['pan']\n }\n for vis in ['VisScatterplot', 'VisCanvasScatterplot', 'VisMatrix']:\n if vis in waypoint:\n wp[vis] = waypoint[vis]\n wp[vis]['data'] = vis_path_dict[wp[vis]['data']]\n\n if 'VisBarChart' in waypoint:\n wp['VisBarChart'] = vis_path_dict[waypoint['VisBarChart']]\n\n yield wp\n\n\ndef make_stories(d, mask_data=[], vis_path_dict={}):\n return [{\n 'Name': '',\n 'Description': '',\n 'Waypoints': list(make_waypoints(d, mask_data, vis_path_dict))\n }]\n\n\ndef make_mask_yaml(mask_data):\n for (i, mask) in enumerate(mask_data):\n yield {\n 'Path': mask_path_from_index(mask_data, i),\n 'Name': mask_label_from_index(mask_data, i),\n 'Colors': [c['color'] for c in mask['channels']],\n 'Channels': [c['label'] for c in mask['channels']]\n }\n\n\ndef make_group_path(groups, group):\n c_path = '--'.join(\n str(c['id']) + '__' + label_to_dir(c['label'])\n for c in group['channels']\n )\n g_path = group_path_from_label(groups, group['label'])\n return g_path + '_' + c_path\n\n\ndef make_groups(d):\n for group in d:\n yield {\n 'Name': group['label'],\n 'Path': make_group_path(d, group),\n 'Colors': [c['color'] for c in group['channels']],\n 'Channels': [c['label'] for c in group['channels']]\n }\n\n\ndef make_rows(d):\n for group in d:\n channels = group['channels']\n yield {\n 'Group Path': make_group_path(d, group),\n 'Channel Number': [str(c['id']) for c in channels],\n 'Low': [int(65535 * c['min']) for c in channels],\n 'High': [int(65535 * c['max']) for c in channels],\n 'Color': ['#' + c['color'] for c in channels]\n }\n\n\ndef make_mask_rows(out_dir, mask_data, session):\n all_mask_params = {}\n\n for (i, mask) in enumerate(mask_data):\n\n mask_params = {\n 'opener': None,\n 'images': []\n }\n mask_path = mask['path']\n\n if mask_path in all_mask_params:\n mask_params = all_mask_params[mask_path]\n else:\n # Open the input file without allowing any conversion\n (invalid, mask_opener) = return_mask_opener(mask_path, convert=False)\n mask_params['opener'] = mask_opener\n\n if isinstance(mask_params['opener'], Opener):\n mask_opener = mask_params['opener']\n num_levels = mask_opener.get_shape()[1]\n mask_total = _calculate_total_tiles(mask_opener, 1024, num_levels)\n mask_params['images'].append({\n 'settings': {\n 'channels': [{\n 'ids': c['ids'],\n 'color': '#'+c['color'],\n 'opacity': c['opacity']\n } for c in mask['channels']],\n 'source': str(mask_path)\n },\n 
'progress': create_progress_callback(mask_total, session, str(i)),\n 'out_path': pathlib.Path(mask_path_from_index(mask_data, i, out_dir))\n })\n all_mask_params[mask_path] = mask_params\n else:\n print(f'Unable to access mask at {mask_path}')\n\n return all_mask_params.values()\n\n\ndef write_json_file(data):\n bytes_io = io.BytesIO()\n data_bytes = str.encode(json.dumps(data))\n bytes_io.write(data_bytes)\n bytes_io.seek(0)\n return bytes_io\n\n\ndef make_exhibit_config(opener, out_name, data):\n\n mask_data = data['masks']\n group_data = data['groups']\n waypoint_data = data['waypoints']\n vis_path_dict = deduplicate_data(waypoint_data, 'data')\n\n (num_channels, num_levels, width, height) = opener.get_shape()\n\n _config = {\n 'Images': [{\n 'Name': 'i0',\n 'Description': data['image']['description'],\n 'Path': 'images/' + out_name,\n 'Width': width,\n 'Height': height,\n 'MaxLevel': num_levels - 1\n }],\n 'Header': data['header'],\n 'Rotation': data['rotation'],\n 'Layout': {'Grid': [['i0']]},\n 'Stories': make_stories(waypoint_data, mask_data, vis_path_dict),\n 'Masks': list(make_mask_yaml(mask_data)),\n 'Groups': list(make_groups(group_data))\n }\n return _config\n\n\ndef render_image_tile(output_file, settings, **kwargs):\n tile_size = kwargs.get('tile_size', 1024)\n level = kwargs.get('level', 0)\n tx = kwargs.get('tx', 0)\n ty = kwargs.get('ty', 0)\n opener = kwargs['opener']\n img = opener.return_tile(output_file, settings, tile_size, level, tx, ty)\n img_io = io.BytesIO()\n img.save(img_io, 'JPEG', quality=85)\n img_io.seek(0)\n return img_io\n\n\ndef add_image_tiles_to_dict(cache_dict, config_rows, opener, out_dir_rel):\n output_path = pathlib.Path(out_dir_rel)\n ext = 'jpg'\n\n for settings in config_rows:\n num_levels = opener.get_shape()[1]\n group_dir = settings.get('Group Path', None)\n if group_dir is None:\n print('Missing group path for image')\n continue\n # Cache tile parameters for every tile\n for level in range(num_levels):\n (nx, ny) = opener.get_level_tiles(level, 1024)\n for ty, tx in itertools.product(range(0, ny), range(0, nx)):\n filename = '{}_{}_{}.{}'.format(level, tx, ty, ext)\n output_file = str(output_path / group_dir / filename)\n cache_dict[output_file] = {\n \"function\": render_image_tile,\n \"mimetype\": f'image/{ext}',\n \"args\": [output_file, settings],\n \"kwargs\": {\n 'opener': opener,\n 'tile_size': 1024,\n 'level': level,\n 'tx': tx,\n 'ty': ty\n }\n }\n\n return cache_dict\n\n\ndef render_mask_tile(filename, mask_params, **kwargs):\n tile_size = kwargs.get('tile_size', 1024)\n level = kwargs.get('level', 0)\n tx = kwargs.get('tx', 0)\n ty = kwargs.get('ty', 0)\n opener = mask_params['opener']\n # We expect the mask params to only contain one image\n mask_tiles = opener.generate_mask_tiles(\n filename, mask_params, tile_size, level, tx, ty\n )\n img = next(mask_tiles, {}).get('img', None)\n img_io = io.BytesIO()\n if img is not None:\n img.save(img_io, 'PNG', compress_level=1)\n img_io.seek(0)\n return img_io\n\n\ndef add_mask_tiles_to_dict(cache_dict, mask_config_rows):\n all_mask_params = []\n ext = 'png'\n # Mask params must no longer be optimized for saving\n for mask_params in mask_config_rows:\n # Unpack all images from all mask params\n for image_params in mask_params.get('images', []):\n mask_params_copy = {\n 'opener': mask_params['opener'],\n 'images': [image_params]\n }\n all_mask_params.append(mask_params_copy)\n\n for mask_params in all_mask_params:\n opener = mask_params['opener']\n num_levels = opener.get_shape()[1]\n 
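# each params copy carries exactly one image entry (unpacked above), so its single out_path names every tile file below\n 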
image_params = mask_params.get('images', [None])[0]\n output_path = image_params.get('out_path', None)\n if not all([image_params, output_path]):\n print('Missing image path for mask')\n continue\n # Cache tile parameters for every tile\n for level in range(num_levels):\n (nx, ny) = opener.get_level_tiles(level, 1024)\n for ty, tx in itertools.product(range(0, ny), range(0, nx)):\n filename = '{}_{}_{}.{}'.format(level, tx, ty, ext)\n output_file = str(output_path / filename)\n cache_dict[output_file] = {\n \"function\": render_mask_tile,\n \"mimetype\": f'image/{ext}',\n \"args\": [filename, mask_params],\n \"kwargs\": {\n 'tile_size': 1024,\n 'level': level,\n 'tx': tx,\n 'ty': ty\n }\n }\n\n return cache_dict\n\n\n@app.route('/api/preview/<session>', methods=['POST'])\n@cross_origin()\n@nocache\ndef api_preview(session):\n \"\"\"\n Caches all preview parameters for given session\n Args:\n session: unique string identifying save output\n Returns: OK on success\n\n \"\"\"\n global G\n\n cache_dict = {}\n\n if request.method == 'POST':\n\n path = request.json['in_file']\n out_name = request.json['out_name']\n (invalid, opener) = return_image_opener(path)\n # Ensure path is relative to output directory\n out_dir_rel = get_story_folders(out_name, '')[0]\n out_dir_rel = pathlib.Path(*pathlib.Path(out_dir_rel).parts[1:])\n\n if invalid or not opener:\n return api_error(404, 'Image file not found: ' + str(path))\n\n config_rows = list(make_rows(request.json['groups']))\n mask_config_rows = list(make_mask_rows(out_dir_rel, request.json['masks'],\n session))\n exhibit_config = make_exhibit_config(opener, out_name, request.json)\n cache_dict['exhibit.json'] = {\n \"function\": write_json_file,\n \"args\": [exhibit_config],\n \"mimetype\": 'text/json'\n }\n index_filename = os.path.join(get_story_dir(), 'index.html')\n cache_dict['index.html'] = {\n \"function\": lambda: index_filename\n }\n\n vis_path_dict = deduplicate_data(request.json['waypoints'], 'data')\n for in_path, out_path in vis_path_dict.items():\n cache_dict[out_path] = {\n \"function\": lambda: in_path\n }\n\n cache_dict = add_mask_tiles_to_dict(cache_dict, mask_config_rows)\n cache_dict = add_image_tiles_to_dict(cache_dict, config_rows, opener, out_dir_rel)\n\n G['preview_cache'][session] = cache_dict\n return 'OK'\n\n\n@app.route('/api/render/<session>', methods=['POST'])\n@cross_origin()\n@nocache\ndef api_render(session):\n \"\"\"\n Renders all image tiles and saves them under new minerva-story instance.\n Args:\n session: unique string identifying save output\n Returns: OK on success\n\n \"\"\"\n G['save_progress'] = {}\n G['save_progress_max'] = {}\n\n if request.method == 'POST':\n\n path = request.json['in_file']\n root_dir = request.json['root_dir']\n out_name = request.json['out_name']\n\n (invalid, opener) = return_image_opener(path)\n out_dir, out_yaml, out_dat, out_log = get_story_folders(out_name, root_dir)\n\n if invalid or not opener:\n return api_error(404, 'Image file not found: ' + str(path))\n\n data = request.json['groups']\n mask_data = request.json['masks']\n waypoint_data = request.json['waypoints']\n config_rows = list(make_rows(data))\n create_story_base(out_name, waypoint_data, mask_data, folder=root_dir)\n exhibit_config = make_exhibit_config(opener, out_name, request.json)\n\n with open(out_yaml, 'w') as wf:\n json.dump(exhibit_config, wf)\n\n mask_config_rows = make_mask_rows(out_dir, mask_data, session)\n\n # Render all uint16 image channels\n render_color_tiles(opener, out_dir, 1024, config_rows, G['logger'],\n 
progress_callback=create_progress_callback(0, session))\n\n # Render all uint32 segmentation masks\n for mask_params in mask_config_rows:\n render_u32_tiles(mask_params, 1024, G['logger'])\n\n return 'OK'\n\n\n@app.route('/api/import/groups', methods=['POST'])\n@cross_origin()\n@nocache\ndef api_import_groups():\n if request.method == 'POST':\n data = request.json\n input_file = pathlib.Path(data['filepath'])\n if not os.path.exists(input_file):\n return api_error(404, 'File not found: ' + str(input_file))\n\n saved = load_saved_file(input_file)[0]\n if not saved or 'groups' not in saved:\n return api_error(400, 'File contains invalid groups: ' + str(input_file))\n\n return jsonify({\n 'groups': saved['groups']\n })\n\n\ndef load_saved_file(input_file):\n saved = None\n autosaved = None\n input_path = pathlib.Path(input_file)\n if not input_path.exists():\n return (None, None)\n\n if input_path.suffix == '.dat':\n saved = pickle.load(open(input_path, \"rb\"))\n else:\n with open(input_path) as json_file:\n saved = json.load(json_file)\n autosaved = saved.get(\"autosave\")\n\n return (saved, autosaved)\n\n\ndef copy_saved_states(from_save, to_save):\n saved_keys = [\n 'sample_info', 'waypoints', 'groups', 'masks',\n 'in_file', 'csv_file', 'root_dir'\n ]\n for saved_key in saved_keys:\n if saved_key in from_save:\n to_save[saved_key] = from_save[saved_key]\n\n return to_save\n\n\ndef is_new_autosave(saved, autosaved):\n if saved is None or autosaved is None:\n return False\n\n autosaved_time = autosaved.get(\"timestamp\")\n saved_time = saved.get(\"timestamp\")\n if autosaved_time:\n if saved_time:\n # Decide if new autosave\n return autosaved_time > saved_time\n else:\n # Save file from before v1.6.0\n return True\n else:\n # Malformed autosave\n return False\n\n\n@app.route('/api/import', methods=['POST'])\n@cross_origin()\n@nocache\ndef api_import():\n if request.method == 'POST':\n response = {}\n chan_label = {}\n data = request.form\n default_out_name = 'out'\n input_file = pathlib.Path(data['filepath'])\n input_image_file = pathlib.Path(data['filepath'])\n loading_saved_file = input_file.suffix in ['.dat', '.json']\n root_dir = get_current_dir()\n\n if not os.path.exists(input_file):\n return api_error(404, 'Image file not found: ' + str(input_file))\n\n if (loading_saved_file):\n default_out_name = extract_story_json_stem(input_file)\n # autosave_logic should be \"ask\", \"skip\", or \"load\"\n autosave_logic = data.get(\"autosave_logic\", \"skip\")\n autosave_error = autosave_logic == \"ask\"\n\n (saved, autosaved) = load_saved_file(input_file)\n root_dir = os.path.dirname(input_file)\n\n if is_new_autosave(saved, autosaved):\n # We need to know whether to use autosave file\n if autosave_error:\n action = 'AUTO ASK ERR'\n return api_error(400, f'{action}: Autosave Error')\n # We will load a new autosave file\n elif autosave_logic == \"load\":\n saved = copy_saved_states(autosaved, saved)\n\n input_image_file = pathlib.Path(saved['in_file'])\n\n if (data['csvpath']):\n csv_file = pathlib.Path(data['csvpath'])\n if not os.path.exists(csv_file):\n return api_error(404, 'Marker csv file not found: ' + str(csv_file))\n else:\n csv_file = pathlib.Path(saved['csv_file'])\n if 'sample_info' in saved:\n response['sample_info'] = saved['sample_info']\n if 'rotation' not in response['sample_info']:\n response['sample_info']['rotation'] = 0\n\n if 'masks' in saved:\n # This step could take up to a minute\n response['masks'] = reload_all_mask_state_subsets(saved['masks'])\n\n 
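# restore waypoints and channel groups exactly as saved; chan_label seeds the marker-name lookup used later by yield_labels\n 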
response['waypoints'] = saved['waypoints']\n response['groups'] = saved['groups']\n for group in saved['groups']:\n for chan in group['channels']:\n chan_label[str(chan['id'])] = chan['label']\n else:\n csv_file = pathlib.Path(data['csvpath'])\n\n out_name = label_to_dir(data['dataset'], empty=default_out_name)\n if out_name == '':\n out_name = default_out_name\n\n out_dir, out_yaml, out_dat, out_log = get_story_folders(out_name, root_dir)\n\n if not loading_saved_file and os.path.exists(out_dat):\n action = 'OUT ASK ERR'\n verb = 'provide an' if out_name == default_out_name else 'change the'\n return api_error(400, f'{action}: Please {verb} output name, as {out_dat} exists.')\n elif loading_saved_file and os.path.exists(out_dat):\n if not os.path.samefile(input_file, out_dat):\n action = 'OUT ASK ERR'\n verb = 'provide an' if out_name == default_out_name else 'change the'\n command = f'Please {verb} output name or directly load {out_dat}'\n return api_error(400, f'{action}: {command}, as that file already exists.')\n\n opener = None\n try:\n print(\"Opening file: \", str(input_image_file))\n\n (invalid, opener) = return_image_opener(str(input_image_file))\n if invalid or not opener:\n return api_error(404, 'Image file not found: ' + str(input_image_file))\n\n (num_channels, num_levels, width, height) = opener.get_shape()\n\n response['maxLevel'] = num_levels - 1\n response['tilesize'] = opener.tilesize\n response['height'] = height\n response['width'] = width\n\n except Exception as e:\n print(e)\n return api_error(500, 'Invalid tiff file')\n\n try:\n labels = list(yield_labels(opener, csv_file, chan_label, num_channels))\n except Exception:\n return api_error(500, \"Error in loading channel marker names\")\n\n fh = logging.FileHandler(str(out_log))\n fh.setLevel(logging.INFO)\n fh.setFormatter(FORMATTER)\n G['logger'].addHandler(fh)\n\n if not os.path.exists(input_image_file):\n error_message = f'Input file {input_image_file} does not exist'\n G['logger'].error(error_message)\n return api_error(404, error_message)\n\n return jsonify({\n 'loaded': True,\n 'channels': labels,\n 'out_name': out_name,\n 'root_dir': str(root_dir),\n 'session': uuid.uuid4().hex,\n 'output_save_file': str(out_dat),\n 'marker_csv_file': str(csv_file),\n 'input_image_file': str(input_image_file),\n 'waypoints': response.get('waypoints', []),\n 'sample_info': response.get('sample_info', {\n 'rotation': 0,\n 'name': '',\n 'text': ''\n }),\n 'masks': response.get('masks', []),\n 'groups': response.get('groups', []),\n 'tilesize': response.get('tilesize', 1024),\n 'maxLevel': response.get('maxLevel', 1),\n 'height': response.get('height', 1024),\n 'width': response.get('width', 1024),\n 'warning': opener.warning if opener else '',\n 'rgba': opener.is_rgba() if opener else False\n })\n\n\n@app.route('/api/filebrowser', methods=['GET'])\n@cross_origin()\n@nocache\ndef file_browser():\n \"\"\"\n Endpoint which allows browsing the local file system\n\n Url parameters:\n path: path to a directory\n parent: if true, returns the contents of parent directory of given path\n Returns:\n Contents of the directory specified by path\n (or parent directory, if parent parameter is set)\n \"\"\"\n folder = request.args.get('path')\n orig_folder = folder\n parent = request.args.get('parent')\n if folder is None or folder == \"\":\n folder = Path.home()\n elif parent == 'true':\n folder = Path(folder).parent\n\n if not os.path.exists(folder):\n return api_error(404, 'Path not found')\n\n response = {\n \"entries\": [],\n \"path\": 
str(folder)\n }\n\n # Windows: When navigating back from drive root\n # we have to show a list of available drives\n is_win_dir = (os.name == 'nt' and folder is not None)\n if is_win_dir and str(orig_folder) == str(folder) and parent == 'true':\n match = re.search('[A-Za-z]:\\\\\\\\$', str(folder)) # C:\\ or D:\\ etc.\n if match:\n drives = _get_drives_win()\n for drive in drives:\n new_entry = {\n \"name\": drive + \":\\\\\",\n \"path\": drive + \":\\\\\",\n \"isDir\": True\n }\n response[\"entries\"].append(new_entry)\n return jsonify(response)\n\n # Return a list of folders and files within the requested folder\n for entry in os.scandir(folder):\n try:\n is_directory = entry.is_dir()\n new_entry = {\n \"name\": entry.name,\n \"path\": entry.path,\n \"isDir\": is_directory\n }\n\n is_broken = False\n is_hidden = entry.name[0] == '.'\n\n if not is_directory:\n try:\n stat_result = entry.stat()\n new_entry[\"size\"] = stat_result.st_size\n new_entry[\"ctime\"] = stat_result.st_ctime\n new_entry[\"mtime\"] = stat_result.st_mtime\n except FileNotFoundError:\n is_broken = True\n\n if not is_hidden and not is_broken:\n response[\"entries\"].append(new_entry)\n except PermissionError:\n pass\n\n return jsonify(response)\n\n\ndef _get_drives_win():\n '''\n Returns a list of drive letters in Windows\n https://stackoverflow.com/a/827398\n '''\n drives = []\n bitmask = windll.kernel32.GetLogicalDrives()\n for letter in string.ascii_uppercase:\n if bitmask & 1:\n drives.append(letter)\n bitmask >>= 1\n\n return drives\n\n\ndef close_tiff():\n print(\"Closing tiff files\")\n for opener in G['image_openers'].values():\n try:\n opener.close()\n except Exception as e:\n print(e)\n\n\ndef close_masks():\n print(\"Closing mask files\")\n for opener in G['mask_openers'].values():\n try:\n opener.close()\n except Exception as e:\n print(e)\n\n\ndef close_import_pool():\n print(\"Closing import pool\")\n if G['import_pool'] is not None:\n try:\n G['import_pool'].shutdown()\n except Exception as e:\n print(e)\n\n\ndef open_browser():\n webbrowser.open_new('http://127.0.0.1:' + str(PORT) + '/')\n\n\nif __name__ == '__main__':\n Timer(1, open_browser).start()\n\n atexit.register(close_tiff)\n atexit.register(close_masks)\n atexit.register(close_import_pool)\n\n sys.stdout.reconfigure(line_buffering=True)\n\n if '--dev' in sys.argv:\n app.run(debug=False, port=PORT)\n else:\n serve(app, listen=\"127.0.0.1:\" + str(PORT), threads=10)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":58112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283366470","text":"\"\"\"RD_Website URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom login import login_views\npath_base_01 = 'index_new/'\nurlpatterns = [\n path('user_login/', login_views.user_login), # login\n path('user_register/', login_views.user_register), # user registration\n path('index/', login_views.index), # home page\n path('unindex/', login_views.un_index), # home page for anonymous users\n path('logout/', login_views.logout), # logout\n path('captcha/', include('captcha.urls')),\n\n\n path('login_new/', login_views.login_new), # login\n path('index_new/', login_views.frame), # home page\n path('rumor_detect/', login_views.rumor_detect), # detection page\n path('index_inside/', login_views.index_inside), # inner home page\n path('', login_views.unindex_new), # home page for anonymous users\n path('unindex_new/', login_views.unindex_new), # home page for anonymous users\n path('logout_new/', login_views.logout_new), # logout\n path('register_new/', login_views.register_new), # user registration\n path('admin_register/', login_views.admin_register), # administrator registration\n path('personal_center/', login_views.personal_center), # personal center\n path('personal_setting/', login_views.personal_setting), # personal settings\n\n\n]\n","sub_path":"RD_Website/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"609655879","text":"from wmbus import WMbus\nfrom time import sleep\nimport asyncio\nfrom wmbus.devices import Device\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n loop = asyncio.get_event_loop()\n wmbus = WMbus(\"IM871A_USB\")\n target_device = \"b05c74720000021b\"\n\n def handle_new_device(device: Device):\n if device.id == target_device:\n sleep(1)\n device.set_aes_key(\n key=b\"\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0A\\x0B\\x0C\\x0D\\x0E\\x0F\"\n )\n\n wmbus.on_device_registration = handle_new_device\n wmbus.start()\n\n loop.run_forever()\n loop.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"examples/set_aes_key.py","file_name":"set_aes_key.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"393336013","text":"# Produces a sorted list of weighted ratings from a dictionary of\r\n# user ratings and a user id.\r\n# You can choose the function of similarity between users.\r\ndef weightedRating(dictio, user, similarity = pearsonCoeff):\r\n # In the first place a dictionary is generated with the similarities\r\n # of our user with all other users.\r\n # This dictionary could be stored to avoid recomputing it.\r\n simils = {x: similarity(dictio[user], dictio[x])\r\n for x in dictio if x != user}\r\n\r\n # Auxiliary dictionaries {movieId: [rating*users similarity]}\r\n # and {movieId: [users similarity]} (numerator and denominator\r\n # of the weighted rating)\r\n numerator = {}\r\n denominator = {}\r\n\r\n # The ratings dictionary is traversed, while filling the auxiliary\r\n # dictionaries with the values found.\r\n for userId in simils:\r\n for movieId in dictio[userId]:\r\n if not numerator.has_key(movieId):\r\n numerator [movieId] = []\r\n denominator[movieId] = []\r\n s = simils[userId]\r\n numerator [movieId].append(dictio[userId][movieId]*s)\r\n denominator[movieId].append(s)\r\n\r\n # Compute and sort weighted ratings \r\n result = []\r\n for movieId in numerator:\r\n s1 = sum(numerator [movieId])\r\n s2 = sum(denominator[movieId])\r\n if s2 == 0:\r\n mean = 0.0\r\n else:\r\n mean = s1/s2\r\n
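        # Worked example: ratings [4, 2] from users with similarities [0.9, 0.1]
        # give mean = (0.9*4 + 0.1*2)/(0.9 + 0.1) = 3.8, i.e. the similarity-
        # weighted average rating for this movieId.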
result.append((movieId,mean))\r\n\r\n result.sort(key = lambda x: x[1], reverse=True)\r\n return result\r\n","sub_path":"IAAPRAC/materials/Tema2/2.6_WeightedRatings.py","file_name":"2.6_WeightedRatings.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609492544","text":"import torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\nfrom hpbandster.core.worker import Worker\nimport logging\nimport torch.optim as optim\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\"\"\"\n hyperparameter_ranges = {\n 'lr': ContinuousParameter(0.0001, 0.01),\n 'hidden_nodes': IntegerParameter(20, 100),\n 'batch_size': CategoricalParameter([128, 256, 512]),\n 'conv1_channels': CategoricalParameter([32, 64, 128]),\n 'conv2_channels': CategoricalParameter([64, 128, 256, 512]),\n }\n\n\"\"\"\n\n\nclass Net(nn.Module):\n config = None\n\n def __init__(self, config):\n self.config = config\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(\n 3, config['conv1_channels'], 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(config['conv1_channels'],\n config['conv2_channels'], 5)\n self.fc1 = nn.Linear(config['conv2_channels'] *\n 5 * 5, config['hidden_nodes'])\n self.fc2 = nn.Linear(config['hidden_nodes'], 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, self.config['conv2_channels'] * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nclass PyTorchWorker(Worker):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.NUM_WORKERS = 8\n\n # Setup the database\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n self.trainset = torchvision.datasets.CIFAR10(root=\"../data\", train=True, transform=transform, download=True)\n # self.train_loader = torch.utils.data.DataLoader(trainset,\n # batch_size=config.batch_size,\n # shuffle=True,\n # num_workers=self.NUM_WORKERS)\n\n # self.train_loader, self.val_loader = self.get_train_validate_loaders(trainset,\n # train_batch_size=config.batch_size,\n # val_batch_size=config.batch_size,\n # shuffle=True,\n # train_size=40000,\n # val_size=10000)\n\n self.testset = torchvision.datasets.CIFAR10(root=\"../data\", train=False, transform=transform, download=True)\n # self.test_loader = torch.utils.data.DataLoader(testset,\n # batch_size=config.batch_size,\n # shuffle=False,\n # num_workers=self.NUM_WORKERS)\n\n def evaluate_accuracy(self, model, data_loader):\n model.eval()\n correct = 0\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n with torch.no_grad():\n for x, y in data_loader:\n x, y = x.to(device), y.to(device)\n output = model(x)\n # test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(y.view_as(pred)).sum().item()\n # import pdb; pdb.set_trace()\n accuracy = correct / len(data_loader.sampler)\n return (accuracy)\n\n def get_train_validate_loaders(self, full_dataset, train_size=40000, val_size=10000, train_batch_size=64,\n val_batch_size=1024, shuffle=True):\n \"\"\"\n Returns the training and validation data loaders. 
Assume train_size + val_size <= 50,000\n \"\"\"\n # 50000 is the total trainset for CIFAR\n if train_size + val_size == 50000:\n train_dataset, val_dataset = torch.utils.data.random_split(full_dataset, [train_size, val_size])\n else:\n train_dataset, _ = torch.utils.data.random_split(full_dataset, [train_size, (50000 - train_size)])\n _, val_dataset = torch.utils.data.random_split(full_dataset, [(50000 - val_size), val_size])\n\n t_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=train_batch_size, shuffle=shuffle, num_workers=self.NUM_WORKERS\n )\n v_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=val_batch_size, shuffle=shuffle, num_workers=self.NUM_WORKERS\n )\n return t_loader, v_loader\n\n def get_data_loaders(self, config):\n train_loader, val_loader = self.get_train_validate_loaders(self.trainset,\n train_batch_size=config['batch_size'],\n val_batch_size=config['batch_size'],\n shuffle=True,\n train_size=40000,\n val_size=10000)\n test_loader = torch.utils.data.DataLoader(self.testset,\n batch_size=config['batch_size'],\n shuffle=False,\n num_workers=self.NUM_WORKERS)\n return train_loader, val_loader, test_loader\n\n def compute(self, config_id, config, budget, working_directory):\n budget = int(budget)\n MOMENTUM = 0.9\n\n train_loader, val_loader, test_loader = self.get_data_loaders(config)\n net = Net(config)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n net = net.to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=float(\n config['lr']), momentum=MOMENTUM)\n\n for epoch in range(budget):\n running_loss = 0\n for i, data in enumerate(train_loader):\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward pass, loss, backward pass and weight update\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n train_accuracy = self.evaluate_accuracy(net, train_loader)\n val_accuracy = self.evaluate_accuracy(net, val_loader)\n test_accuracy = self.evaluate_accuracy(net, test_loader)\n return ({\n 'loss': 1 - val_accuracy, # remember: HpBandSter always minimizes!\n 'info': {'test accuracy': test_accuracy,\n 'train accuracy': train_accuracy,\n 'validation accuracy': val_accuracy,\n }\n\n })\n\n @staticmethod\n def get_configspace():\n \"\"\"\n It builds the configuration space with the needed hyperparameters.\n It is easily possible to implement different types of hyperparameters.\n Besides float hyperparameters on a log scale, it can also handle categorical input parameters.\n :return: ConfigurationSpace object\n \"\"\"\n cs = CS.ConfigurationSpace()\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-5, upper=1e-1, default_value=1e-2, log=True)\n hidden_nodes = CSH.UniformIntegerHyperparameter('hidden_nodes', lower=20, upper=100)\n batch_size = CSH.CategoricalHyperparameter('batch_size', [128, 256, 512])\n conv1_channels = CSH.CategoricalHyperparameter('conv1_channels', [32, 64, 128])\n conv2_channels = CSH.CategoricalHyperparameter('conv2_channels', [64, 128, 256, 512])\n cs.add_hyperparameters([lr, hidden_nodes, batch_size, conv1_channels, conv2_channels])\n return cs\n\n\nif __name__ == '__main__':\n worker = PyTorchWorker(run_id='0')\n cs = worker.get_configspace()\n c = cs.sample_configuration().get_dictionary()\n print(c)\n res = worker.compute(config_id='1', config=c, budget=2, working_directory='.')\n
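    # compute() returns the dict HpBandSter expects: 'loss' is what gets
    # minimized, so it holds 1 - validation accuracy, and the per-split
    # accuracies sit under 'info', e.g.
    # {'loss': 0.38, 'info': {'validation accuracy': 0.62, ...}} (values illustrative).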
print(res)\n","sub_path":"gcp/dockers/20200306/files/Worker_CIFAR10_Iter.py","file_name":"Worker_CIFAR10_Iter.py","file_ext":"py","file_size_in_byte":8707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"106436792","text":"#\n# @lc app=leetcode.cn id=917 lang=python3\n#\n# [917] Reverse Only Letters\n#\n# https://leetcode-cn.com/problems/reverse-only-letters/description/\n#\n# algorithms\n# Easy (56.12%)\n# Likes: 61\n# Dislikes: 0\n# Total Accepted: 18.2K\n# Total Submissions: 32.4K\n# Testcase Example: '\"ab-cd\"'\n#\n# Given a string S, return the \"reversed\" string where all characters that are not letters\n# stay in the same place, and all letters reverse their positions.\n#\n#\n#\n#\n#\n#\n# Example 1:\n#\n# Input: \"ab-cd\"\n# Output: \"dc-ba\"\n#\n#\n# Example 2:\n#\n# Input: \"a-bC-dEf-ghIj\"\n# Output: \"j-Ih-gfE-dCba\"\n#\n#\n# Example 3:\n#\n# Input: \"Test1ng-Leet=code-Q!\"\n# Output: \"Qedo1ct-eeLg=ntse-T!\"\n#\n#\n#\n#\n# Constraints:\n#\n#\n# S.length <= 100\n# 33 <= S[i].ASCIIcode <= 122 \n# S does not contain \\ or \"\n#\n#\n#\n\n# @lc code=start\nclass Solution:\n def reverseOnlyLetters(self, S: str) -> str:\n res = \"\"\n characters = []\n for index in range(len(S) - 1, -1, -1):\n if S[index].isalpha():\n characters.append(S[index])\n for c in S:\n res = res + characters.pop(0) if c.isalpha() else res + c\n\n return res\n# @lc code=end\n\n","sub_path":"Week_09/917.Reverse Only Letters.py","file_name":"917.Reverse Only Letters.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"97128841","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 17 10:32:33 2021\n\n@author: jashcraft\n\"\"\"\nimport numpy as np\nimport numpy\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n#from skimage.restoration import unwrap_phase\n\nmpl.rcParams['font.family'] = 'sans-serif'\nmpl.rcParams['font.size'] = 18\n\ndef ReadMatlabMat(fn):\n # import h5py\n # f = h5py.File(fn,'r')\n # data = f.get('data/variable1')\n # data = np.array(data)\n\n from scipy.io import loadmat\n data = loadmat(fn)\n data = data['plotbox']\n return data\n\ndef grab_center(array,cut):\n \n d1,d2 = array.shape\n return array[int(d1/2-cut):int(d1/2+cut),int(d2/2-cut):int(d2/2+cut)]\n\ndef zeropadft(array,os):\n # assumes a square array\n \n dim = array.shape[0]*os\n box = np.zeros([dim,dim],dtype='complex128')\n box[int(dim/2-array.shape[0]/2):int(dim/2+array.shape[0]/2),int(dim/2-array.shape[0]/2):int(dim/2+array.shape[0]/2)] = array\n ftbox = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(box)))\n return ftbox\n \n\ndef amp_phase_plot(array,logintensity=True,vmin=None,vmax=None):\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n \n plt.set_cmap('plasma')\n fig,ax = plt.subplots(ncols=2,figsize=[14,7])\n if logintensity == True:\n cmapi = ax[0].imshow(np.abs(array),norm=LogNorm(),origin='lower',vmin=vmin,vmax=vmax)\n else:\n cmapi = ax[0].imshow(np.abs(array),origin='lower',vmin=vmin,vmax=vmax)\n \n \n ax[0].set_title('Irradiance')\n fig.colorbar(cmapi,ax=ax[0])\n \n plt.set_cmap('coolwarm')\n cmapp = ax[1].imshow(np.angle(array),origin='lower',vmin=vmin,vmax=vmax)\n ax[1].set_title('Phase')\n fig.colorbar(cmapp,ax=ax[1])\n \n plt.show()\n \ndef angularspectrum(array,pixelscale,wavelength):\n \n spectrum = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(array)))\n spectrum_phase = unwrap_phase(np.angle(spectrum))\n spectrum_amp = np.abs(spectrum)\n \n spectrum_angle_x,spectrum_angle_y = np.gradient(spectrum_phase,pixelscale)\n \n return spectrum,wavelength*spectrum_angle_x,wavelength*spectrum_angle_y\n\ndef 
hexagonal_grid(radius,spacing=1.):\n \n cosv = np.cos(np.pi/6)\n sinv = np.sin(np.pi/6)\n \n nsteps = int(radius/(spacing*cosv))\n i = np.arange(-nsteps-1, nsteps+2)\n j = np.arange(-nsteps-1, nsteps+2)\n \n vi,vj = np.meshgrid(i,j)\n \n x = (vi + sinv*vj)*spacing\n y = cosv*vj*spacing\n \n r2 = x**2 + y**2\n select = r2 < (radius**2)\n \n return x[select], y[select]\n\ndef fourbyfour(array,x,y,size,coords=None):\n \n import matplotlib.tri as tri\n \n # amax = None\n # amin = None\n # bmax = None\n # bmin = None\n # cmax = None\n # cmin = None \n # dmax = None\n # dmin = None\n\n # What I think they should be\n da = 5e-4\n db = 5\n dc = 5e-5\n dd = 1\n amax = 3.10014059e-15 + da\n amin = 3.10014059e-15 - da\n bmax = -5.75998549e+01+db\n bmin = -5.75998549e+01-db\n cmax = -1.73611548e-02 + dc\n cmin = -1.73611548e-02 - dc\n dmax = -8.22543594 + dd\n dmin = -8.22543594 - dd\n \n Axx = array[0,0,:]\n Axy = array[0,1,:]\n Ayx = array[1,0,:]\n Ayy = array[1,1,:]\n \n Bxx = array[0,2,:]\n Bxy = array[0,3,:]\n Byx = array[1,2,:]\n Byy = array[1,3,:]\n \n Cxx = array[2,0,:]\n Cxy = array[2,1,:]\n Cyx = array[3,0,:]\n Cyy = array[3,1,:]\n \n Dxx = array[2,2,:]\n Dxy = array[2,3,:]\n Dyx = array[3,2,:]\n Dyy = array[3,3,:]\n \n if coords is not None:\n x = coords[0,:]\n y = coords[1,:]\n print(x.shape)\n abcd = [Axx,Axy,Ayx,Ayy,Bxx,Bxy,Byx,Byy,Cxx,Cxy,Cyx,Cyy,Dxx,Dxy,Dyx,Dyy]\n titl = ['Axx','Axy','Ayx','Ayy','Bxx','Bxy','Byx','Byy','Cxx','Cxy','Cyx','Cyy','Dxx','Dxy','Dyx','Dyy']\n \n # for i in range(16):\n \n # # plt.figure()\n # # plt.imshow(np.reshape(abcd[i],[50,50]),vmin=0,vmax=1e-5)\n # # plt.title(titl[i])\n # # plt.colorbar()\n # # plt.show()\n \n # plt.figure()\n # if coords is not None:\n # plt.scatter(x,y,c=abcd[i])\n # else:\n # x = np.linspace(-size/2,size/2,int(np.sqrt(array[0,0,:].size)))\n # x,y = np.meshgrid(x,x)\n # x = np.ravel(x)\n # y = np.ravel(y)\n # plt.scatter(x,y,c=abcd[i])\n # plt.title(titl[i])\n # plt.colorbar()\n # plt.show()\n \n \n fig,ax = plt.subplots(ncols=4,nrows=4,figsize=[14,9])\n \n plt.suptitle('Ray Transfer Matrix')\n \n pca = ax[0,0].scatter(x,y,c=Axx,vmin=amin,vmax=amax)\n ax[0,0].axis('off')\n ax[0,1].scatter(x,y,c=Axy,vmin=amin,vmax=amax)\n ax[0,1].axis('off')\n ax[1,0].scatter(x,y,c=Ayx,vmin=amin,vmax=amax)\n ax[1,0].axis('off')\n ax[1,1].scatter(x,y,c=Ayy,vmin=amin,vmax=amax)\n ax[1,1].axis('off')\n \n pcb = ax[0,2].scatter(x,y,c=Bxx,vmin=bmin,vmax=bmax)\n ax[0,2].axis('off')\n ax[0,3].scatter(x,y,c=Bxy,vmin=bmin,vmax=bmax)\n ax[0,3].axis('off')\n ax[1,2].scatter(x,y,c=Byx,vmin=bmin,vmax=bmax)\n ax[1,2].axis('off')\n ax[1,3].scatter(x,y,c=Byy,vmin=bmin,vmax=bmax)\n ax[1,3].axis('off')\n \n pcc = ax[2,0].scatter(x,y,c=Cxx,vmin=cmin,vmax=cmax)\n ax[2,0].axis('off')\n ax[2,1].scatter(x,y,c=Cxy,vmin=cmin,vmax=cmax)\n ax[2,1].axis('off')\n ax[3,0].scatter(x,y,c=Cyx,vmin=cmin,vmax=cmax)\n ax[3,0].axis('off')\n ax[3,1].scatter(x,y,c=Cyy,vmin=cmin,vmax=cmax)\n ax[3,1].axis('off')\n \n pcd = ax[2,2].scatter(x,y,c=Dxx,vmin=dmin,vmax=dmax)\n ax[2,2].axis('off')\n ax[2,3].scatter(x,y,c=Dxy,vmin=dmin,vmax=dmax)\n ax[2,3].axis('off')\n ax[3,2].scatter(x,y,c=Dyx,vmin=dmin,vmax=dmax)\n ax[3,2].axis('off')\n ax[3,3].scatter(x,y,c=Dyy,vmin=dmin,vmax=dmax)\n ax[3,3].axis('off')\n \n fig.colorbar(pca, ax=ax[0:2,0:2], shrink=0.6, location='right')\n fig.colorbar(pcb, ax=ax[0:2,2:4], shrink=0.6, location='right')\n fig.colorbar(pcc, ax=ax[2:4,0:2], shrink=0.6, location='right')\n fig.colorbar(pcd, ax=ax[2:4,2:4], shrink=0.6, location='right')\n \n plt.show()\n 
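# Minimal usage sketch for hexagonal_grid (illustrative values, not from the
# original source): the function returns the x/y centers of a hex-packed
# lattice with pitch `spacing`, clipped to the circle r < radius.
#
#     hx, hy = hexagonal_grid(radius=5.0, spacing=1.0)
#     plt.scatter(hx, hy, s=6)
#     plt.gca().set_aspect('equal')
#     plt.show()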
","sub_path":"glets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"419931042","text":"#!/usr/bin/python3\nimport uuid\nfrom datetime import datetime\nimport models\n\n\n\"\"\"\nBase class for all models will contain id, created_at\nand updated at attributes. Save() and to_json() methods\n\"\"\"\n\n\nclass BaseModel:\n \"\"\"\n Instantiation of class BaseModel\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n initializing variables\n \"\"\"\n if kwargs:\n for key, value in kwargs.items():\n if key == \"created_at\" or key == \"updated_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n if key != \"__class__\":\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = self.updated_at = datetime.now()\n models.storage.new(self)\n\n def __str__(self):\n \"\"\"\n Method returns string representation\n \"\"\"\n return (\"[{}] ({}) {}\".format(str(type(self).__name__),\n self.id, str(self.__dict__)))\n\n def __repr__(self):\n \"\"\"\n Method returns official repreentations\n of string\n \"\"\"\n cls = self.__class__.__name__\n string = (\"[{}] ({}) {}\".format(cls, self.id, self.__dict__))\n return (string)\n\n def save(self):\n \"\"\"\n Method to update attrb updated_at\n \"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"\n Method to return a dict containing all key/value of __dict__\n instance\n \"\"\"\n dic = dict(**self.__dict__)\n dic['__class__'] = str(type(self).__name__)\n dic['created_at'] = self.created_at.isoformat()\n dic['updated_at'] = self.updated_at.isoformat()\n\n return (dic)\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166701232","text":"MOD = 10 ** 9 + 7\n\nclass Solution:\n def concatenatedBinary(self, n: int) -> int:\n r = 0\n for i in range(1, n + 1):\n t = i.bit_length()\n r = ((r << t) + i) % MOD\n\n return r\n","sub_path":"src/leetcode/P1680.py","file_name":"P1680.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635799637","text":"import sys\nimport random\nimport os\nimport glob\nfrom PIL import Image\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport shutil\nimport numpy as np\n\n# text_fold = 'train_crop_annot_37'\ntext_fold = 'train_crop_annot'\npixel_per_char = 512\ntarShape = (300, 250)\n\n\n\ndef get_text_dir(img_dir,text_fold):\n img_name = img_dir.split('/')[-1]\n text_title = img_name[:-3] + 'txt'\n text_dir = os.path.join(text_fold, text_title)\n return text_title,text_dir\n\n\n\ndef pad_image(image, target_size):\n iw, ih = image.size # source size\n w, h = target_size # target size\n nw = iw\n nh = ih\n if iw>w or ih>h:\n scale = min(w / iw, h / ih) # minimal ratio\n\n\n nw = int(iw * scale)\n nh = int(ih * scale)\n\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', target_size, (128, 128, 128)) # new gray image\n # // round down to int\n new_image.paste(image, ((w - nw) // 2, (h - nh) // 2)) # fill the center of new gray image with target image\n\n return new_image\n\n\n\ndef is_every_char_big_enough(shape,img_path,pixel_per_char):\n area =shape[0]*shape[1]\n _, text_dir = get_text_dir(img_path, text_fold)\n with 
open(text_dir, \"r\", encoding=\"utf-8\") as f_r:\n text = f_r.read()\n area_per_char = area/len(text)\n if area_per_char=int(file_num)):\n break\n\n return sample\n\n# resize and store image with tensorflow methods\n\n# def resize_and_store_image(sample_L,tarShape,tarDir):\n# if not os.path.exists(tarDir):\n# os.makedirs(tarDir)\n#\n# with tf.Session() as sess:\n# cur=1\n# for img_dir in sample_L:\n# # copy correspoding text to tarDir\n# text_title, text_dir = get_text_dir(img_dir, text_fold)\n# shutil.copy(text_dir,os.path.join(tarDir,text_title))\n#\n#\n# print(('{}/{}'+img_dir).format(cur,len(sample_L)))\n# image_raw_data = tf.gfile.FastGFile(img_dir,'rb').read()\n# img_data = tf.image.decode_jpeg(image_raw_data)\n#\n# img_resized = tf.image.resize_images(img_data,tarShape,method=1)\n#\n# # channel_num = img_resized.eval().shape[2]\n# # noise_img = generate_noise_image((150, 450, channel_num))\n# # noise_img = tf.image.convert_image_dtype(noise_img, dtype=tf.uint8)\n# # img_resized = tf.concat([img_resized, noise_img], 1)\n#\n# img_resized = tf.image.convert_image_dtype(img_resized,dtype=tf.uint8)\n# print(type(img_resized))\n# encoded_image = tf.image.encode_jpeg(img_resized)\n# with tf.gfile.GFile(os.path.join(tarDir,img_dir.split('/')[-1]),\"wb\") as f:\n# f.write(encoded_image.eval())\n# cur = cur+1\n\n\ndef resize_and_store_image(sample_L,tarShape,tarDir):\n if not os.path.exists(tarDir):\n os.makedirs(tarDir)\n cur = 1\n for img_dir in sample_L:\n text_title, text_dir = get_text_dir(img_dir, text_fold)\n shutil.copy(text_dir,os.path.join(tarDir,text_title))\n\n\n print(('{}/{}'+img_dir).format(cur,len(sample_L)))\n img = Image.open(img_dir)\n # img_resized = pad_image(img, tarShape)\n img.save(os.path.join(tarDir,img_dir.split('/')[-1]))\n cur = cur + 1\n\n\ndef generate_noise_image(shape):\n out_img=tf.random_uniform(shape,0,255)\n return out_img\n\n\ndef main(argv):\n if(len(argv)<4):\n print('need parameters: fileDir,file_num,tarDir')\n return\n sample = get_copy_file(argv[1],argv[2])\n resize_and_store_image(sample,tarShape,argv[3])\n\nif __name__ == \"__main__\":\n main(sys.argv)","sub_path":"generate_tfrecord/generate_fsns_test_image.py","file_name":"generate_fsns_test_image.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586133560","text":"# part one\ncalc_fuel_mass = lambda x: int(x)//3 -2\nwith open('data/01_01.txt', 'r') as f:\n total_fuel = sum(list(map(calc_fuel_mass, f.readlines())))\nprint('part one total_fuel:', total_fuel)\n\n# part two\ndef calc_fuel_fuel(fuel):\n fuel_for_fuel = 0\n adjusted_fuel = calc_fuel_mass(fuel)\n while adjusted_fuel > 0:\n fuel_for_fuel += adjusted_fuel\n adjusted_fuel = calc_fuel_mass(adjusted_fuel)\n return fuel_for_fuel\n\nwith open('data/01_01.txt', 'r') as f:\n module_fuel = list(map(calc_fuel_mass, f.readlines()))\n module_fuel_fuel = list(map(calc_fuel_fuel, module_fuel))\n\nprint('module fuel', sum(module_fuel))\nprint('total total fuel:', sum(module_fuel) + sum(module_fuel_fuel))\n","sub_path":"2019/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163493575","text":"\ndef zarojelek_ellenorzese(sor):\n cnt = 0\n for c in sor:\n if c == '(':\n cnt += 1\n if c == ')':\n cnt -= 1\n if cnt < 0:\n return False\n\n\n return cnt == 0\n\n\ndef elemez(sor):\n zarojek = zarojelek_ellenorzese(sor)\n if zarojek == False:\n 
return False\n\n    if (not sor.endswith(\"')\")) and (not sor.endswith(\"');\")):\n return False\n\n    if not sor.startswith(\"if(\"):\n return False\n\n    if \"):\" not in sor:\n return False\n\n    if \":print('\" not in sor:\n return False\n\n    poz1 = sor.find(\"print(\") + 5\n    poz2 = sor.rfind(\")\")\n    belul = sor[poz1+1:poz2]\n    if not (len(belul) >= 2 and belul[0] == \"'\" and belul[-1] == \"'\"):\n return False\n\n    # if the line passed every check\n    return True\n\n\ndef main():\n\n\n\n\n    f = open(\"be.txt\", \"r\")\n\n\n    for sor in f:\n        sor = sor.rstrip(\"\\n\")\n        eredmeny = elemez(sor)\n        if eredmeny:\n            print(\"The line '{0}' is valid.\".format(sor))\n        else:\n            print(\"The line '{0}' is NOT valid.\".format(sor))\n        # end if\n    # end for\n\n    f.close()\n\n\n\nmain()\n\n\n\n\n\n\n\n","sub_path":"ex34.py","file_name":"ex34.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"174639917","text":"from urllib import urlencode\nfrom urllib2 import urlopen\n\nfrom rapidsms.backends.base import BackendBase\n\nbase_url = 'https://api.tropo.com/1.0/sessions'\n\nclass TropoBackend(BackendBase):\n    \"\"\"A RapidSMS threadless backend for Tropo\"\"\"\n\n    def configure(self, config=None, **kwargs):\n        self.config = config\n    \n    def start(self):\n        \"\"\"Override BackendBase.start(), which never returns\"\"\"\n        self._running = True\n\n    def send(self, message):\n        self.debug(\"send(%s,%s)\" % (message.connection.identity,message.text))\n        token = self.config['messaging_token']\n        action = 'create'\n        # Tropo doesn't like dashes in phone numbers\n        callerID = self.config['number'].replace(\"-\",\"\")\n        numberToDial = message.connection.identity.replace(\"-\",\"\")\n\n        params = urlencode([('action', action), ('token', token), ('numberToDial', numberToDial), ('msg', message.text), ('callerID', callerID)])\n        self.debug(\"%s?%s\" % (base_url, params))\n        data = urlopen('%s?%s' % (base_url, params)).read()\n        self.debug(data)\n        return True\n\n    def call_tropo(self,callback_url,message_type='text'):\n        \"\"\"Other apps can call this and pass a function. \n        We'll ask tropo to kick off our application and return.\n        Soon, Tropo will POST to us. When we get the post, we'll pass it to the function\n        we were originally given to handle, which it should do by parsing the JSON it\n        was POSTed and responding with some more JSON. (See the Tropo WebAPI docs.)\n\n        The callback_url is a URL. When Tropo calls us back,\n        we'll pass the request to whatever view django would normally\n        use for that URL. It can include parameters\n        (e.g. \"/patient/callback/1\"). 
An easy way to build this is\n to use reverse:\n\n url = reverse('patient-callback', kwargs={ 'patient_id': patient_id })\n\n message_type is optional, or pass 'voice' to use the voice token instead of text.\n\n (We do this by adding some parms to the call we make to Tropo and looking for\n them on the return post.)\n \"\"\"\n\n if message_type == 'text':\n token = self.config['messaging_token']\n else:\n token = self.config['voice_token']\n\n # Call Tropo\n parms = urlencode([('action','create'),\n ('callback_url',callback_url),\n ('token', token),\n ])\n urlopen(\"%s?%s\" % (base_url, parms)).read()\n","sub_path":"rtropo/outgoing.py","file_name":"outgoing.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323637531","text":"\"\"\"\n Name: Dario Ugalde\n MavID: 1001268068\n Course: CSE 4345 Computational Methods\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimg1 = mpimg.imread('book_fausett_gray.jpg')\nimg2 = mpimg.imread('book_horstmann1_gray2.png')\nimg3 = mpimg.imread('book_horstmann1_gray.jpg')\n\nplt.figure(1)\nplt.subplot(131)\nplt.imshow(img1)\n\nplt.subplot(132)\nplt.imshow(img2)\n\nplt.subplot(133)\nplt.imshow(img3)\n\nplt.show()\n\nfro1 = np.linalg.norm(img1, 'fro')\nfro2 = np.linalg.norm(img2, 'fro')\nfro3 = np.linalg.norm(img3, 'fro')\n\none_two = fro1 - fro2\ntwo_three = fro2 - fro3\none_three = fro1 - fro3\n\nstack1 = np.column_stack(img1)\nstack2 = np.column_stack(img2)\nstack3 = np.column_stack(img3)\n\none_two_2 = np.linalg.norm(stack1 - stack2)\ntwo_three_2 = np.linalg.norm(stack2 - stack3)\none_three_2 = np.linalg.norm(stack1 - stack3)\n\nprint(\"book_fausett_gray.jpg, book_horstmann1_gray2.png\")\nprint(\" Frobenious norm = \",one_two)\nprint(\" Vector 2-norm = \", one_two_2)\nprint(\"\\nbook_horstmann1_gray2.png, book_horstmann1_gray.jpg\")\nprint(\" Frobenius norm = \",two_three)\nprint(\" Vector 2-norm = \",two_three_2)\nprint(\"\\nbook_fausett_gray.jpg, book_horstmann1_gray.jpg\")\nprint(\" Frobenius norm = \", one_three)\nprint(\" Vector 2-norm = \", one_three_2)\n","sub_path":"Homework 2/hw02_5.py","file_name":"hw02_5.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157695404","text":"\n\n#calss header\nclass _REVEAL():\n\tdef __init__(self,): \n\t\tself.name = \"REVEAL\"\n\t\tself.definitions = [u'an occasion at the end of a television programme, etc. 
when something that has been hidden or kept secret until then is shown to the audience: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_reveal.py","file_name":"_reveal.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69645745","text":"import json\nimport math\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\nDATA_PATH = \"C:\\\\Users\\\\illak\\\\git\\\\TempProject\\\\data\\\\problems\\\\\"\nJSON_TARGET = [\"percentCorrect\", \"correctNumber\", \"submittedNumber\", \"solvedPeopleNumber\", \"problemNumber\"]\nAVERAGE_CORRECT = 54.405\n\nproblemData = defaultdict(list)\nproblemData['percentCorrect'] = []\nproblemData['correctNumber'] = []\nproblemData['submittedNumber'] = []\nproblemData['solvedPeopleNumber'] = []\nproblemData['problemNumber'] = []\n\ndef transform(target,result):\n ret = result\n if target == \"percentCorrect\":\n ret = ret[:-1]\n ret = float(ret)\n\n elif target == \"correctNumber\":\n ret = int(ret)\n\n elif target == \"submittedNumber\":\n ret = int(ret)\n \n elif target == \"solvedPeopleNumber\":\n ret = float(ret)\n if ret >= 100: \n ret = 10000/(math.log2(ret))\n ret = int(ret)\n else:\n ret = -1\n\n return ret\n \n\n\ndef collectAllFromJSON():\n for i in range(1000,17300):\n problemJson = str(i)+\".json\"\n jsonFilePath = DATA_PATH+problemJson\n fileContent = \"\"\n try:\n f = open(jsonFilePath,\"r\", encoding=\"utf-8\") \n fileContent = f.read()\n jsonDict = json.loads(fileContent)\n for key in JSON_TARGET:\n problemData[key].append(jsonDict[key])\n except FileNotFoundError:\n continue\n\ndef collectAllFromRecord():\n for target in JSON_TARGET:\n collectFromRecord(target)\n\ndef collectFromRecord(target):\n with open(target+\".txt\",\"r\",encoding=\"utf-8\") as f:\n lines = f.readlines();\n for line in lines:\n result = line[:-1]\n problemData[target].append(result)\n\ndef showHist(target):\n plt.hist(target,bins=1650,range=(450,2100))\n plt.show()\n \ndef record():\n for target in JSON_TARGET:\n with open(target+\".txt\",\"w\",encoding=\"utf-8\") as f:\n result = \"\"\n for x in problemData[target]:\n result += x+\"\\n\"\n f.write(result)\n\ndef scrap():\n rating = [0] * 13835\n for i in range(0,13835):\n rating[i] = transform(\"solvedPeopleNumber\",problemData[\"solvedPeopleNumber\"][i])\n if rating[i] != -1:\n rating[i] = int(rating[i] * (100 - transform(\"percentCorrect\",problemData[\"percentCorrect\"][i]) + AVERAGE_CORRECT) / 100)\n with open(\"ratings.txt\",\"w\",encoding=\"utf-8\") as f:\n result = \"\"\n for i in range(0,13835):\n result += problemData[\"problemNumber\"][i] + \":\" + str(rating[i]) + \"\\n\"\n f.write(result)\n rating.sort()\n print(rating)\n showHist(rating)\n\ndef main():\n collectAllFromRecord()\n scrap()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"stats/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342655869","text":"from django.shortcuts import render\nimport socket\nfrom page.models import Contact\nfrom django.contrib import messages\nfrom bs4 import BeautifulSoup\nimport requests\n\nhostname = socket.gethostname()\nip_address = socket.gethostbyname(hostname)\n\nprint(f\"Hostname: 
{hostname}\")\nprint(f\"IP Address: {ip_address}\")\n\n# Create your views here.\ndef home(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n massage = request.POST.get('massage')\n contact = Contact(name = name, email = email , massage=massage)\n contact.save()\n messages.success(request, 'Thank you for contacting us. We will replay you very soon as posiable.')\n\n context = { \"ip\" : ip_address , \"host\" : hostname}\n return render(request , 'index.html' , context) \n\ndef scrapping(request):\n\n return render(request, 'webscrapping.html')\n\n\n\ndef result(request):\n if request.method == 'POST':\n urls = request.POST.get('url')\n r = requests.get(urls)\n content = r.content\n soup = BeautifulSoup(content, \"html.parser\") \n context = {\n \"final_html\" : content\n }\n return render(request, 'final.html')\n","sub_path":"page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76778921","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\Jesus\\Documents\\GitHub\\django-microsip-base\\django_microsip_base\\django_microsip_base\\apps\\plugins\\djmicrosip_faexist\\djmicrosip_faexist\\config.py\n# Compiled at: 2015-01-05 18:49:30\nsettings = {'name': 'Factura existencias', 'icon_class': 'glyphicon glyphicon-file', \n 'url': '/djmicrosip_faexist/', \n 'url_main_path': 'djmicrosip_faexist/', \n 'users': [\n 'SYSDBA']}","sub_path":"pycfiles/djmicrosip_faexist-0.1.15/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560449027","text":"#!/usr/bin/env python3\nimport argparse\nimport tensorflow as tf\nimport gzip\nimport os\nimport sys\nimport yaml\nimport textwrap\nimport tfprocess\n\nfrom net import Net\n\nSTART_FROM = 0\n\nYAMLCFG = \"\"\"\n%YAML 1.2\n---\nname: 'online-64x6'\ngpu: 0\n\ndataset:\n num_chunks: 200000\n train_ratio: 0.90\n\ntraining:\n batch_size: 2048\n total_steps: 60000\n shuffle_size: 1048576\n lr_values:\n - 0.04\n - 0.002\n lr_boundaries:\n - 35000\n policy_loss_weight: 1.0\n value_loss_weight: 1.0\n path: /dev/null\n\nmodel:\n filters: 64\n residual_blocks: 6\n...\n\"\"\"\nYAMLCFG = textwrap.dedent(YAMLCFG).strip()\ncfg = yaml.safe_load(YAMLCFG)\nargparser = argparse.ArgumentParser(description='Convert net to model.')\nargparser.add_argument('net', type=str,\n help='Net file to be converted to a model checkpoint.')\nargparser.add_argument('--start', type=int, default=0,\n help='Offset to set global_step to.')\nargs = argparser.parse_args()\nSTART_FROM = args.start\nnet = Net()\nnet.parse_proto(args.net)\n\nfilters, blocks = net.filters(), net.blocks()\ncfg['model']['filters'] = filters\ncfg['model']['residual_blocks'] = blocks\ncfg['name'] = 'online-{}x{}'.format(filters, blocks)\nweights = net.get_weights()\n\nprint(yaml.dump(cfg, default_flow_style=False))\n\nx = [\n tf.placeholder(tf.float32, [None, 112, 8*8]),\n tf.placeholder(tf.float32, [None, 1858]),\n tf.placeholder(tf.float32, [None, 1])\n ]\n\ntfp = tfprocess.TFProcess(cfg)\ntfp.init_net(x)\ntfp.replace_weights(weights)\npath = os.path.join(os.getcwd(), cfg['name'])\nupdate_global_step = tfp.global_step.assign(START_FROM)\ntfp.session.run(update_global_step)\nsave_path = 
tfp.saver.save(tfp.session, path, global_step=START_FROM)\nprint(\"Writted model to {}\".format(path))\n","sub_path":"tf/net_to_model.py","file_name":"net_to_model.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143566811","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/fastsom/interp/interp.py\n# Compiled at: 2020-04-24 11:45:18\n# Size of source mod 2**32: 9753 bytes\n\"\"\"\nThis file contains interpretation\nutilities for Self-Organizing Maps.\n\"\"\"\nimport math, torch, numpy as np, seaborn as sns, matplotlib.pyplot as plt\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, BatchSampler\nfrom typing import Optional, List, Union\nfrom fastai.basic_data import DatasetType\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom fastprogress.fastprogress import progress_bar\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom fastsom.core import ifnone, idxs_2d_to_1d\nfrom fastsom.datasets import get_sampler\n__all__ = [\n 'SomInterpretation']\n\nclass SomInterpretation:\n __doc__ = '\\n SOM interpretation utility.\\n\\n Displays various information about a trained Self-Organizing Map, such as\\n topological weight distribution, features distribution and training set\\n distribution over the map.\\n\\n Parameters\\n ----------\\n learn : SomLearner\\n The learner to be used for interpretation.\\n '\n\n def __init__(self, learn) -> None:\n self.learn = learn\n self.data = learn.data\n self.pca = None\n self.w = learn.model.weights.clone().view(-1, learn.model.size[(-1)]).cpu()\n if self.data.normalizer is not None:\n self.w = self.data.denormalize(self.w).numpy()\n\n @classmethod\n def from_learner(cls, learn):\n \"\"\"\n Creates a new instance of `SomInterpretation` from a `SomLearner`.\n\n Parameters\n ----------\n learn : SomLearner\n The learner to be used for interpretation.\n \"\"\"\n return cls(learn)\n\n def _get_train(self):\n return self.data.train_ds.tensors[0].cpu()\n\n def _init_pca(self):\n \"\"\"Initializes and fits the PCA instance.\"\"\"\n self.pca = PCA(n_components=3)\n self.pca.fit(self.w)\n\n def show_hitmap(self, data: Tensor=None, bs: int=64, save: bool=False) -> None:\n \"\"\"\n Shows a hitmap with counts for each codebook unit over the dataset.\n\n Parameters\n ----------\n data : Tensor default=None\n The dataset to be used for prediction; defaults to the training set if None.\n bs : int default=64\n The batch size to be used to run model predictions.\n save : bool default=False\n If True, saves the hitmap into a file.\n \"\"\"\n _, ax = plt.subplots(figsize=(10, 10))\n d = data if data is not None else self._get_train()\n bs = min(bs, len(d))\n sampler = BatchSampler((get_sampler('seq', TensorDataset(d, d), bs)), batch_size=bs, drop_last=True)\n preds = torch.zeros(0, 2).cpu().long()\n for xb_slice in iter(sampler):\n preds = torch.cat([preds, self.learn.model(d[xb_slice]).cpu()], dim=0)\n\n out, counts = preds.unique(return_counts=True, dim=0)\n z = torch.zeros(self.learn.model.size[:-1]).long()\n for i, c in enumerate(out):\n z[(c[0], c[1])] += counts[i]\n\n sns.heatmap((z.cpu().numpy()), linewidth=0.5, annot=True, ax=ax, fmt='d')\n plt.show()\n\n def show_feature_heatmaps(self, dim: Optional[Union[(int, List[int])]]=None, 
cat_labels: Optional[List[str]]=None, cont_labels: Optional[List[str]]=None, recategorize: bool=True, save: bool=False) -> None:\n \"\"\"\n Shows a heatmap for each feature displaying its value distribution over the codebook.\n\n Parameters\n ----------\n dim : Optional[Union[int, List[int]]] default=None\n Indices of features to be shown; defaults to all features.\n cat_labels : Optional[List[str]] default=None\n Categorical feature labels.\n cont_labels : Optional[List[str]] default=None\n Continuous feature labels.\n recategorize : bool default=True\n If True, converts back categorical features that were previously made continuous.\n save : bool default=False\n If True, saves the charts into a file.\n \"\"\"\n n_variables = self._get_train().shape[(-1)]\n cat_labels = ifnone(cat_labels, [])\n cont_labels = ifnone(cont_labels, [])\n labels = cat_labels + cont_labels if len(cat_labels + cont_labels) > 0 else [f\"Feature #{i}\" for i in range(n_variables)]\n if dim is not None:\n if isinstance(dim, list):\n dims = dim\n else:\n dims = [\n dim]\n else:\n dims = list(range(len(labels)))\n cols = 4 if len(dims) > 4 else len(dims)\n rows = math.ceil(len(dims) / cols)\n fig, axs = plt.subplots(rows, cols, figsize=(8 * cols, 6 * rows))\n if recategorize:\n w = torch.tensor(self.w)\n encoded_count = self.w.shape[(-1)] - len(cont_labels)\n cat = self.learn.data.cat_enc.make_categorical(w[:, :encoded_count])\n w = np.concatenate([cat, torch.tensor(self.w[:, encoded_count:])], axis=(-1))\n else:\n w = self.w\n if len(dims) == 1:\n axs = [\n [\n axs]]\n else:\n if rows == 1 or cols == 1:\n axs = [\n axs]\n for d in progress_bar(range(len(dims))):\n i = d // cols\n j = d % cols\n ax = axs[i][j]\n ax.set_title(labels[d])\n sns.heatmap((w[:, d].reshape(self.learn.model.size[:-1])), ax=ax, annot=True)\n\n fig.show()\n\n def show_weights(self, save: bool=False) -> None:\n \"\"\"\n Shows a colored heatmap of the SOM codebooks.\n data = idxs_1d_to_2d(data, self.learn.model.size[1])\n\n Parameters\n ----------\n save : bool default=False\n If True, saves the heatmap into a file.\n \"\"\"\n image_shape = (\n self.learn.model.size[0], self.learn.model.size[1], 3)\n if self.w.shape[(-1)] != 3:\n if self.pca is None:\n self._init_pca()\n d = (self.pca.transform(self.w).reshape)(*image_shape)\n else:\n d = self.w\n\n def rescale(d):\n return ((d - d.min(0)) / d.ptp(0) * 255).astype(int)\n\n d = rescale(d)\n plt.imshow(d.reshape(image_shape))\n\n def show_preds(self, ds_type: DatasetType=DatasetType.Train, class_names: List[str]=None, n_bins: int=5, save: bool=False) -> None:\n \"\"\"\n Displays most frequent label for each map position in `ds_type` dataset.\n If labels are countinuous, binning on `n_bins` is performed.\n\n Parameters\n ----------\n ds_type : DatasetType default=DatasetType.Train\n The enum of the dataset to be used.\n n_bins : int default=5\n The number of bins to use when labels are continous.\n save : bool default=False\n Whether or not the output chart should be saved on a file.\n \"\"\"\n if not self.learn.data.has_labels:\n raise RuntimeError('Unable to show predictions for a dataset that has no labels. 
Please pass labels when creating the `UnsupervisedDataBunch` or use `interp.show_hitmap()`')\n else:\n preds, labels = self.learn.get_preds(ds_type)\n continuous_labels = 'float' in str(labels.dtype)\n if continuous_labels:\n if n_bins > 0:\n labels = KBinsDiscretizer(n_bins=n_bins, encode='ordinal').fit_transform(labels.numpy())\n labels = torch.tensor(labels)\n map_size = (\n self.learn.model.size[0], self.learn.model.size[1])\n data = torch.zeros(map_size[0] * map_size[1])\n preds_1d = idxs_2d_to_1d(preds, map_size[0])\n unique_bmus = preds_1d.unique(dim=0)\n for idx, bmu in enumerate(unique_bmus):\n bmu_labels = labels[(preds_1d == bmu).nonzero()]\n if continuous_labels and n_bins <= 0:\n data[idx] = bmu_labels.mean()\n else:\n unique_labels, label_counts = bmu_labels.unique(return_counts=True)\n data[idx] = unique_labels[label_counts.argmax()]\n\n if not continuous_labels or n_bins > 0:\n unique_labels = labels.unique()\n class_names = ifnone(class_names, [str(label) for label in unique_labels.numpy()])\n colors = plt.cm.Pastel2(np.linspace(0, 1, len(unique_labels)))\n cmap = LinearSegmentedColormap.from_list('Custom', colors, len(colors))\n else:\n palette = sns.palettes.SEABORN_PALETTES['deep6']\n cmap = ListedColormap(palette)\n f, ax = plt.subplots(figsize=(11, 9))\n ax = sns.heatmap((data.view(map_size)), annot=True, cmap=cmap, square=True, linewidths=0.5)\n if not continuous_labels or n_bins > 0:\n colorbar = ax.collections[0].colorbar\n colorbar.set_ticks(unique_labels.numpy())\n colorbar.set_ticklabels(class_names)\n plt.show()","sub_path":"pycfiles/fastsom-0.1.6-py3.6/interp.cpython-36.py","file_name":"interp.cpython-36.py","file_ext":"py","file_size_in_byte":9307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"68661368","text":"# -*-coding:utf-8-*-\n\n'''\nDescription:\n This file is the script for testing the performance of the algorithm with the \"profile\" tool\n\nVersion: 1.0\n\nAuthor: Peng BI\n'''\n\nimport time\nfrom controllers.ProbabilityController import ProbabilityController\nfrom models.antAlgorithm.SolutionModel import SolutionModel\nimport random\nimport copy\n\nclass ProfilerAlgorithmController:\n '''\n Description:\n This class is the algorithm controller annotated for the \"profile\" tool\n\n Attributes:\n instance: (object of the InstanceModel class) the instance prepared by the InstanceControleur class, including\n the list of buildings, the list of cares, the list of pheromones on the building nodes, the matrix\n of pheromones on the arcs between buildings and cares, and the list of ants\n bestSolution: (object of the SolutionModel class) the best solution found in the end\n careEffectRadius: (int) the initial attraction radius of the circle centered on each care\n bestQualityOfSolutionForEachIterationList: (float[]) the list of best-solution qualities for each iteration\n averageQualityOfSolutionForEachIterationList: (float[]) the list of average solution qualities for each iteration\n distanceTotalOfBestSolutionForEachIterationList: (float[]) the list of total distances of the best solution of each iteration\n populationAllocatedOfBestSolutionForEachIterationList: (float[]) the list of total homeless people sheltered by the best solution of each iteration\n buildingAllocatedOfBestSolutionForEachIterationList: (float[]) the list of numbers of buildings allocated by the best solution of each iteration\n '''\n\n def __init__(self, instance, careEffectRadius):\n '''\n Description:\n This method is the constructor of the AlgorithmeControlleur class\n\n :param instance: (object of the InstanceModel class) the program\n instance, prepared by the InstanceControleur class\n :param careEffectRadius: (int) the initial attraction radius of each care,\n defined by the user in the main.py script\n '''\n\n self.instance = instance # (object of the InstanceModel class) the instance prepared by the InstanceControleur class\n self.bestSolution = SolutionModel() # (object of the SolutionModel class) the best solution found in the end\n self.careEffectRadius = careEffectRadius # (int) the initial attraction radius of the circle centered on each care\n self.bestQualityOfSolutionForEachIterationList = [] # (float[]) the list of best-solution qualities for each iteration\n self.averageQualityOfSolutionForEachIterationList = [] # (float[]) the list of average solution qualities for each iteration\n self.distanceTotalOfBestSolutionForEachIterationList = [] # (float[]) the list of total distances of the best solution of each iteration\n self.populationAllocatedOfBestSolutionForEachIterationList = [] # (float[]) the list of total homeless people sheltered by the best solution of each iteration\n self.buildingAllocatedOfBestSolutionForEachIterationList = [] # (float[]) the list of numbers of buildings allocated by the best solution of each iteration\n\n\n def run(self, iterationTimes):\n '''\n Description:\n This method is the entry point of the algorithm; it gathers the solutions generated\n by each ant in each iteration and keeps the best one\n\n :param iterationTimes: (int) the number of iterations\n\n :return nothing\n '''\n\n # (int[][]) The matrix of building indices whose size is len(listCare) * len(listBuilding);\n # for each row (i.e. for each care), the building indices are sorted in increasing order of the\n # distance between the care and each building; this matrix is the transpose of the\n # distance matrix\n distanceSortedBuildingIndexMatrix = self.sortBuildingIndexForEachCareInDistanceMatrix()\n\n bestSolutionForEachIterationList = [] #(SolutionModel[]) the list of best solutions of each iteration\n\n # start iterating\n iterationCounter = 0\n while iterationCounter < iterationTimes:\n solutionForOneIterationList = [] # (SolutionModel[]) the list of solutions of one iteration\n qualityOfSolutionForOneIterationList = [] # (float[]) the list of solution qualities of one iteration\n\n allocateStartTime = time.time() # record the time at which one building-allocation iteration starts\n\n # the ants start looking for their solutions\n for k,ant in enumerate(self.instance.antList):\n # (int [][]) a copy of the matrix of sorted building indices for each care;\n # since the original matrix must not be modified, a deep copy is needed\n copyDistanceSortedBuildingIndexMatrix = copy.deepcopy(distanceSortedBuildingIndexMatrix)\n\n # call the method \"self.allocateBuilding\" to allocate the buildings\n self.allocateBuilding(ant,copyDistanceSortedBuildingIndexMatrix,solutionForOneIterationList,\n qualityOfSolutionForOneIterationList)\n\n # (Solution) find the best solution among the solutions generated by each ant in one iteration\n bestSolutionIndexForOneIteration = qualityOfSolutionForOneIterationList.index(max(qualityOfSolutionForOneIterationList))\n # append the quality of the best solution of the current iteration to \"bestQualityOfSolutionForEachIterationList\"\n self.bestQualityOfSolutionForEachIterationList.append(max(qualityOfSolutionForOneIterationList))\n # append the best solution of the current iteration to \"bestSolutionForEachIterationList\"\n bestSolutionForEachIterationList.append(solutionForOneIterationList[bestSolutionIndexForOneIteration])\n # compute the average quality of the solutions of the current iteration by calling\n # \"self.calculateAverageSolutionQualityForEachIteration\", and append it to \"averageQualityOfSolutionForEachIterationList\"\n self.averageQualityOfSolutionForEachIterationList.append(self.calculateAverageSolutionQualityForEachIteration\n (qualityOfSolutionForOneIterationList))\n # compute the total distance of the best solution of the current iteration\n self.distanceTotalOfBestSolutionForEachIterationList.append(self.calculateDistanceTotalOfOneSolution\n (solutionForOneIterationList[bestSolutionIndexForOneIteration]))\n # compute the number of homeless people sheltered by the best solution of the current iteration\n self.populationAllocatedOfBestSolutionForEachIterationList.append(self.calculatePopulationAllocatedOfOneSolution\n (solutionForOneIterationList[bestSolutionIndexForOneIteration]))\n # compute the number of buildings allocated by the best solution of the current iteration\n self.buildingAllocatedOfBestSolutionForEachIterationList.append(self.calculateBuildingAllocatedOfOneSolution\n (solutionForOneIterationList[bestSolutionIndexForOneIteration]))\n\n allocateEndTime = time.time() # record the time at which one building-allocation iteration ends\n print('Finished one iteration of allocating buildings, it takes %ds' % (allocateEndTime - allocateStartTime))\n iterationCounter += 1\n\n # (int) find the index of the final best solution among the best solutions found in each iteration\n bestSolutionIndex = self.bestQualityOfSolutionForEachIterationList.index(max(self.bestQualityOfSolutionForEachIterationList))\n # select the final best solution according to \"bestSolutionIndex\"\n self.bestSolution = bestSolutionForEachIterationList[bestSolutionIndex]\n
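    # --- Illustrative sketch (assumption; ProbabilityController's code is not
    # shown in this file) --- generateProbability(indices, probabilities), used
    # in allocateBuilding below, is assumed to do a fitness-proportionate
    # ("roulette wheel") pick, e.g.:
    #
    #     def roulette_pick(indices, probabilities):
    #         threshold = random.uniform(0, sum(probabilities))
    #         cumulative = 0.0
    #         for index, p in zip(indices, probabilities):
    #             cumulative += p
    #             if cumulative >= threshold:
    #                 return index
    #         return indices[-1]  # guard against floating-point round-off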
de meilleure solution finale parmis les meilleure solutions trouvées dans chaque itèration\n bestSolutionIndex = self.bestQualityOfSolutionForEachIterationList.index(max(self.bestQualityOfSolutionForEachIterationList))\n # trouver la meilleure solution finale selon l'indice \"bestSolutionIndex\"\n self.bestSolution = bestSolutionForEachIterationList[bestSolutionIndex]\n\n\n @profile\n def allocateBuilding(self, ant,copyDistanceSortedBuildingIndexMatrix, solutionForOneIterationList,qualityOfSolutionForOneIterationList):\n '''\n Description:\n Cette méthode est pour sélectionner les bâtiments à affecter\n\n :param ant: (l'objet de la classe AntModel) une fourmi qui va chercher sa solution\n :param copyDistanceSortedBuildingIndexMatrix: (int[][]) la matrice copiée d'indices\n de bâtiment référant la matrice de distance\n :param solutionForOneIterationList: (SolutionModel[]) la liste de solutions pour une itération\n :param qualityOfSolutionForOneIterationList: (float[]) la liste de qualités de solution pour une itération\n\n :return: rien\n '''\n\n ant.solution = SolutionModel()\n\n buildingToAllocateList = copy.deepcopy(self.instance.buildingList) # (BuildingModel[]) la liste de bâtiments\n careToFillList = copy.deepcopy(self.instance.careList) # (CareModel[]) la liste de care\n\n # (Boolean[]) la liste qui marque si le bâtiment est déjà affecté,les valeurs initiales sont \"False\"\n isBuildingSelectedList = [False] * len(buildingToAllocateList)\n # (Boolean[]) la liste qui marque si le care est déjà plein, les valeurs initiales sont \"False\"\n isCareFullList = [False] * len(careToFillList)\n # (int[]) la liste de rayon d'attraction de care, les valeurs initiales sont égales au rayon initiale\n radiusList = [self.careEffectRadius] * len(careToFillList)\n # (int[]) un solution, les éléments sont les indices de care, les valeurs initiales sont -1\n # Si la valeur est -1, ça veut dire que aucun care peut héberger ce bâtiment\n ant.solution.solutionArray = [-1] * len(buildingToAllocateList)\n\n probabilityCtrl = ProbabilityController()\n\n # construire la liste de candidat pour chaque care (le candidat est l'indice de bâtiment)\n print(\"Start to initialize candidate list for care...\")\n candidateListForCare = [[]]\n j = 0\n while j < len(careToFillList):\n # prendre une colonne dans la matrice de distance originale\n # i.e. prendre la liste de distance entre le care actuel et chaque bâtiment\n originalDistanceColumn = [originalColumn[j] for originalColumn in self.instance.distanceMatrix]\n # prendre une ligne dans la matrice copiée d'indice de bâtiment\n # i.e. 
prendre la liste d'indice de bâtiments trié pour le care actuel\n sortedDistanceColumn = copyDistanceSortedBuildingIndexMatrix[j]\n\n i = 0\n while i < len(sortedDistanceColumn):\n # chercher un élément dans la liste \"sortedDistanceColumn\", parce que cette liste est déjà trié par l'ordre\n # croissante, on peut prendre l'élément directement sans la cherche\n minIndex = sortedDistanceColumn[i]\n # obtenir la distance selon l'indice trouvée dans la matrice de distance originale comme la valeur minimum\n minVar = originalDistanceColumn[minIndex]\n\n # si cette distance minimum est inférieure ou égale au rayon du care actuel, et si la taille de liste de\n # candidat est inférieure ou égale au 10 (on limite la taille maximum de liste de candidat est de 10)\n if minVar <= radiusList[j] and len(candidateListForCare[j]) <= 10:\n # ajouter cette indice de bâtiment dans la liste de candidat de care actuel\n candidateListForCare[j].append(minIndex)\n # supprimer cette indice de bâtiment dans la liste copyDistanceSortedBuildingIndexMatrix[j]\n copyDistanceSortedBuildingIndexMatrix[j].remove(minIndex)\n # si la taille de liste de candidat atteint 10, quitter le boucle\n if len(candidateListForCare[j]) >= 10:\n break\n i += 1\n # si le care actuel n'est pas le dernier care, ajouter un liste dans la liste candidateListForCare et\n # passer au care suivant pour continuer à construire sa liste de candidat\n if j != len(careToFillList) - 1:\n candidateListForCare.append([])\n j += 1\n print('Finish initializing candidate list for care...')\n\n # commencer à affecter les bâtiments\n print('Start to allocate buildings...')\n step = 0\n careToFillIndexOfLastStep = -1 # (int) l'indice de care qui est sélectionné dans le pas précédent\n # une liste qui stocke juste l'indece de bâtiment, elle sert à sélectionner un bâtiment au hasard\n # (par contre la liste \"buidlingToAllocateIndex\" stocke l'id de bâtiment)\n buildingIndexList = [index for index in range(0,len(buildingToAllocateList))]\n while step < len(buildingToAllocateList):\n # si c'est le premier pas ou aucun care est sélectionné dans le\n # pas précédent, sélectionner un bâtiment non-affecté au hasard\n if step == 0 or careToFillIndexOfLastStep == -1:\n randomNumber = random.randint(0, len(buildingIndexList) - 1)\n buidlingToAllocateIndex = buildingIndexList[randomNumber]\n buildingIndexList.remove(buidlingToAllocateIndex)\n # sinon, il faut calculer la probabilité de transition de bâtiment pour sélectionner un bâtiment\n else:\n # si la liste de candidat du care n'est pas vide\n if len(candidateListForCare[careToFillIndexOfLastStep]) != 0:\n buildingProbabilityList = [] # la liste de probabilité de déplacement de chaque bâtiment\n buildingIndexForProbabilityList = [] # la liste d'indices de bâtiment qui correspond à la liste buildingProbabilityList\n\n print(\"batiment *******************************************\")\n # prendre les bâtiments à partir de la liste de candidat de care actuel\n iCandidate = 0\n while iCandidate < len(candidateListForCare[careToFillIndexOfLastStep]):\n # obtenir l'indice de bâtiment i\n i = candidateListForCare[careToFillIndexOfLastStep][iCandidate]\n # si le bâtiment i n'est pas encore affecté, alors calculer sa probabilité de transition\n if isBuildingSelectedList[i] == False:\n eta = self.instance.pheromoneNodeList[i].eta\n tau = self.instance.pheromoneNodeList[i].tau\n # appeler la méthode \"calculateProbability()\" de la classe ProbabilityController pour\n # calculer sa probabilité de déplacement\n buildingProbability 
\n                            # add its transition probability to the list buildingProbabilityList\n                            buildingProbabilityList.append(buildingProbability)\n                            # add its index to the list buildingIndexForProbabilityList\n                            buildingIndexForProbabilityList.append(i)\n                            iCandidate += 1\n                        # otherwise (building i is already allocated to another care), remove building i\n                        # from the candidate list of the current care\n                        else:\n                            candidateListForCare[careToFillIndexOfLastStep].remove(i)\n                            # do not advance iCandidate after a removal: the next candidate has just\n                            # shifted into position iCandidate (advancing would skip it)\n                    # if the list buildingProbabilityList is empty, it means that all the buildings in the\n                    # candidate list of the current care are already allocated\n                    if len(buildingProbabilityList) == 0:\n                        continue\n\n                    print(\"building start\")\n                    print(buildingIndexForProbabilityList, buildingProbabilityList)\n                    # call the method \"generateProbability()\" of the ProbabilityController class to pick a building\n                    # according to the transition probabilities\n                    buidlingToAllocateIndex = probabilityCtrl.generateProbability(buildingIndexForProbabilityList,\n                                                                                  buildingProbabilityList)\n                    buildingIndexList.remove(buidlingToAllocateIndex)\n                    # remove the building[buidlingToAllocateIndex] from the candidate list of the current care\n                    candidateListForCare[careToFillIndexOfLastStep].remove(buidlingToAllocateIndex)\n                    print(\"building end\")\n\n                # otherwise (the candidate list of the current care is empty), its radius has to be enlarged and\n                # its candidate list refilled\n                else:\n                    # update the list of building indices against the distance matrix for the current care,\n                    # removing the buildings that are already allocated (iterate over a copy, because removing\n                    # from a list while iterating over it skips elements)\n                    sortedDistanceColumn = copyDistanceSortedBuildingIndexMatrix[careToFillIndexOfLastStep]\n                    for index in list(sortedDistanceColumn):\n                        if isBuildingSelectedList[index] == True:\n                            copyDistanceSortedBuildingIndexMatrix[careToFillIndexOfLastStep].remove(index)\n\n                    # get the original distance list and the new list of building indices\n                    originalDistanceColumn = [originalColumn[careToFillIndexOfLastStep] for originalColumn in\n                                              self.instance.distanceMatrix]\n                    sortedDistanceColumn = copyDistanceSortedBuildingIndexMatrix[careToFillIndexOfLastStep]\n\n                    # if the distance of the first building in the sorted building index list exceeds the\n                    # radius of the current care, enlarge the radius\n                    index = sortedDistanceColumn[0]\n                    if self.instance.distanceMatrix[index][careToFillIndexOfLastStep] > radiusList[careToFillIndexOfLastStep]:\n                        # the radius of the current care grows by 1000m\n                        radiusList[careToFillIndexOfLastStep] += 1000\n\n                    # start refilling the candidate list of the current care\n                    i = 0\n                    while i < len(sortedDistanceColumn):\n                        # look up an element in the list \"sortedDistanceColumn\"; because this list is already sorted\n                        # in ascending order, we can take the element directly without searching\n                        minIndex = sortedDistanceColumn[i]\n                        # get the distance for the found index from the original distance matrix as\n                        # the minimum value\n                        minVar = originalDistanceColumn[minIndex]\n\n                        # if this minimum distance is less than or equal to the radius of the current care, and if\n                        # the size of the candidate list is at most 10\n                        if minVar <= radiusList[careToFillIndexOfLastStep] and len(\n                                candidateListForCare[careToFillIndexOfLastStep]) <= 10:\n                            # add this building index to the candidate list of the current care\n                            candidateListForCare[careToFillIndexOfLastStep].append(minIndex)\n                            # remove this building index from the list copyDistanceSortedBuildingIndexMatrix[careToFillIndexOfLastStep]
\n                            copyDistanceSortedBuildingIndexMatrix[careToFillIndexOfLastStep].remove(minIndex)\n                            # if the candidate list size reaches 10, leave the loop\n                            if len(candidateListForCare[careToFillIndexOfLastStep]) >= 10:\n                                break\n                        i += 1\n                    # leave this pass after updating the candidate list\n                    continue\n\n            # call the method \"chooseCare\" to select a care and find out whether all the cares are full\n            isAllCareFull, careToFillIndex = self.chooseCare(buidlingToAllocateIndex, buildingToAllocateList,\n                                                             careToFillList, isCareFullList, ant.solution)\n\n            # if a care was selected in the current step\n            if careToFillIndex != -1:\n                # update careToFillIndexOfLastStep\n                careToFillIndexOfLastStep = careToFillIndex\n\n                # update the pheromone deposited on the building nodes\n                rho = self.instance.pheromoneNodeList[buidlingToAllocateIndex].rho\n                deltaTau = rho * self.objectiveFunctionG(ant.solution)  # multiplication because G(x) is a function to maximise\n                tau = self.instance.pheromoneNodeList[buidlingToAllocateIndex].tau\n                self.instance.pheromoneNodeList[buidlingToAllocateIndex].deltaTau = deltaTau\n                self.instance.pheromoneNodeList[buidlingToAllocateIndex].tau = (1 - rho) * tau + deltaTau\n            # mark that the building[buidlingToAllocateIndex] is now allocated, or that no care can host it\n            isBuildingSelectedList[buidlingToAllocateIndex] = True\n\n            # if all the cares are full, leave the global loop; this ant has found its solution for this iteration\n            if isAllCareFull == True:\n                break\n            step += 1\n\n        # compute the quality of the found solution (Q = G(x)/(1+F(x))) or (Q = H(x)/(1+F(x)))\n        ant.solution.quality = self.objectiveFunctionG(ant.solution) / (1 + self.objectiveFunctionF(ant.solution))\n        # add the computed quality to the list qualityOfSolutionForOneIterationList\n        qualityOfSolutionForOneIterationList.append(ant.solution.quality)\n        # add the solution to the list solutionForOneIterationList\n        solutionForOneIterationList.append(ant.solution)\n\n\n    @profile\n    def chooseCare(self, buidlingToAllocateIndex, buildingToAllocateList, careToFillList, isCareFullList, solution):\n        '''\n        Description:\n            This method selects the cares to fill\n\n        :param buidlingToAllocateIndex: (int) the index of the selected building\n        :param buildingToAllocateList: (BuildingModel[]) the list of buildings\n        :param careToFillList: (CareModel[]) the list of cares\n        :param isCareFullList: (Boolean[]) the list that marks whether each care is full\n        :param solution: (object of the SolutionModel class) the solution\n        :return: isAllCareFull: (boolean) a boolean that tells whether all the cares are full\n\n        :return: careToFillIndex: (int) the index of the selected care\n        '''\n\n        careProbabilityList = []  # the list of transition probabilities of the cares\n        careIndexForProbabilityList = []  # the list of care indices matching careProbabilityList\n\n        allowedCareLenght = len(careToFillList)  # (int) the number of cares that are still available\n\n        probabilityCtrl = ProbabilityController()\n\n        # take each care\n        j = 0\n        while j < len(careToFillList):\n            # if the building population is less than or equal to the care capacity, and this care is not full\n            if buildingToAllocateList[buidlingToAllocateIndex].population <= careToFillList[j].capacity and \\\n                    isCareFullList[j] == False:\n                eta = self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][j].eta\n                tau = self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][j].tau
\n                # call the method \"calculateProbability()\" of the ProbabilityController class to\n                # compute the transition probability of care[j]\n                careProbability = probabilityCtrl.calculateProbability(eta, tau, isCareFullList)\n                # add its transition probability to the list careProbabilityList\n                careProbabilityList.append(careProbability)\n                # add its index to the list careIndexForProbabilityList\n                careIndexForProbabilityList.append(j)\n            j += 1\n\n        # if there is a care that can host this building\n        if len(careProbabilityList) != 0:\n            # call the method \"generateProbability()\" of the ProbabilityController class to pick a care\n            # according to the transition probabilities\n            careToFillIndex = probabilityCtrl.generateProbability(careIndexForProbabilityList, careProbabilityList)\n            # store the care index in the solution\n            solution.solutionArray[buidlingToAllocateIndex] = careToFillIndex\n\n            # update the pheromone deposited on the building/care edge\n            rho = self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][careToFillIndex].rho\n            deltaTau = rho / self.objectiveFunctionF(solution)  # division because F(x) is a function to minimise\n            tau = self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][careToFillIndex].tau\n            self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][careToFillIndex].deltaTau = deltaTau\n            self.instance.pheromoneEdgeMatrix[buidlingToAllocateIndex][careToFillIndex].tau = (1 - rho) * tau + deltaTau
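\n            # (This matches the standard ACO update tau_new = (1 - rho) * tau_old + deltaTau: the evaporation\n            # rate rho discounts the old trail while deltaTau rewards the edge that was just used; here\n            # deltaTau = rho / F(x) because F(x) is minimised, so a smaller F deposits more pheromone.)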
\n\n            # update the capacity of the selected care\n            careToFillList[careToFillIndex].capacity = careToFillList[careToFillIndex].capacity - buildingToAllocateList[buidlingToAllocateIndex].population\n            # check whether the selected care is now full,\n            # i.e. whether its capacity can still host the building with the minimum population\n            populationList = []\n\n            # look up the populations of the unallocated buildings\n            i = 0\n            while i < len(buildingToAllocateList):\n                # if the care allocated to building i is -1, building i is not allocated\n                if solution.solutionArray[i] == -1:\n                    populationList.append(buildingToAllocateList[i].population)\n                i += 1\n\n            # if the list populationList is empty, all the buildings are already allocated\n            if len(populationList) == 0:\n                return True, careToFillIndex\n            # otherwise, there are unallocated buildings left\n            else:\n                # find the minimum population\n                minPopulation = min(populationList)\n                # if the capacity of the selected care is lower than the minimum population\n                if careToFillList[careToFillIndex].capacity < minPopulation:\n                    # mark this care as full\n                    isCareFullList[careToFillIndex] = True\n                    # the number of available cares decreases by 1\n                    allowedCareLenght -= 1\n        # if no care can host this building\n        else:\n            # set the care index for this building to -1\n            careToFillIndex = -1\n\n        # if the number of available cares is 0, all the cares are full\n        if allowedCareLenght == 0:\n            return True, careToFillIndex\n        # otherwise, there are available cares left\n        else:\n            return False, careToFillIndex\n\n\n    def sortBuildingIndexForEachCareInDistanceMatrix(self):\n        '''\n        Description:\n            This method sorts the building indices\n            for each care according to the distance matrix\n\n        :return: distanceSortedBuildingIndexMatrix: (int[][]) the matrix of sorted building indices\n        '''\n\n        print('Start to sort distance...')\n        sortStartTime = time.time()\n\n        # copy the distance matrix\n        copiedDistanceMatrix = copy.deepcopy(self.instance.distanceMatrix)\n\n        # create the sorted building index matrix (a list comprehension is used instead of list\n        # multiplication so that each care gets an independent row rather than aliases of one list)\n        distanceSortedBuildingIndexMatrix = [list(range(0, len(self.instance.distanceMatrix)))\n                                             for _ in range(len(self.instance.distanceMatrix[0]))]\n\n        # build the sorted building index matrix\n        indexCare = 0\n        while indexCare < len(copiedDistanceMatrix[0]):\n            sortOneColumnStartTime = time.time()\n            print(\"Sort buildings for %dth care...\" % (indexCare + 1))\n            # take one column of the distance matrix\n            distanceColumn = [column[indexCare] for column in copiedDistanceMatrix]\n            # take one row of the sorted building index matrix\n            distanceIndexRow = copy.copy(distanceSortedBuildingIndexMatrix[indexCare])\n\n            # call the method \"mergeSort\" to do the sorting\n            distanceColumn, distanceIndexRow = self.mergeSort(distanceColumn, distanceIndexRow)\n            # store the sorted row back into the matrix distanceSortedBuildingIndexMatrix\n            distanceSortedBuildingIndexMatrix[indexCare] = distanceIndexRow\n            sortOneColumnEndTime = time.time()\n            print(\"Finish sorting for %dth care, it takes %ds\" % (\n                indexCare + 1, sortOneColumnEndTime - sortOneColumnStartTime))\n            indexCare += 1\n\n        sortEndTime = time.time()\n        print('Finish sorting the distances, it takes %d s!\\n\\n' % (sortEndTime - sortStartTime))\n\n        return distanceSortedBuildingIndexMatrix\n\n\n    def mergeSort(self, distanceList, distanceIndexList):\n        '''\n        Description:\n            This method sorts a list of building indices\n            according to the distance matrix with a merge sort\n\n        :param distanceList: (float[]) the list of distances to sort by\n        :param distanceIndexList: (int[]) the list of building indices to sort\n\n        :return: result: (float[]) the list of sorted distances\n        :return: resultIndex: (int[]) the list of sorted building indices\n        '''
\n\n        # if the size of distanceList is 1 after the split, return both lists\n        if len(distanceList) <= 1:\n            return distanceList, distanceIndexList\n\n        # split both lists into two sublists each\n        num = int(len(distanceList) / 2)\n        left, leftIndex = self.mergeSort(distanceList[:num], distanceIndexList[:num])\n        right, rightIndex = self.mergeSort(distanceList[num:], distanceIndexList[num:])\n\n        # do the merge\n        i, j = 0, 0\n        result = []\n        resultIndex = []\n        while i < len(left) and j < len(right):\n            # if the element of the left sublist is less than or equal to the element of the right sublist\n            if left[i] <= right[j]:\n                # append the element of the left sublist to the result list\n                result.append(left[i])\n                resultIndex.append(leftIndex[i])\n                i += 1\n            # if the element of the left sublist is greater than the element of the right sublist\n            else:\n                # append the element of the right sublist to the result list\n                result.append(right[j])\n                resultIndex.append(rightIndex[j])\n                j += 1\n\n        # merge the two sublists of distanceList into one complete list\n        result += left[i:]\n        result += right[j:]\n\n        # merge the two sublists of distanceIndexList into one complete list\n        resultIndex += leftIndex[i:]\n        resultIndex += rightIndex[j:]\n\n        return result, resultIndex\n\n\n    def objectiveFunctionF(self, solution):\n        '''\n        Description:\n            This method implements the objective function f(x)\n\n        :param solution: (object of the SolutionModel class) a solution\n\n        :return: fx: (float) the computed value of f(x)\n        '''\n\n        solutionArray = solution.solutionArray\n\n        # get the decision variables x[i][j]\n        i = 0\n        x = [[0] * len(self.instance.careList) for row in range(len(self.instance.buildingList))]\n        while i < len(solutionArray):\n            if solutionArray[i] != -1:\n                j = solutionArray[i]\n                x[i][j] = 1\n            i += 1\n\n        # compute sum(population[i]*x[i][j]*dist[i][j]) with i from 1 to n and j from 1 to m\n        i = 0\n        fx = 0\n        while i < len(self.instance.buildingList):\n            j = 0\n            while j < len(self.instance.careList):\n                fx += self.instance.buildingList[i].population * x[i][j] * self.instance.distanceMatrix[i][j]\n                j += 1\n            i += 1\n\n        return fx\n\n\n    def objectiveFunctionG(self, solution):\n        '''\n        Description:\n            This method implements the objective function g(x)\n\n        :param solution: (object of the SolutionModel class) a solution\n\n        :return: gx: (float) the computed value of g(x)\n        '''\n\n        solutionArray = solution.solutionArray\n\n        # get the decision variables x[i][j]\n        i = 0\n        x = [[0] * len(self.instance.careList) for row in range(len(self.instance.buildingList))]\n        while i < len(solutionArray):\n            if solutionArray[i] != -1:\n                j = solutionArray[i]\n                x[i][j] = 1\n            i += 1\n\n        # compute sum(population[i]*x[i][j]) with i from 1 to n and j from 1 to m\n        i = 0\n        gx = 0\n        while i < len(self.instance.buildingList):\n            j = 0\n            while j < len(self.instance.careList):\n                gx += self.instance.buildingList[i].population * x[i][j]\n                j += 1\n            i += 1\n\n        return gx
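\n\n    # Summary of the model as used above: F(x) = sum over i,j of population[i] * x[i][j] * dist[i][j]\n    # (minimised), G(x) = sum over i,j of population[i] * x[i][j] (maximised), H(x) = sum over i,j of\n    # x[i][j], and a solution is scored as Q = G(x) / (1 + F(x)) in the ant loop above.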
\n\n\n    def objectiveFunctionH(self, solution):\n        '''\n        Description:\n            This method implements the objective function h(x)\n\n        :param solution: (object of the SolutionModel class) a solution\n\n        :return: hx: (float) the computed value of h(x)\n        '''\n\n        solutionArray = solution.solutionArray\n\n        # get the decision variables x[i][j]\n        i = 0\n        x = [[0] * len(self.instance.careList) for row in range(len(self.instance.buildingList))]\n        while i < len(solutionArray):\n            if solutionArray[i] != -1:\n                j = solutionArray[i]\n                x[i][j] = 1\n            i += 1\n\n        # compute sum(x[i][j]) with i from 1 to n and j from 1 to m\n        i = 0\n        hx = 0\n        while i < len(self.instance.buildingList):\n            j = 0\n            while j < len(self.instance.careList):\n                hx += x[i][j]\n                j += 1\n            i += 1\n\n        return hx\n\n\n    def calculateAverageSolutionQualityForEachIteration(self, qualityOfSolutionForOneIterationList):\n        '''\n        Description:\n            This method computes the average quality of the\n            solutions generated by each ant in one iteration\n\n        :param qualityOfSolutionForOneIterationList: (float[]) the list of qualities of each solution of one iteration\n\n        :return: average: (float) the average quality of the solutions\n        '''\n\n        qualitySum = 0.00  # renamed from \"sum\" so the Python built-in is not shadowed\n        for k in range(len(self.instance.antList)):\n            qualitySum += qualityOfSolutionForOneIterationList[k]\n        average = qualitySum / len(self.instance.antList)\n\n        return average\n\n\n    def calculateDistanceTotalOfOneSolution(self, oneSolution):\n        '''\n        Description:\n            This method computes the total distance of a solution\n\n        :param oneSolution: (object of the SolutionModel class) a solution\n\n        :return: distanceTotal: (float) the computed total distance of a solution\n        '''\n\n        solution = oneSolution.solutionArray\n\n        i = 0\n        distanceTotal = 0\n        while i < len(solution):\n            j = solution[i]\n            # if the index of the care that hosts building i is not -1\n            if j != -1:\n                distanceTotal += self.instance.distanceMatrix[i][j]\n            i += 1\n\n        return distanceTotal\n\n\n    def calculatePopulationAllocatedOfOneSolution(self, oneSolution):\n        '''\n        Description:\n            This method computes the number of homeless people housed by a solution\n\n        :param oneSolution: (object of the SolutionModel class) a solution\n\n        :return: populationAllocated: (float) the number of homeless people housed by a solution\n        '''\n\n        solution = oneSolution.solutionArray\n\n        i = 0\n        populationAllocated = 0\n        while i < len(solution):\n            j = solution[i]\n            # if the index of the care that hosts building i is not -1\n            if j != -1:\n                populationAllocated += self.instance.buildingList[i].population\n            i += 1\n\n        return populationAllocated\n\n\n    def calculateBuildingAllocatedOfOneSolution(self, oneSolution):\n        '''\n        Description:\n            This method computes the number of buildings allocated by a solution\n\n        :param oneSolution: (object of the SolutionModel class) a solution\n\n        :return: buildingAllocated: (int) the number of buildings allocated by a solution\n        '''\n\n        solution = oneSolution.solutionArray\n\n        i = 0\n        buildingAllocated = 0\n        while i < len(solution):\n            j = solution[i]\n            # if the index of the care that hosts building i is not -1\n            if j != -1:\n                buildingAllocated += 1\n            i += 1\n\n        return buildingAllocated","sub_path":"tests/PerformanceTest/ProfilerAlgorithmController.py","file_name":"ProfilerAlgorithmController.py","file_ext":"py","file_size_in_byte":38659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643633293","text":"from datetime import datetime\n\nfrom .models import Customers\n\n\ndef create_customer(data):\n    try:\n        customer = Customers(\n            updated_at=datetime.now(),\n            first_name=data.get('first_name').capitalize(),\n            last_name=data.get('last_name').capitalize(),\n            gender=data.get('gender'),\n            email=data.get('email'),\n            contact_no=data.get('contact_no'),\n            city=data.get('city').capitalize(),\n            state=data.get('state').capitalize(),\n            address_line1=data.get('address_line1').capitalize(),\n            address_line2=data.get('address_line2').capitalize(),\n        )\n    except Exception as err:
\n        print(\"Exception--\", err)\n        # return None explicitly on failure; previously \"customer\" was unbound here, so the\n        # trailing return raised an UnboundLocalError instead of signalling the failure\n        return None\n    return customer\n","sub_path":"customer/validations.py","file_name":"validations.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374808118","text":"import logging.config\nimport unittest\n\nimport os\n\nfrom botocore.exceptions import ClientError\nfrom mock import patch, MagicMock, call, Mock\nfrom captain.docker_controller import DockerController\nfrom captain import exceptions\nfrom captain.tests.util_mock import ClientMock\nfrom requests.exceptions import ConnectionError, SSLError\nfrom testfixtures import LogCapture\nimport itertools\n\n\nclass TestDockerController(unittest.TestCase):\n\n    def setUp(self):\n        self.config = MagicMock()\n        self.config.docker_nodes = [\"http://node-1/\", \"http://node-2/\", \"http://node-3/\"]\n        self.config.slug_runner_command = \"runner command\"\n        self.config.slug_runner_image = \"runner/image\"\n        self.config.slug_runner_version = \"0.0.73\"\n        self.config.docker_gc_grace_period = 86400\n        self.config.slots_per_node = 10\n        self.config.slot_memory_mb = 128\n        self.config.default_slots_per_instance = 2\n        self.config.aws_docker_host_tag_name = None\n        self.config.aws_docker_host_tag_value = None\n        self.docker_proxy_username = None\n        self.docker_proxy_password = None\n\n        self.docker_node_resolver = MagicMock()\n        self.docker_node_resolver.get_docker_nodes = MagicMock(return_value=[\"http://node-1/\", \"http://node-2/\", \"http://node-3/\"])\n\n    @patch('docker.Client')\n    def test_returns_summary_of_instances(self, docker_client):\n        # given\n        (docker_conn1, docker_conn2, docker_conn3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n        # when\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n        summary = docker_controller.get_instance_summary()\n\n        # then\n        self.assertEqual(3, summary['total_instances'])\n        self.assertEqual(1, summary['apps']['ers-checking-frontend-27'])\n        self.assertEqual(2, summary['apps']['paye'])\n\n    @patch('docker.Client')\n    def test_logs_exception_when_docker_nodes_config_is_bad(self, docker_client):\n        \"\"\"\n        With three nodes configured but with one bad node, an error should be logged but 2 nodes should be returned\n        \"\"\"\n        # given\n        (docker_conn1, docker_conn2, docker_conn3) = ClientMock().mock_two_docker_nodes(docker_client)\n        expected_nodes = ['node-1', 'node-2']\n\n        # when\n        # Note the very subtle ']' which results in the URL being considered as an invalid IPv6 URL.\n        self.config.docker_nodes = [\"http://node-1/\", \"http://node-2/\", \"http://node-3]\"]\n        self.docker_node_resolver.get_docker_nodes = MagicMock(return_value=self.config.docker_nodes)\n\n        # then\n        with LogCapture(names='captain.docker_controller', level=logging.ERROR) as l:\n            docker_controller = DockerController(self.config, self.docker_node_resolver)\n            l.check(\n                ('captain.docker_controller', 'ERROR', \"{'message': 'Could not obtain connection to docker node: http://node-3]. 
Exception: Invalid IPv6 URL'}\")\n )\n nodes = docker_controller.get_nodes()\n self.assertTrue(len(nodes) == 2)\n for node in nodes:\n self.assertIn(node['id'], expected_nodes)\n\n @patch('docker.Client')\n def test_returns_all_instances_with_ports(self, docker_client):\n # given\n (docker_conn1, docker_conn2, docker_conn3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n # get_instances is async and order isn't guaranteed, sort it for the tests\n instances = sorted(docker_controller.get_instances(), key=lambda i: i[\"id\"])\n\n # then\n self.assertEqual(3, instances.__len__())\n\n instance1 = instances[0]\n self.assertEqual(\"656ca7c307d178\", instance1[\"id\"])\n self.assertEqual(\"ers-checking-frontend-27\", instance1[\"app\"])\n self.assertEqual(\"node-1\", instance1[\"node\"])\n self.assertEqual(9225, instance1[\"port\"])\n self.assertEqual(\"https://host/ers-checking-frontend_27.tgz\", instance1[\"slug_uri\"])\n self.assertEqual(2, instance1[\"environment\"].__len__())\n self.assertEqual(\"-Dapplication.secret=H7dVw$PlJiD)^U,oa4TA1pa]pT:4ETLqbL&2P=n6T~p,A*}^.Y46@PQOV~9(B09Hc]t7-hsf~&@w=zH -Dapplication.log=INFO -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080 -Dgovuk-tax.Prod.google-analytics.token=UA-00000000-0 -Drun.mode=Prod -Dsession.secure=true -Dsession.httpOnly=true -Dcookie.encryption.key=fqpLDZ4smuDsekHkrEBlCA==\", instance1[\"environment\"][\"HMRC_CONFIG\"])\n self.assertEqual(\"-Xmx256m -Xms256m\", instance1[\"environment\"][\"JAVA_OPTS\"])\n\n instance2 = instances[2]\n self.assertEqual(\"eba8bea2600029\", instance2[\"id\"])\n self.assertEqual(\"paye\", instance2[\"app\"])\n self.assertEqual(\"node-1\", instance2[\"node\"])\n self.assertEqual(9317, instance2[\"port\"])\n self.assertEqual(\"https://host/paye_216.tgz\", instance2[\"slug_uri\"])\n self.assertEqual(2, instance2[\"environment\"].__len__())\n self.assertEqual(\"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\", instance2[\"environment\"][\"HMRC_CONFIG\"])\n self.assertEqual(\"-Xmx256m -Xms256m\", instance2[\"environment\"][\"JAVA_OPTS\"])\n\n instance3 = instances[1]\n self.assertEqual(\"80be2a9e62ba00\", instance3[\"id\"])\n self.assertEqual(\"paye\", instance3[\"app\"])\n self.assertEqual(\"node-2\", instance3[\"node\"])\n self.assertEqual(9317, instance3[\"port\"])\n self.assertEqual(2, instance3[\"environment\"].__len__())\n self.assertEqual(\"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\", instance3[\"environment\"][\"HMRC_CONFIG\"])\n self.assertEqual(\"-Xmx256m -Xms256m\", instance3[\"environment\"][\"JAVA_OPTS\"])\n # One container stopped\n docker_conn1.remove_container.assert_has_calls([call(\"381587e2978216\")])\n # One container with FinishedAt time of 0 removed\n docker_conn1.remove_container.assert_has_calls([call(\"3815178hgdasf6\")])\n self.assertEqual(docker_conn1.remove_container.call_count, 2)\n self.assertEqual(docker_conn2.remove_container.call_count, 1)\n # jh23899fg00029 doesn't have captain ports defined and should be ignored.\n self.assertFalse([i for i in instances if i[\"id\"] == \"jh23899fg00029\"])\n\n self.assertRaises(ConnectionError, docker_conn3.containers)\n\n @patch('docker.Client')\n @patch('uuid.uuid4')\n def test_starts_instance(self, uuid_mock, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = 
ClientMock().mock_two_docker_nodes(docker_client)\n uuid_mock.return_value = 'SOME-UUID'\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n started_instance = docker_controller.start_instance(\n \"paye\", \"https://host/paye_216.tgz\", \"node-1\", None,\n {'HMRC_CONFIG': \"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\",\n 'JAVA_OPTS': \"-Xmx256m -Xms256m\"}, 2)\n\n # then\n self.assertEqual(\"eba8bea2600029\", started_instance[\"id\"])\n self.assertEqual(\"paye\", started_instance[\"app\"])\n self.assertEqual(\"node-1\", started_instance[\"node\"])\n self.assertEqual(9317, started_instance[\"port\"])\n self.assertEqual(\"https://host/paye_216.tgz\", started_instance[\"slug_uri\"])\n self.assertEqual(2, started_instance[\"environment\"].__len__())\n self.assertEqual(\"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\", started_instance[\"environment\"][\"HMRC_CONFIG\"])\n self.assertEqual(\"-Xmx256m -Xms256m\", started_instance[\"environment\"][\"JAVA_OPTS\"])\n self.assertEqual(2, started_instance[\"slots\"])\n\n mock_client_node1.create_container.assert_called_with(image=\"{}:{}\".format(self.config.slug_runner_image, str(self.config.slug_runner_version)),\n command=self.config.slug_runner_command,\n ports=[8080],\n environment={\n 'PORT': '8080',\n 'SLUG_URL': 'https://host/paye_216.tgz',\n 'HMRC_CONFIG': '-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',\n 'JAVA_OPTS': '-Xmx256m -Xms256m'\n },\n detach=True,\n name=\"paye_SOME-UUID\",\n cpu_shares=2,\n hostname=None,\n mem_limit=256 * 1024 * 1024,\n )\n\n docker_controller.start_instance(\n \"paye\", \"http://host/paye-216-slug.tgz\", \"node-1\", None,\n {'HMRC_CONFIG': \"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\",\n 'JAVA_OPTS': \"-Xmx256m -Xms256m\"})\n\n mock_client_node1.create_container.assert_called_with(image=\"{}:{}\".format(self.config.slug_runner_image, str(self.config.slug_runner_version)),\n command=self.config.slug_runner_command,\n ports=[8080],\n environment={\n 'PORT': '8080',\n 'SLUG_URL': 'http://host/paye-216-slug.tgz',\n 'HMRC_CONFIG': '-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',\n 'JAVA_OPTS': '-Xmx256m -Xms256m'\n },\n detach=True,\n name=\"paye_SOME-UUID\",\n cpu_shares=2,\n mem_limit=256 * 1024 * 1024,\n hostname=None,\n )\n\n mock_client_node1.start.assert_called_with(\"eba8bea2600029\", port_bindings={8080: None})\n\n self.assertFalse(mock_client_node2.create_container.called)\n self.assertFalse(mock_client_node2.start.called)\n\n @patch('docker.Client')\n @patch('uuid.uuid4')\n def test_starts_instance_on_specific_slug_runner_version(self, uuid_mock, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n uuid_mock.return_value = 'SOME-OTHER-UUID'\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n started_instance = docker_controller.start_instance(\n \"paye\", \"https://host/paye_216.tgz\", \"node-1\", None,\n {'HMRC_CONFIG': \"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\",\n 'JAVA_OPTS': \"-Xmx256m -Xms256m\"}, 2, None, \"0.0.99\")\n\n # then\n 
mock_client_node1.create_container.assert_called_with(image=\"{}:{}\".format(self.config.slug_runner_image, \"0.0.99\"),\n command=self.config.slug_runner_command,\n ports=[8080],\n environment={\n 'PORT': '8080',\n 'SLUG_URL': 'https://host/paye_216.tgz',\n 'HMRC_CONFIG': '-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080',\n 'JAVA_OPTS': '-Xmx256m -Xms256m'\n },\n detach=True,\n name=\"paye_SOME-OTHER-UUID\",\n cpu_shares=2,\n hostname=None,\n mem_limit=256 * 1024 * 1024,\n )\n\n @patch('docker.Client')\n def test_stops_instance(self, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n result = docker_controller.stop_instance(\"80be2a9e62ba00\")\n\n # then\n self.assertTrue(result)\n\n self.assertFalse(mock_client_node1.stop.called)\n mock_client_node1.remove_container.assert_not_called_with(\"80be2a9e62ba00\")\n\n mock_client_node2.stop.assert_called_with('80be2a9e62ba00')\n mock_client_node2.remove_container.assert_called_with('80be2a9e62ba00', force=True)\n\n @patch('docker.Client')\n def test_stops_instance_even_if_remove_container_fails(self, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n result = docker_controller.stop_instance(\"80be2a9e62ba00\")\n\n # then\n self.assertTrue(result)\n\n self.assertFalse(mock_client_node1.stop.called)\n mock_client_node1.remove_container.assert_not_called_with('80be2a9e62ba00')\n\n mock_client_node2.stop.assert_called_with('80be2a9e62ba00')\n mock_client_node2.remove_container.assert_called_with('80be2a9e62ba00', force=True)\n\n @patch('docker.Client')\n def test_returns_false_when_trying_to_stop_nonexisting_instance(self, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n result = docker_controller.stop_instance(\"nonexisting-instance\")\n\n # then\n self.assertFalse(result)\n\n self.assertFalse(mock_client_node1.stop.called)\n mock_client_node1.remove_container.assert_not_called_with('nonexisting-instance')\n\n self.assertFalse(mock_client_node2.stop.called)\n mock_client_node2.remove_container.assert_not_called_with('nonexisting-instance')\n\n @patch('docker.Client')\n def test_over_capacity(self, docker_client):\n # given\n (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n # when\n docker_controller = DockerController(self.config, self.docker_node_resolver)\n # Force an over capacity error\n current_slot_count = sum([i[\"slots\"] for i in docker_controller.get_instances() if i['node'] == 'node-1'])\n self.assertTrue(current_slot_count != self.config.slots_per_node)\n\n # then\n self.assertRaises(exceptions.NodeOutOfCapacityException,\n docker_controller.start_instance, \"paye\", \"http://host/paye-216-slug.tgz\", \"node-1\", None,\n {'HMRC_CONFIG': \"-Dapplication.log=INFO -Drun.mode=Prod -Dlogger.resource=/application-json-logger.xml -Dhttp.port=8080\",\n 'JAVA_OPTS': \"-Xmx256m -Xms256m\"}, self.config.slots_per_node - current_slot_count + 1)\n\n @patch('docker.Client')\n def 
test_get_node_details(self, docker_client):\n        (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        self.assertRaises(exceptions.NoSuchNodeException, docker_controller.get_node, \"bum-node-1\")\n\n        node_details = docker_controller.get_node(\"node-1\")\n        self.assertDictEqual(\n            {\"id\": \"node-1\",\n             \"slots\": {\"free\": 6, \"used\": 4, \"total\": 10}, \"state\": \"healthy\"},\n            node_details\n        )\n\n    @patch('docker.Client')\n    def test_aws_exceptions_are_not_bubbled_up_when_refreshing_docker_nodes(self, docker_client):\n        \"\"\"\n        Directly tests that the private method '__refresh_docker_node_connections' will catch\n        ClientErrors that it encounters and will not re-throw them.\n        \"\"\"\n        # Given\n        ClientMock().mock_one_docker_node(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        # When\n        self.docker_node_resolver.get_docker_nodes = Mock(side_effect=ClientError({}, 'Request limit exceeded.'))\n        # Absence of an exception being raised confirms the desired functionality here.\n\n        # Then\n        with LogCapture(names='captain.docker_controller', level=logging.WARNING) as l:\n            docker_controller._DockerController__refresh_docker_node_connections()\n            l.check(\n                ('captain.docker_controller', 'WARNING',\n                 \"{'message': 'Unable to refresh docker nodes. Exception: An error occurred (Unknown) when calling the Request limit exceeded. operation: Unknown'}\")\n            )\n\n    @patch('docker.Client')\n    def test_node_connections_are_unaffected_by_aws_exceptions(self, docker_client):\n        \"\"\"\n        We've focussed on get_node here, but what we're really testing is that\n        the stateful node_connections list is unaffected when '__refresh_docker_node_connections'\n        encounters an exception. This is important as these errors\n        were previously being bubbled up to Flask/Gunicorn and were causing threads to die.\n        \"\"\"\n        ClientMock().mock_one_docker_node(docker_client)\n\n        # Initially populate the internal docker_nodes list.\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n        node1_details = {\"id\": \"node-1\", \"slots\": {\"free\": 6, \"used\": 4, \"total\": 10}, \"state\": \"healthy\"}\n\n        # Prove that the internal node_connections list was populated as we expected.\n        returned_node_details = docker_controller.get_node(\"node-1\")\n        self.assertDictEqual(node1_details, returned_node_details)\n\n        # Simulate an AWS failure.\n        self.docker_node_resolver.get_docker_nodes = Mock(side_effect=ClientError({}, 'Request limit exceeded.'))\n\n        # Assert that we still get the originally stored node back,\n        # i.e. the AWS failure didn't affect the stored node_connections list.
\n        returned_node_details = docker_controller.get_node(\"node-1\")\n        self.assertDictEqual(node1_details, returned_node_details)\n\n    @patch('docker.Client')\n    def test_node_ping_retry_is_eventually_successful(self, docker_client):\n        # Given\n        mock_client_node1 = ClientMock().mock_one_docker_node(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        # When\n        mock_client_node1.ping = Mock(side_effect=[ConnectionError(\"node-1 is unhealthy\"),\n                                                   ConnectionError(\"node-1 is unhealthy\"),\n                                                   None])\n        node_details = docker_controller.get_node('node-1')\n\n        # Then\n        self.assertDictEqual(\n            {\"id\": \"node-1\",\n             \"slots\": {\"free\": 6, \"used\": 4, \"total\": 10}, \"state\": \"healthy\"},\n            node_details\n        )\n\n    @patch('docker.Client')\n    def test_node_ping_retry_exhaustion(self, docker_client):\n        # Given\n        mock_client_node1 = ClientMock().mock_one_docker_node(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        # When\n        mock_client_node1.ping = Mock(side_effect=[ConnectionError(\"node-1 is unhealthy\"),\n                                                   ConnectionError(\"node-1 is unhealthy\"),\n                                                   ConnectionError(\"node-1 is unhealthy\")])\n        node_details = docker_controller.get_node('node-1')\n\n        # Then\n        self.assertDictEqual(\n            {\"id\": \"node-1\",\n             \"slots\": {\"free\": 0, \"used\": 0, \"total\": 0}, \"state\": \"ConnectionError('node-1 is unhealthy',)\"},\n            node_details\n        )\n\n    @patch('docker.Client')\n    def test_get_logs(self, docker_client):\n        (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        self.assertRaises(exceptions.NoSuchInstanceException, docker_controller.get_logs, \"non-existant\")\n\n        instance_logs = docker_controller.get_logs(\"80be2a9e62ba00\")\n        self.assertEqual(\n            ({\"msg\": \"this is line 1\\n\"}, {\"msg\": \"this is line 2\\n\"}),\n            tuple(itertools.islice(instance_logs, 2)))\n\n        instance_logs = docker_controller.get_logs(\"eba8bea2600029\", follow=True)\n        self.assertEqual(\n            ({\"msg\": \"this is line 1\"}, {\"msg\": \"this is line 2\"}, {\"msg\": \"this is line 3\"}),\n            tuple(itertools.islice(instance_logs, 3)))\n\n    @patch('docker.Client')\n    def test_get_nodes_when_exceptions_are_raised(self, docker_client):\n        with LogCapture(names='captain.docker_controller', level=logging.ERROR) as l:\n            # Given\n            ClientMock().mock_one_docker_node(docker_client)\n            self.config.docker_nodes = [\"http://node-1/\"]\n            self.docker_node_resolver.get_docker_nodes = MagicMock(return_value=[\"http://node-1/\"])\n\n            docker_controller = DockerController(self.config, self.docker_node_resolver)\n            docker_controller.get_node = Mock(side_effect=SSLError('BOOM!'))\n\n            # When\n            nodes = docker_controller.get_nodes()\n\n            # Then\n            self.assertTrue(len(nodes) == 0)\n            l.check(\n                ('captain.docker_controller', 'ERROR', '{\\'message\\': \"Getting details for node-1 raised an exception of type \\'SSLError\\': BOOM!\"}')\n            )\n\n    @patch('docker.Client')\n    def test_get_nodes(self, docker_client):\n        (mock_client_node1, mock_client_node2, mock_client_node3) = ClientMock().mock_two_docker_nodes(docker_client)\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        nodes = docker_controller.get_nodes()\n        self.assertTrue(len(nodes) == 3)\n        self.assertIn(\n            {\"id\": \"node-1\",\n             \"slots\": {\"free\": 6, \"used\": 4, \"total\": 10}, \"state\": \"healthy\"},\n            nodes\n        )
\n\n    @patch('docker.Client')\n    def test_gc(self, docker_client):\n        # given\n        (docker_conn1, docker_conn2, docker_conn3) = ClientMock().mock_two_docker_nodes(docker_client)\n\n        # when\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n        # trigger gc\n        docker_controller.get_instances()\n\n        # then\n        # 61c2695fd82a is a freshly created but not yet started container and so shouldn't be gc'd\n        self.assertNotIn(call(\"61c2695fd82a\"), docker_conn2.start.mock_calls)\n\n        # 61c2695fd82b is an old container with epoch start and exit times and should be gc'd\n        docker_conn2.remove_container.assert_has_calls([call(\"61c2695fd82b\")])\n\n    @patch('docker.Client')\n    def test_change_in_number_of_configured_nodes(self, docker_client):\n        \"\"\"\n        When the number of configured nodes is changed, this should be reflected in the call to get nodes\n        \"\"\"\n        docker_controller = DockerController(self.config, self.docker_node_resolver)\n\n        # Given 3 nodes from the default mock in setUp\n        nodes = docker_controller.get_nodes()\n        self.assertTrue(len(nodes) == 3)\n        # Override the default mock in setUp to return 2 different nodes\n        self.docker_node_resolver.get_docker_nodes = MagicMock(\n            return_value=[\"http://node-3/\", \"http://node-4/\"])\n        nodes = docker_controller.get_nodes()\n        self.assertEqual(len(nodes), 2, \"Expected number of nodes weren't returned\")\n        self.assertListEqual(sorted([item[\"id\"] for item in nodes]), [\"node-3\", \"node-4\"], \"Expected node names weren't returned\")\n","sub_path":"captain/tests/test_docker_controller.py","file_name":"test_docker_controller.py","file_ext":"py","file_size_in_byte":25515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248354412","text":"import os\r\nimport pandas as pd\r\n\r\nos.getcwd()\r\nos.chdir('C:\\\\Users\\\\Octavian Rosca\\\\Desktop\\\\')\r\nos.listdir()\r\n\r\n# Preparing Our Data\r\n# applying multiple supervised machine learning algorithms and we will see which one yields the highest accuracy\r\n# this will be measured with K-Fold cross validation score where K = 10\r\n\r\nmasses_data = pd.read_csv(\"mammographic_masses.data.txt\")\r\nmasses_data.head()\r\n\r\n# we will make sure that we convert the optional parameters in read_csv to read the missing data as NaN, and we can also add the appropriate column names as well\r\n\r\nmasses_data = pd.read_csv('mammographic_masses.data.txt', na_values = '?', names = ['BI_RADS', 'age', 'shape', 'margin', 'density', 'severity'])\r\n\r\n# we can evaluate to see if our data needs much cleaning\r\nmasses_data.describe()\r\n\r\n# we can see that there are quite a few missing data points in this dataset.\r\nmasses_data.loc[(masses_data['age'].isnull()) | masses_data['shape'].isnull() | masses_data['margin'].isnull() | masses_data['density'].isnull() | masses_data['severity'].isnull()]\r\n\r\n# from eyeballing this I think that the missing values are randomly distributed so I will drop them\r\nmasses_data.dropna(inplace = True)\r\nmasses_data.describe()\r\n\r\n# next we will convert the pandas dataframe into numpy arrays so that they can be used by scikit-learn.\r\nall_features = masses_data[['age', 'shape', 'margin', 'density']].values\r\n\r\nall_classes = masses_data['severity'].values\r\n\r\nfeature_names = ['age', 'shape', 'margin', 'density']\r\nall_features\r\n\r\n# some of the models that we will be using in this project will require that the data be normalised, for this we will be using the StandardScaler()\r\n\r\nfrom sklearn import preprocessing
\r\nscaler = preprocessing.StandardScaler()\r\nall_features_scaled = scaler.fit_transform(all_features)\r\nall_features_scaled\r\n\r\n\r\n### DECISION TREES ###\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# seed numpy's RNG for reproducibility (numpy is imported as np, so the alias must be used here)\r\nnp.random.seed(1234)\r\n\r\n(training_inputs, testing_inputs, training_classes, testing_classes) = train_test_split(all_features_scaled, all_classes, train_size = .75, random_state = 1)\r\n\r\n# now we will create a decision tree and we will fit it to our training data\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nclf = DecisionTreeClassifier(random_state = 1)\r\n\r\n# train the classifier on the training set\r\nclf.fit(training_inputs, training_classes)\r\n\r\n# we can see what the resulting decision tree will look like\r\nfrom IPython.display import Image\r\nfrom sklearn.externals.six import StringIO\r\nfrom sklearn import tree\r\nfrom pydotplus import graph_from_dot_data\r\n\r\ndot_data = StringIO()\r\ntree.export_graphviz(clf, out_file=dot_data,\r\n                     feature_names=feature_names)\r\ngraph = graph_from_dot_data(dot_data.getvalue())\r\n# Image(graph.create_png())\r\n\r\n# measure the score of the resulting decision tree on my test data\r\nclf.score(testing_inputs, testing_classes)\r\n\r\n\r\n# now instead of a single train/test split we can use K-Fold Cross Validation to get a better measure of my model's accuracy\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nclf = DecisionTreeClassifier(random_state = 1)\r\ncv_scores = cross_val_score(clf, all_features_scaled, all_classes, cv = 10)\r\ncv_scores.mean()\r\n\r\n# we can try and use a RandomForest classifier instead to see if we can improve upon these results\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclf = RandomForestClassifier(n_estimators = 10, random_state = 1)\r\n\r\ncv_scores = cross_val_score(clf, all_features_scaled, all_classes, cv = 10)\r\ncv_scores.mean()\r\n\r\n\r\n### SVM ###\r\n# we will try a different model and use an SVM with a linear kernel and see how this does\r\nfrom sklearn import svm\r\nC = 1.0\r\nsvc = svm.SVC(kernel = 'linear', C = C)\r\n\r\ncv_scores = cross_val_score(svc, all_features_scaled, all_classes, cv=10)\r\n\r\ncv_scores.mean()\r\n\r\n### KNN ###\r\n\r\n# We can try fitting a K-nearest neighbours model to this as well and see what the outcome of this is.\r\n# We use a k value of 10 first\r\nfrom sklearn import neighbors\r\nclf = neighbors.KNeighborsClassifier(n_neighbors = 10)\r\n\r\ncv_scores = cross_val_score(clf, all_features_scaled, all_classes, cv = 10)\r\n\r\ncv_scores.mean()\r\n\r\n# choosing the value of K can be tricky, we will try k values ranging from 1 to 50 and see which value is optimal\r\n\r\nfor n in range(1, 51):\r\n    clf = neighbors.KNeighborsClassifier(n_neighbors = n)\r\n    cv_scores = cross_val_score(clf, all_features_scaled, all_classes, cv = 10)\r\n    print (n, cv_scores.mean())\r\n\r\n### Naive Bayes ###\r\n# we can also try a naive_bayes.MultinomialNB and see how the accuracy stacks up.\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\n\r\nscaler = preprocessing.MinMaxScaler()\r\n\r\nall_features_minmax = scaler.fit_transform(all_features)\r\n\r\nclf = MultinomialNB()\r\n\r\ncv_scores = cross_val_score(clf, all_features_minmax, all_classes, cv=10)\r\n\r\ncv_scores.mean()
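\r\n\r\n# (Side note: MultinomialNB requires non-negative feature values, which is presumably why this step\r\n# rescales with MinMaxScaler instead of reusing the standardised features, which can be negative.)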
\r\n\r\n### Revisiting the SVM model ###\r\n\r\n# we can try to use an SVM with different kernels and see what effect this change has on the performance of the model\r\nC = 1.0\r\nsvc = svm.SVC(kernel = 'rbf', C = C)\r\ncv_scores = cross_val_score(svc, all_features_scaled, all_classes, cv = 10)\r\n\r\ncv_scores.mean()\r\n\r\n\r\nC = 1.0\r\nsvc = svm.SVC(kernel='sigmoid', C=C)\r\ncv_scores = cross_val_score(svc, all_features_scaled, all_classes, cv=10)\r\ncv_scores.mean()\r\n\r\n\r\nC = 1.0\r\nsvc = svm.SVC(kernel='poly', C=C)\r\ncv_scores = cross_val_score(svc, all_features_scaled, all_classes, cv=10)\r\ncv_scores.mean()\r\n\r\n\r\n### Logistic Regression ###\r\nfrom sklearn.linear_model import LogisticRegression\r\nclf = LogisticRegression()\r\n\r\ncv_scores = cross_val_score(clf, all_features_scaled, all_classes, cv=10)\r\ncv_scores.mean()\r\n\r\n\r\n### Neural Networks ###\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.backend import set_session\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True  # so that we can dynamically grow the memory usage on the gpu\r\nsess = tf.Session(config = config)\r\nset_session(sess)  # set this tensorflow session as the default session for keras\r\n\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.models import Sequential\r\n\r\ndef create_model():\r\n    model = Sequential()\r\n    model.add(Dense(6, input_dim=4, kernel_initializer='normal', activation='relu'))\r\n    # \"Deep learning\" turns out to be unnecessary - this additional hidden layer doesn't help either.\r\n    #model.add(Dense(4, kernel_initializer='normal', activation='relu'))\r\n    # Output layer with a binary classification (benign or malignant)\r\n    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\r\n    # Compile model; adam seemed to work best\r\n    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n    return model\r\n\r\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\r\n# Wrap our Keras model in an estimator compatible with scikit-learn\r\nestimator = KerasClassifier(build_fn=create_model, epochs=100, verbose=0)\r\n# Now we can use scikit-learn's cross_val_score to evaluate this model identically to the others\r\ncv_scores = cross_val_score(estimator, all_features_scaled, all_classes, cv=10)\r\ncv_scores.mean()\r\n","sub_path":"ML_FK_project.py","file_name":"ML_FK_project.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487198676","text":"# The football.csv file contains the results from the English Premier League.\n# The columns labeled ‘Goals’ and ‘Goals Allowed’ contain the total number of\n# goals scored for and against each team in that season (so Arsenal scored 79 goals\n# against opponents, and had 36 goals scored against them). Write a program to read the file,\n# then print the name of the team with the smallest difference in ‘for’ and ‘against’ goals.\n\nimport csv\n\n# Reads the football.csv file and stores data as a list of lists (without headers).\nteamslist = list()\nwith open('football.csv', 'r') as f:\n    reader = csv.reader(f)\n    teamslist = list(reader)\nteamslist.pop(0)\n\n# Iterates through the list of stats by football team and stores the value of each\n# team's goals 'for' minus their goals 'against' in a new list. 
Sets the value of\n#an index variable i to the index of the lowest value of the new list.\ndif_list = list()\nfor t in teamslist:\n dif_list.append(int(t[5]) - int(t[6]))\ni = dif_list.index(min(dif_list))\n\nprint(teamslist[i][0])\n","sub_path":"python/q8_parsing.py","file_name":"q8_parsing.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382424986","text":"from django.urls import path\nfrom . import views\n\n\n\nurlpatterns = [\n path('', views.home, name = 'home'),\n path('detail//', views.detail, name= 'detail'),\n path('about/', views.about, name='about'),\n path('contactme/', views.contact, name='contact'),\n path('category/', views.ProjectListView.as_view(), name='list'),\n\n\n\n]","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3905582","text":"from Mov_v2r_d import Mov_v2r_d\r\nfrom Mov_v2m import Mov_v2m\r\nfrom Mov_m2r import Mov_m2r\r\nfrom Jbe2Jle import JBE2JLE\r\nfrom Type import CodeType\r\nfrom Movzbl import Movzbl\r\nfrom Ja2Jg import JA2JG\r\nfrom Jae2Jge import JAE2JGE\r\nfrom Jb2Jl import JB2JL \r\nimport re\r\n\r\nRamdomCode=False\r\n\r\nclass HandleAsmFile:\r\n \r\n def __init__(self,filename):\r\n self.filedir=\"C:/Users/mut0u/Documents/work/\"\r\n self.count=0\r\n self.mark='.Mich'\r\n self.input = open(self.filedir+filename,'r')\r\n self.file= self.input.read()\r\n self.str=''\r\n self.ran= open('ran.p','r').read()\r\n self.output = open(self.filedir+filename[:-2]+'_change.s','w')\r\n self.Mov_v2r_d_instance = Mov_v2r_d()\r\n self.Mov_v2m_instance = [Mov_v2m().method,Mov_v2m().method2,Mov_v2m().method3]\r\n self.jbe2jle_instance =JBE2JLE()\r\n self.ja2jg_instance = JA2JG()\r\n self.jae2jge_instance = JAE2JGE()\r\n self.jb2jl_instance = JB2JL()\r\n \r\n \r\n self.Mov_m2r_instance = Mov_m2r()\r\n self.movzbl_instance = Movzbl()\r\n self.result = {}.fromkeys(('MOV_V2M', 'MOV_V2R_D','MOV_M2R','JA','JAE','JB','JBE','MOVZBL'), 0)\r\n \r\n self.Mov_v2m_instance_test = Mov_v2m()\r\n def findCodeArea(self,partOfFile):\r\n c = partOfFile\r\n m=re.search(r'(?m)^(\\w+):',c)\r\n if m is None:\r\n return None\r\n fname=m.group(1)\r\n print(fname)\r\n begin=m.start(1)+len(fname)+2\r\n m1=re.search('(?m)^.+?(.size)\\W+'+fname+',\\W+'+'.-'+fname,c[begin:])\r\n if m1 is None:\r\n return None\r\n offset=m1.start(1)\r\n codeA = c[begin:begin+offset]\r\n self.str+=c[:begin]\r\n self.changeCodeLine(codeA)\r\n return begin+offset\r\n def findCodeArea2(self,partOfFile):\r\n c = partOfFile\r\n a = re.compile('(?P^(\\w+)):\\\\n(.*?)(\\.size)\\s+(?P=fname),\\s+\\.-(?P=fname)', re.MULTILINE|re.DOTALL )\r\n m=a.search(c)\r\n if m is None:\r\n return None\r\n print('handle funtion '+ m.group(1))\r\n codeA = m.group(3)\r\n begin = m.start(3)\r\n self.str+=c[:begin]\r\n offset = m.end(3)\r\n\r\n self.changeCodeLine(codeA)\r\n return offset\r\n \r\n def handleMOV_V2M(self,code):\r\n self.result['MOV_V2M']=self.result['MOV_V2M']+1\r\n \r\n value = code[1][1:]\r\n offset= code[2][:code[2].index('(')]\r\n if offset :\r\n offset = int(offset)\r\n else:\r\n offset = 0\r\n reg= code[2][code[2].index('(')+2:code[2].index(')')]\r\n self.str += self.Mov_v2m_instance_test.method(value,offset,reg)\r\n self.str += '\\n'\r\n \r\n def handleMOV_V2MwithRandom(self,code):\r\n self.result['MOV_V2M']=self.result['MOV_V2M']+1\r\n \r\n self.count= self.count+1 \r\n kinds = 
len(self.Mov_v2m_instance)\r\n pr='subl $4, %esp\\nmovl $'+str(kinds)+' , (%esp)\\ncall r\\n addl $4 ,%esp\\n'\r\n pre='' ; rep='' ; value = code[1][1:]\r\n #print(code[2])\r\n offset= code[2][:code[2].index('(')]\r\n if offset :\r\n offset = int(offset)\r\n else:\r\n offset = 0\r\n reg= code[2][code[2].index('(')+2:code[2].index(')')]\r\n for i in range(kinds):\r\n mar= self.mark+str(self.count)+'f'+ str(i)\r\n pre+='cmpl $'+str(i) +',%eax\\n\\tje '+mar+\"\\n\"\r\n ll=self.Mov_v2m_instance[i](value,offset,reg)\r\n rep+= '\\t'+mar+':\\n\\t' + ll +'\\njmp .End'+self.mark+str(self.count)+'\\n'\r\n \r\n self.str+= pr+pre+'\\n'+rep +'\\n'\r\n \r\n self.str+='\\t.End'+self.mark +str(self.count)+':\\n'\r\n \r\n def handleMOV_V2R_D(self,code):\r\n self.result['MOV_V2R_D']=self.result['MOV_V2R_D']+1\r\n \r\n left=code[1][1:]\r\n right=code[2][1:]\r\n self.str += self.Mov_v2r_d_instance.method(left,right)\r\n self.str += '\\n'\r\n\r\n def handleMOV_V2R_DwithRandom(self,code):\r\n self.result['MOV_V2R_D']=self.result['MOV_V2R_D']+1\r\n \r\n pass\r\n \r\n \r\n \r\n def handleJBE(self,code):\r\n self.result['JBE']=self.result['JBE']+1\r\n \r\n mar= self.mark+str(self.count)+':'\r\n self.count= self.count+1\r\n self.str += self.jbe2jle_instance.method(code[1])\r\n \r\n \r\n def handleJA(self,code):\r\n self.result['JA']=self.result['JA']+1\r\n mar= self.mark+str(self.count)+':'\r\n self.count= self.count+1\r\n self.str += self.ja2jg_instance.method(code[1])\r\n\r\n def handleJAE(self,code):\r\n self.result['JAE']=self.result['JAE']+1\r\n mar= self.mark+str(self.count)+':'\r\n self.count= self.count+1\r\n self.str += self.jae2jge_instance.method(code[1])\r\n def handleJB(self,code):\r\n self.result['JB']=self.result['JB']+1\r\n mar= self.mark+str(self.count)+':'\r\n self.count= self.count+1\r\n self.str += self.jb2jl_instance.method(code[1])\r\n \r\n \r\n def handleJBEwithRandom(self,code):\r\n self.result['JBE']=self.result['JBE']+1\r\n \r\n pass\r\n \r\n def handleMOV_M2R(self,code):\r\n self.result['MOV_M2R']=self.result['MOV_M2R']+1\r\n \r\n offset = code[1][:code[1].index('(')]\r\n if offset:\r\n offset = int(offset)\r\n else:\r\n offset =0\r\n stack = code[1][code[1].index('(')+1:-1]\r\n register = code[2]\r\n #print('offset: %d |reister: %s | stack : %s' %(offset,register,stack))\r\n self.str += self.Mov_m2r_instance.method(register,stack,offset)\r\n def handleMOV_M2RwithRandom(self,code):\r\n self.result['MOV_M2']=self.result['MOV_M2']+1\r\n \r\n pass\r\n def handleMOVZBL(self,code):\r\n self.result['MOVZBL']=self.result['MOVZBL']+1\r\n if len(code)==4:\r\n code[1]=code[1]+','+code[2]\r\n code[2]=code[3]\r\n \r\n pass\r\n self.str +=self.movzbl_instance.method(code[1],code[2])\r\n \r\n \r\n \r\n def changeCodeLine(self,codeArea):\r\n codelines=codeArea.split('\\n')\r\n for line in codelines:\r\n #print(\"handle the line: \" + line)\r\n #self.changeCodeContext(line)\r\n #print(\"code change to :\\n\"+codeReplace)\r\n \"\"\" check the instruction type and then use a certain function to rewrite the instruction\r\n \"\"\"\r\n line =line.strip()\r\n if line=='':\r\n return\r\n code= re.split(r'[^0-9a-zA-Z-_()$%.]+',line)\r\n cType=CodeType.examCodeType(code)\r\n \r\n if cType == CodeType.MOV_V2M:\r\n print('MOV_V2M')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n' \r\n \r\n if RamdomCode:\r\n self.handleMOV_V2MwithRandom(code)\r\n else:\r\n self.handleMOV_V2M(code)\r\n \r\n elif cType == CodeType.MOV_V2R_D:\r\n print('MOV_V2R_D')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n' \r\n 
\r\n self.handleMOV_V2R_D(code)\r\n \r\n \r\n elif cType == CodeType.MOV_M2R:\r\n print('MOV_M2R')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n \r\n self.handleMOV_M2R(code)\r\n \r\n elif cType == CodeType.MOVZBL:\r\n print('MOVZBL')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n self.handleMOVZBL(code)\r\n \r\n elif cType == CodeType.JA:\r\n print('JA')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n self.handleJA(code)\r\n \r\n elif cType == CodeType.JAE:\r\n print('JAE')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n self.handleJAE(code)\r\n elif cType == CodeType.JB:\r\n print('JB')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n self.handleJB(code)\r\n elif cType == CodeType.JBE:\r\n print('JBE')\r\n self.str += '#'*50+'\\n\\t#'+line+'\\n'+'#'*50+'\\n'\r\n self.handleJBE(code)\r\n \r\n else:\r\n self.str+= '\\t' + line +'\\n'\r\n def begin(self):\r\n index=self.findCodeArea(self.file)\r\n \r\n while index is not None:\r\n self.file=self.file[index:]\r\n #print(self.file)\r\n index =self.findCodeArea(self.file)\r\n dail = self.file.split('\\n')\r\n self.str += dail[0]+'\\n'\r\n self.str += self.ran +'\\n'\r\n for d in dail[1:]:\r\n self.str +=d+'\\n'\r\n #print()\r\n #print(self.str)\r\n self.output.write(self.str)\r\n self.output.close()\r\n def begin2(self):\r\n index=self.findCodeArea2(self.file)\r\n \r\n while index is not None:\r\n self.file=self.file[index:]\r\n #print(self.file)\r\n index =self.findCodeArea2(self.file)\r\n dail = self.file.split('\\n')\r\n \r\n self.str += dail[0]+'\\n'\r\n \r\n if RamdomCode:\r\n self.str += self.ran +'\\n'\r\n \r\n \r\n for d in dail[1:]:\r\n self.str +=d+'\\n'\r\n #print()\r\n #print(self.str)\r\n self.output.write(self.str)\r\n self.output.close()\r\nif __name__==\"__main__\":\r\n \r\n a = HandleAsmFile('users-michael.s')\r\n area =\"movzbl -4128(%ebp,%eax), %eax\"\r\n a.begin2()\r\n \r\n #a.begin()\r\n print(a.result)\r\n #a.changeCodeLine(area)","sub_path":"python/work/HandleAsmFile.py","file_name":"HandleAsmFile.py","file_ext":"py","file_size_in_byte":9919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79678996","text":"# @Time : 2021/3/30 22:16\n# @Author : Yang Wang\n\"\"\"\nIn this module the class of the main interface is declared. The callback functions for ui elements are defined\nin the class. In addition, multi-threaded classes for refreshing the main interface are also declared.\n\"\"\"\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nimport time\nimport init_global_variable\nimport global_variable as glv\nimport global_constant as glc\nimport serial\nimport command as cmd\nimport graph\nimport numpy as np\nimport threading\nimport multiprocessing\n# import BLED112.BLEconnect as blec\n\n\nclass MainUI(QWidget):\n \"\"\"The main menu of the program\n\n The main menu of the program. The layout of the UI is loaded from file main.ui created by QT Designer. The ui\n elements are the attributes of the class.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor of the class\n\n constructor of the class. In this function the initial status and callback function of the ui elements are\n defined. 
In addition, the multiprocessing and multithreading helpers are created.\n\n\n \"\"\"\n super().__init__()\n\n # Load the UI layout which is created with PyQt.\n uic.loadUi(\"ui/main.ui\", self)\n\n # Set table\n self.table_offset.setEditTriggers(QTableWidget.NoEditTriggers)\n self.table_v.setEditTriggers(QTableWidget.NoEditTriggers)\n for i in np.arange(6):\n self.table_v.horizontalHeader().setSectionResizeMode(i, QHeaderView.Stretch)\n self.table_v.verticalHeader().setSectionResizeMode(i, QHeaderView.Stretch)\n self.table_offset.horizontalHeader().setSectionResizeMode(i, QHeaderView.Stretch)\n self.table_offset.verticalHeader().setSectionResizeMode(i, QHeaderView.Stretch)\n\n # Set button callback functions\n self.button_connect.clicked.connect(self.connect)\n self.button_set_offset.clicked.connect(self.set_offset)\n self.button_show_graph.clicked.connect(self.show_graph)\n self.button_run_c.clicked.connect(self.run_c)\n self.button_run_s.clicked.connect(self.run_s)\n # Set buttons' enable-state\n self.button_set_offset.setEnabled(False)\n self.button_show_graph.setEnabled(False)\n self.button_run_c.setEnabled(False)\n self.button_run_s.setEnabled(False)\n\n # Set combobox callback function\n self.combobox_gain.currentIndexChanged.connect(self.set_gain)\n # Set comboboxes' enable-state\n self.combobox_gain.setEnabled(False)\n self.label_gain.setEnabled(False)\n\n # Create a child thread in the main process to update the UI.\n self.update_ui_thread = UpdateUI()\n self.update_ui_thread.update_table.connect(self.update_table)\n self.update_ui_thread.update_button.connect(self.update_button)\n\n # Create a child thread in the main process to read the measurement.\n self.read_measurement_thread = threading.Thread(target=cmd.read_measurement, daemon=True)\n\n # Create two new processes for the graphs.\n self.graph_process_v = multiprocessing.Process(target=graph.realtime_plot_v, args=(glv.sensor_data_sharing,))\n self.graph_process_xyz = multiprocessing.Process(target=graph.realtime_plot_xyz,\n args=(glv.sensor_data_sharing,))\n\n # Initialization\n init_global_variable.init_global_variable()\n\n def connect(self):\n \"\"\"The callback function of button 'Connect'.\n\n After clicking the button 'Connect', the serial port object is created to establish the transmission between\n the sensor controller and PC.\n The button can't be clicked twice. 
A second click will raise an exception and\n open a warning window.\n In addition, after a successful connection the other UI elements are enabled.\n\n Returns:\n None\n\n \"\"\"\n try:\n # glv.serial_port = serial.Serial(port=glc.serialPortName,\n # baudrate=glc.serialBaudRate,\n # bytesize=glc.serialDataBits,\n # parity=glc.serialParity,\n # stopbits=glc.serialStopBits,\n # timeout=glc.serialTimeout)\n # cmd.set_gain('100')\n glc.ble.BLE_connection_setup()\n\n except Exception as e:\n QMessageBox.warning(self, 'Warning', str(e), QMessageBox.Ok, QMessageBox.Ok)\n else:\n self.update_ui_thread.start()\n self.button_set_offset.setEnabled(True)\n self.button_show_graph.setEnabled(True)\n self.button_run_c.setEnabled(True)\n self.button_run_s.setEnabled(True)\n self.combobox_gain.setEnabled(True)\n self.label_gain.setEnabled(True)\n QMessageBox.information(self, 'Information', 'Connection successful!', QMessageBox.Ok, QMessageBox.Ok)\n\n def set_offset(self):\n \"\"\"The callback function of button 'Set offset'\n\n Returns:\n None\n\n \"\"\"\n cmd.stop_measurement()\n self.button_run_c.setText('Run')\n cmd.set_offset()\n\n def set_gain(self):\n \"\"\"The callback function of combobox 'gain'\n\n Returns:\n None\n\n \"\"\"\n cmd.set_gain(self.combobox_gain.currentText())\n\n def show_graph(self):\n \"\"\"The callback function of button 'Show graph'\n\n Returns:\n None\n\n \"\"\"\n self.graph_process_v = multiprocessing.Process(target=graph.realtime_plot_v, args=(glv.sensor_data_sharing,))\n self.graph_process_xyz = multiprocessing.Process(target=graph.realtime_plot_xyz,\n args=(glv.sensor_data_sharing,))\n self.graph_process_v.start()\n self.graph_process_xyz.start()\n\n def run_s(self):\n \"\"\"The callback function of button 'Run 1x'\n\n Returns:\n None\n\n \"\"\"\n # cmd.stop_measurement()\n # cmd.start_measurement(mode='s')\n self.read_measurement_thread = threading.Thread(target=cmd.read_measurement, daemon=True)\n self.read_measurement_thread.start()\n self.button_run_c.setText('Run')\n\n def run_c(self):\n \"\"\"The callback function of button 'Run'\n\n Returns:\n None\n\n \"\"\"\n if self.button_run_c.text() == 'Run':\n cmd.stop_measurement()\n cmd.start_measurement(mode='c')\n self.read_measurement_thread = threading.Thread(target=cmd.read_measurement, daemon=True)\n self.read_measurement_thread.start()\n self.button_run_c.setText('Stop')\n else:\n cmd.stop_measurement()\n self.button_run_c.setText('Run')\n\n def closeEvent(self, event):\n \"\"\" Override the method of parent class QWidget.\n\n The closeEvent method defines the behaviour of clicking the window's exit button 'x'. Before exiting the\n program, a message box asks the user to confirm that they want to exit. 
If yes,\n all measurement threads and child processes are terminated first, so that no exception can be raised by\n the exit of the program.\n\n\n Args:\n event: Fixed argument for method closeEvent of parent class.\n\n Returns:\n None\n\n \"\"\"\n\n reply = QMessageBox.question(self, 'Window Close', 'Are you sure you want to exit the program?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept()\n # Stop all sub-threads and sub-processes, then wait for them to exit before exiting the main program\n # (to prevent exceptions being raised)\n if self.read_measurement_thread.is_alive():\n cmd.stop_measurement()\n self.read_measurement_thread.join()\n if self.graph_process_v.is_alive() or self.graph_process_xyz.is_alive():\n self.graph_process_v.terminate()\n self.graph_process_xyz.terminate()\n self.graph_process_v.join()\n self.graph_process_xyz.join()\n else:\n event.ignore()\n\n def update_table(self):\n \"\"\"Update the table of the main menu.\n\n Returns:\n None\n\n \"\"\"\n sensor_v = np.around(glv.sensor_v, decimals=4)\n offset = np.around(glv.offset, decimals=4)\n for i in np.arange(6):\n for j in np.arange(6):\n self.table_v.setItem(i, j, QTableWidgetItem(str(sensor_v[i, j])))\n self.table_offset.setItem(i, j, QTableWidgetItem(str(offset[i, j])))\n\n def update_button(self):\n \"\"\"Update the enable status of main menu's buttons\n\n Returns:\n None\n\n \"\"\"\n if self.graph_process_v.is_alive() or self.graph_process_xyz.is_alive():\n self.button_show_graph.setEnabled(False)\n else:\n self.button_show_graph.setEnabled(True)\n\n\nclass UpdateUI(QThread):\n \"\"\"Multithreading class for updating the table and buttons of the main menu.\n\n This is the usage of the PyQt multithreading class QThread. 
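    Work happens in the overridden run() method, which signals the GUI thread through the pyqtSignal attributes declared on the class.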
Visit\n https://doc.qt.io/qtforpython/PySide6/QtCore/QThread.html for details.\n\n \"\"\"\n update_table = pyqtSignal()\n update_button = pyqtSignal()\n\n def run(self):\n \"\"\"Define what is run in this threading\n\n Returns:\n None\n\n \"\"\"\n while True:\n self.update_table.emit() # emit signal\n self.update_button.emit()\n time.sleep(0.3)\n","sub_path":"GUI/main_ui.py","file_name":"main_ui.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605070077","text":"#!/usr/bin/env python3\n#\n# Copyright (C) 2019 UAVCAN Development Team \n# This software is distributed under the terms of the MIT License.\n#\n# Author: Theodoros Ntakouris \n#\n\n# App, Static, Templates folder, CORS setup\n# Controller wiring\n\nfrom quart import Quart, render_template\nfrom quart_cors import cors\n# Controllers\nfrom .controllers.nodes import nodes_controller\n\napi_prefix = '/api/v1'\n\napp = Quart(__name__,\n static_folder='../../frontend/dist/static',\n template_folder='../../frontend/dist')\napp = cors(app)\n\n# Register endpoint modules\napp.register_blueprint(nodes_controller, url_prefix=api_prefix + '/nodes')\n\n\n# Sink all undeclared routes so that vue can work with router properly\n@app.route('/', defaults={'path': ''})\n@app.route('/')\nasync def catch_all(path: str) -> str:\n return await render_template('index.html')\n\nif __name__ == \"__main__\":\n app.run(port=5000)\n","sub_path":"src/yukon/backend/src/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"516250272","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 9 15:28:25 2019\n\n@author: alex\n\"\"\"\n\nclass Meal:\n def __init__(self, price, category, timestamp):\n self.price = price\n self.category = category\n self.timestamp = timestamp\n \nmeal1 = Meal(\"$9.99\", \"Breakfast\", \"6/9/19\")\n \nprint(meal1.price)\nprint(meal1.category)\nprint(meal1.timestamp)","sub_path":"testfile.py","file_name":"testfile.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378099723","text":"#-----------------------------------------------------------------------------------------------------#\r\n#--For IEEE site--#\r\nimport json\r\nimport _json\r\nimport requests\r\nfrom urllib.parse import urlencode, quote_plus\r\n\r\n#----------For iee site----------#\r\nOUTPUT = './Data/ResearchPapers.html'\r\nURL1 = 'https://ieeexplore.ieee.org/rest/search'\r\nHEADERS1 = {\r\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',\r\n 'Accept': 'application/json, text/plain, */*',\r\n 'Accept-Language': 'en-US,en;q=0.5',\r\n 'Referer': 'https://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText=security',\r\n 'Content-Type': 'application/json',\r\n 'Connection': 'keep-alive',\r\n 'Cache-Control': 'max-age=0',\r\n}\r\n#----------For science open site----------#\r\nFILENAME2 = './Data/ResearchData.json'\r\nURL2 = 'https://www.scienceopen.com/search-servlet'\r\nHEADERS2 = {\r\n 'Connection':'keep-alive',\r\n 'Accept':'*/*',\r\n 'Origin':'https://www.scienceopen.com',\r\n 'X-Requested-With': 'XMLHttpRequest',\r\n 'Sec-Fetch-Dest': 'empty',\r\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)',\r\n 'Content-Type': 
'application/x-www-form-urlencoded; charset=UTF-8',\r\n 'Sec-Fetch-Site': 'same-origin',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'Referer': 'https://www.scienceopen.com/search',\r\n 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8'\r\n}\r\nDATA2 = {\r\n 'kind':'61',\r\n 'itemsToGet':'10',\r\n 'firstItemIndex':'0',\r\n 'getFacets':'false',\r\n 'getFilters':'true',\r\n 'search':'{SUB}',\r\n}\r\nSUB = {\r\n \"v\":\"3\",\r\n \"id\":\"\",\r\n \"isExactMatch\":\"true\",\r\n \"context\":\"null\",\r\n \"kind\":\"77\",\r\n \"order\":\"0\",\r\n \"orderLowestFirst\":\"false\",\r\n \"query\":\"{QUERY}\"\r\n }\r\n\r\nf = open(OUTPUT,'w')\r\n# IEEE #\r\ndef ieeeSite(TOPIC):\r\n print(\"Getting Research: IEEE\")\r\n DATA1 ='{\"newsearch\":true,\"queryText\":\"'+TOPIC+'\",\"highlight\":true,\"returnFacets\":[\"ALL\"],\"returnType\":\"SEARCH\"}'\r\n response = requests.post(url = URL1, headers = HEADERS1, data = DATA1)\r\n response=response.json()\r\n i=0\r\n f.write('\\n\\n\\n

<html>\\n<head>\\n<title>Research Papers</title>\\n</head>\\n<body>\\n<h2>IEEE</h2>\\n') \r\n for article in response['records']:\r\n i = i+1\r\n if i == 20:\r\n break\r\n f.write('<li>' + article['articleTitle'] + '</li>\\n')\r\n\r\n# Science Open #\r\ndef parseData():\r\n fp = open(FILENAME2)\r\n obj = json.load(fp)\r\n fp.close()\r\n f.write('\\n')\r\n f.write('<h2>Science Open</h2>\\n')\r\n for i in obj['result']['results']:\r\n f.write(f'<li>{i[\"_titleSafe\"]}</li>\\n
')\r\n f.write('')\r\n\r\ndef scOpen(QUERY):\r\n print(\"Getting Research: ScienceOpen\")\r\n TEST = f\"q=%7B%22kind%22%3A61%2C%22itemsToGet%22%3A20%2C%22firstItemIndex%22%3A0%2C%22getFacets%22%3Afalse%2C%22getFilters%22%3Atrue%2C%22search%22%3A%7B%22v%22%3A3%2C%22id%22%3A%22%22%2C%22isExactMatch%22%3Atrue%2C%22context%22%3Anull%2C%22kind%22%3A77%2C%22order%22%3A0%2C%22orderLowestFirst%22%3Afalse%2C%22query%22%3A%22{QUERY}%22%2C%22filters%22%3A%5B%7B%22kind%22%3A86%2C%22offset%22%3A1%2C%22timeUnit%22%3A5%2C%22%24timezoneOffset%22%3A-19800000%7D%5D%7D%7D\"\r\n r = requests.post(url = URL2 , headers = HEADERS2 , params = TEST )\r\n with open(FILENAME2, \"w\") as f2:\r\n x = r.content.decode()\r\n f2.write(x)\r\n parseData()\r\n f.close()\r\n","sub_path":"R_Papers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88723655","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom eminer import views\n\n\nurlpatterns = patterns('',\n url(r'^$', views.BlogView, name = 'home'),\n url(r'^about/$', views.About, name = 'about'),\n url(r'^addpost/$', views.AddPost, name = 'add_post'),\n url(r'^publisherview/$', views.PublisherView, name = 'publisher-view'),\n url(r'^addtag/$', views.AddTag, name = 'add_tag'),\n\n url(r'^publish/(?P[\\w\\-]+)/$', views.PublishView, name = 'publish'),\n url(r'^(?P[\\w\\-]+)/$', views.BlogView, name = 'entry-detail'),\n url(r'^edit/(?P[\\w\\-]+)/$', views.AddPost, name = 'edit-post'),\n url(r'^pages/(?P[0-9])/$', views.BlogView, name = 'page-detail'),\n url(r'^tags/(?P[\\w\\-]+)/$', views.BlogView, name = 'tag-detail'),\n)\n","sub_path":"eminer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348318603","text":"from django.shortcuts import render\nfrom .forms import DriverProfileForm,CarProfileForm\n\n# Create your views here.\n\ndef driveprofile(request):\n current_user = request.user\n if request.method == 'POST':\n form = DriverProfileForm(request.POST,request.FILES)\n if form.is_valid():\n driver = form.save(commit=False)\n driver.user = current_user\n driver.save()\n else:\n form = DriverProfileForm()\n return render(request,'profile/driverprof.html',{\"form\":form})\n\n\ndef carprofile(request):\n current_user = request.user\n if request.method == 'POST':\n form = CarProfileForm(request.POST,request.FILES)\n if form.is_valid():\n car = form.save(commit=False)\n car.user = current_user\n car.save()\n else:\n form = CarProfileForm()\n return render(request,'profile/car.html',{\"form\":form})\n","sub_path":"driver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"336466119","text":"# -*- coding: utf-8 -*-\n'''\n\t1.自变量(一个或几个)对因变量的预测程度,\n\n\t1.首先构建一下整个流程,然后再放入实际的数据\n\t\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp\nfrom scipy.stats import norm\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\nfrom newstool import readMetrix\nBasic = 'D:/点击这里/newsAnalysis/'\nGDP_path = Basic+ 'GDP per capita, PPP (con 2011)_no somalia.csv'\nVisits_path = Basic+ 'visit_no somalia.csv'\n'''\nGDP = 
readMetrix(GDP_path)\nVisits = readMetrix(Visits_path)\n\nfor i in range(len(GDP)):\n\tfor j in range(len(GDP[i])):\n\t\tGDP[i][j] = float(GDP[i][j].replace('\\ufeff',''))\n\nfor i in range(len(Visits)):\n\tfor j in range(len(Visits[i])):\n\t\tVisits[i][j] = float(Visits[i][j].replace('\\ufeff',''))\n'''\nx0 = [211214.5972,219203.1162,222580.0917,229536.9915,243735.5084,256699.7178,266907.4392,281899.8367,295495.2635,293609.8621,295585.8461,285177.8208,279568.3489,282533.698,285636.1472,279656.0303]\n#x0 = [113,101,108,82,116,106,154,131,98,98,141,131,110,74,77,102]\n# volume of work\ny0 = [0.003056722,0.004249758,0.005057041,0.007253444,0.010632622,0.016987824,0.025999634,0.034510026,0.055069151,0.078352505,0.099910759,0.100723869,0.113864837,0.133540444,0.147717208,0.152761534]\ny = np.array(y0)\n''' data generation '''\n'''\nx = np.arange(0, 1, 0.002)\nprint(str(x))\ny = norm.rvs(0, size=500, scale=0.1)\nprint(str(y))\ny = y + x**2\n\ny0 = [sum(gp) for gp in GDP]\nprint([i/sum(y0) for i in y0])\ny = np.array([i/sum(y0) for i in y0])\nx0 = [sum(vt) for vt in Visits]\n'''\nx = np.array([i/sum(x0) for i in x0])\n\n''' root mean squared error '''\ndef rmse(y_test, y):\n return np.sqrt(np.mean((y_test - y) ** 2))\n\n''' How much better than the mean, in [0, 1]. 0 means no better than the mean; 1 means a perfect prediction. This implementation follows the scikit-learn documentation. '''\ndef R2(y_test, y_true):\n return 1 - ((y_test - y_true)**2).sum() / ((y_true - y_true.mean())**2).sum()\n\n''' This is the version from Conway & White's book \"Machine Learning for Hackers\" '''\ndef R22(y_test, y_true):\n y_mean = np.array(y_true)\n y_mean[:] = y_mean.mean()\n return 1 - rmse(y_test, y_true) / rmse(y_mean, y_true)\n\nplt.scatter(x, y, s=5)\ndegree = [1]\ny_test = []\ny_test = np.array(y_test)\n\nfor d in degree:\n clf = Pipeline([('poly', PolynomialFeatures(degree=d)),\n ('linear', LinearRegression(fit_intercept=False))])\n clf.fit(x[:, np.newaxis], y)\n y_test = clf.predict(x[:, np.newaxis])\n\n print(clf.named_steps['linear'].coef_)\n print('rmse=%.2f, R2=%.2f, R22=%.2f, clf.score=%.2f' %\n (rmse(y_test, y),\n R2(y_test, y),\n R22(y_test, y),\n clf.score(x[:, np.newaxis], y)))\n\n plt.plot(x, y_test, linewidth=2)\n\nplt.grid()\nplt.legend(['1','2','100'], loc='upper left')\nplt.show()\n","sub_path":"regressionPredicct.py","file_name":"regressionPredicct.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"63997084","text":"# Copyright (c) 2016 Uber Technologies, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport socket\nimport time\n\nfrom tchannel import TChannel\nfrom tornado import gen\n\nfrom yarpc import Response, Request\nfrom yarpc.encoding import json\nfrom yarpc.transport import Channel\nfrom yarpc.transport.http import HTTPOutbound\nfrom yarpc.transport.tchannel import TChannelOutbound\n\nhostname = socket.gethostname()\n\n\n@json.procedure('phone')\n@gen.coroutine\ndef phone(request):\n start = time.time()\n\n # enforce request shape\n if (\n not request.body.get('transport') or\n not request.body.get('service') or\n not request.body.get('procedure')\n ):\n raise ValueError(\n 'transport, service, and procedure are '\n 'required request params'\n )\n\n # create outbound dynamically based on request\n t = request.body['transport']\n if 'http' in t:\n http = t['http']\n outbound = HTTPOutbound(\n url='http://%s:%d' % (http['host'], http['port']),\n )\n elif 'tchannel' in request.body['transport']:\n tch = t['tchannel']\n outbound = TChannelOutbound(\n tchannel=TChannel('yarpc-test'),\n hostport='%s:%d' % (tch['host'], tch['port']),\n )\n else:\n raise ValueError(\n \"no transport available for: %s\" %\n request.body['transport']\n )\n\n # create a client with a custom channel\n # based on the previously created outbound\n # TODO support arbitrary bytes and encodings\n client = json.JSONClient(Channel(\n caller='yarpc-test',\n service=request.body['service'],\n outbound=outbound,\n ))\n answer = yield client.call(Request(\n procedure=request.body['procedure'],\n body=request.body,\n ttl=10000,\n ))\n\n # respond with answer from downstream service\n response = Response(body={\n 'service': request.body['service'],\n 'procedure': request.body['procedure'],\n 'hostname': hostname,\n 'elapsedms': to_millis(time.time() - start),\n 'body': answer.body,\n })\n\n raise gen.Return(response)\n\n\ndef to_millis(time):\n return int(round(time * 1000))\n","sub_path":"crossdock/async/server/procedure/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594832433","text":"import random\n\na = []\nb = 1\n\nfor i in range(100000):\n a.append(random.randint(0, 1))\n\na_string = [str(i) for i in a]\na_numb = int(\"\".join(a_string))\n\nwhile str(b) in str(a_numb):\n b = str(b) + '1'\n\nprint(len(str(b)) - 1)\n\nprint(f'Одиниць — {a.count(1)}')\nprint(f'Нулів — {a.count(0)}')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252775771","text":"# Chapter 5: Data Displays and Grids\r\n# Recipe 1: Displaying lists of data\r\n#\r\nimport wx\r\n\r\nclass BaseList(wx.ListCtrl):\r\n def __init__(self, parent):\r\n super(BaseList, self).__init__(parent, \r\n style=wx.LC_REPORT)\r\n\r\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRClick)\r\n self.Bind(wx.EVT_MENU, self.OnMenu, id=wx.ID_COPY)\r\n self.Bind(wx.EVT_MENU, self.OnMenu, id=wx.ID_SELECTALL)\r\n\r\n def OnRClick(self, event):\r\n menu = wx.Menu()\r\n menu.Append(wx.ID_COPY)\r\n menu.Append(wx.ID_SELECTALL)\r\n 
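        # For stock IDs such as wx.ID_COPY and wx.ID_SELECTALL, wxWidgets can
        # fill in the standard label and shortcut when Append is given no label.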
self.PopupMenu(menu)\r\n menu.Destroy()\r\n\r\n def OnMenu(self, event):\r\n if event.Id == wx.ID_COPY:\r\n self.Copy()\r\n elif event.Id == wx.ID_SELECTALL:\r\n self.SelectAll()\r\n else:\r\n event.Skip()\r\n\r\n def Copy(self):\r\n \"\"\"Copy selected data to clipboard\"\"\"\r\n text = self.GetSelectedText()\r\n data_o = wx.TextDataObject()\r\n data_o.SetText(text)\r\n if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():\r\n wx.TheClipboard.SetData(data_o)\r\n wx.TheClipboard.Flush()\r\n wx.TheClipboard.Close()\r\n\r\n def GetSelectedText(self):\r\n items = list()\r\n nColumns = self.ColumnCount\r\n for item in range(self.ItemCount):\r\n if self.IsSelected(item):\r\n items.append(self.GetRowText(item))\r\n text = \"\\n\".join(items)\r\n return text\r\n\r\n def GetRowText(self, idx):\r\n txt = list()\r\n for col in range(self.ColumnCount):\r\n txt.append(self.GetItemText(idx, col))\r\n return \"\\t\".join(txt)\r\n\r\n def SelectAll(self):\r\n \"\"\"Select all items\"\"\"\r\n for item in range(self.ItemCount):\r\n self.Select(item, 1)\r\n\r\nclass PersonnelList(BaseList):\r\n def __init__(self, parent):\r\n super(PersonnelList, self).__init__(parent)\r\n\r\n # Add column headers\r\n self.InsertColumn(0, \"ID\")\r\n self.InsertColumn(1, \"Name\")\r\n self.InsertColumn(2, \"Email\")\r\n self.InsertColumn(3, \"Phone#\")\r\n \r\n def AddEmployee(self, id, name, email, phone):\r\n item = self.Append((id, name, email, phone))\r\n\r\nclass MyFrame(wx.Frame):\r\n def __init__(self, parent, title):\r\n super(MyFrame, self).__init__(parent, title=title)\r\n\r\n self._list = PersonnelList(self)\r\n \r\n # add some data\r\n self._list.AddEmployee(\"123\", \"Frank\", \"f@email.com\", \"555-1234\")\r\n self._list.AddEmployee(\"124\", \"Jane\", \"j@email.com\", \"555-1434\")\r\n self._list.AddEmployee(\"125\", \"Thor\", \"t@email.com\", \"555-1274\")\r\n\r\nclass MyApp(wx.App):\r\n def OnInit(self):\r\n self.frame = MyFrame(None, title=\"Displaying lists of data\")\r\n self.frame.Show();\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n app = MyApp(False)\r\n app.MainLoop()\r\n","sub_path":"Chapter 5/02/baseList.py","file_name":"baseList.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28390535","text":"#!/usr/bin/env python3\nimport tkinter as tk\nimport time\n\nroot = tk.Tk()\n\n\nclass TimeCounter(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.init_val()\n self.create_widgets()\n self.count_time()\n\n def init_val(self):\n \"\"\"init_val\"\"\"\n # self.time_delta = timedelta()\n # self.second_delta = timedelta(seconds=1)\n self.time_delta = 0\n\n # self.trigle_date_val = tk.StringVar(\n # value=datetime.today().strftime(\"%Y-%m-%d 周%w\")\n # )\n # self.trigle_time_val = tk.StringVar(value=datetime.today().strftime(\"%H:%M:%S\"))\n self.trigle_datetime_val = tk.StringVar(\n value=time.strftime(\"trigled in %H:%M 周%w %Y-%m-%d \", time.localtime())\n )\n self.counted_time_val = tk.StringVar()\n\n def create_widgets(self):\n \"\"\"create_widgets\"\"\"\n\n # self.master.geome\n # self.trigle_date_label = tk.Label(\n # self, textvariable=self.trigle_date_val, font=(\"\", 10)\n # )\n # self.trigle_time_label = tk.Label(\n # self, textvariable=self.trigle_time_val, font=(\"\", 10)\n # )\n self.trigle_datetime_label = tk.Label(\n self, textvariable=self.trigle_datetime_val, font=(\"\", 10)\n )\n self.counted_time_label = tk.Label(\n 
self, textvariable=self.counted_time_val, font=(\"\", 80)\n )\n\n self.counted_time_label.pack(expand=1)\n self.trigle_datetime_label.pack()\n # self.trigle_date_label.pack(expand=0)\n # self.trigle_time_label.pack(expand=0)\n\n def count_time(self):\n \"\"\"count_time\"\"\"\n self.time_delta += 1\n self.counted_time_val.set(time.strftime(\"%H:%M:%S\", time.gmtime(self.time_delta)))\n\n self.master.after(1000, self.count_time)\n\n\n\"\"\"程序入口\"\"\"\n\n\n# def main():\n# global trigle_date, counted_time\n# root.title(\"珍惜时间\")\n# trigle_date = tk.StringVar()\n# counted_time = tk.StringVar()\n\n# trigle_date.set(cur_date)\n\n# l2 = tk.Label(root, textvariable=trigle_date, font=(\"Arial\", 20))\n# l1 = tk.Label(root, textvariable=counted_time, font=(\"\", 50))\n\n# l2.pack(expand=1)\n# l1.pack(expand=1)\n\n# count_time()\n\n# # panel1 = tk.Frame(root)\n# # panel1.pack(expand=1)\n\n# root.mainloop()\n\n\n\"\"\"程序入口\"\"\"\n\nif __name__ == \"__main__\":\n # main()\n app = TimeCounter(master=root)\n app.mainloop()","sub_path":"bin/python3-time_counter-tk.py","file_name":"python3-time_counter-tk.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120430039","text":"from elasticsearch import Elasticsearch\nfrom elasticsearch.exceptions import RequestError, NotFoundError\n\n\ndef truncate_date(date_string):\n return date_string[:-3]\n\n\nes = Elasticsearch(\n ['f33c3501de664b0b4d7f2b2116decf45.us-east-1.aws.found.io'],\n http_auth=('elastic', '0HhUVH98eQv0DspEuDLxGDs8'),\n port=9243,\n use_ssl=True\n)\n\n\ndef migrate(batches=None, batch_size=5):\n\n if batches is None:\n batches = es.count('test_bad_date', 'record')['count']\n query = dict(\n query=dict(\n match_all=dict()\n ),\n size=batch_size\n )\n\n for n in range(batches):\n try:\n query_response = es.search('test_bad_date', 'record', body=query)\n\n for hit in query_response['hits']['hits']:\n\n body = hit['_source']\n try:\n body['gooddate'] = truncate_date(body['baddate'])\n except KeyError:\n pass\n\n reindex_response = es.index('test_good_date', doc_type=hit['_type'], body=body, id=hit['_id'])\n try:\n delete_original = es.delete('test_bad_date', doc_type=hit['_type'], id=hit['_id'])\n except NotFoundError:\n print('wtf doc not found for deletion?', hit['_id'])\n\n print('old index: ',\n es.count('test_bad_date', 'record')['count'],\n ' || new index: ',\n es.count('test_good_date', 'record')['count'])\n\n except:\n # Debug\n print(query_response)\n print(hit)\n print(body)\n print(reindex_response)\n print(delete_original)\n raise OSError\n","sub_path":"reindex_on_edge_node/get_transform_put.py","file_name":"get_transform_put.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567975937","text":"import argparse\nimport csv\nimport os\nimport re\nfrom collections import defaultdict\nfrom itertools import combinations\n\nfrom Bio import SeqIO\n\n\n### parse the fasta files\ndef parseFasta(fasta):\n if os.path.exists(fasta):\n Seq=[str(i.seq) for i in SeqIO.parse(fasta, 'fasta')][0]\n ## create a tau dict pos id is key amino acid is string value\n print('FOUND 1 FASTA WITH {} AA'.format(len(Seq)))\n posDict = defaultdict()\n for n, aa in enumerate(Seq):\n posDict[(n+1, aa)] = set()\n return posDict, Seq\n\n\n## make ptm dict\ndef parsePTMs(ptm, posDict):\n with open(ptm, 'r') as ptmFile:\n reader = csv.reader(ptmFile, delimiter='\\t')\n 
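        # skip the header row; every following row is a (position, amino acid, PTM) triple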
next(reader)\n for row in reader:\n n, aa, p = row[:3]\n #print(len(row))\n key=(int(n), aa)\n print(key)\n if key in posDict:\n posDict[key].add(p)\n return posDict\n\n\n## main call \ndef makePeptides(posDict, mer, overlap, out, Seq):\n ##intitalize params\n combTrac = 0\n pepLen = mer\n span = pepLen-overlap\n outFile = open(out+'.csv', 'w')\n header = ['{}Mer'.format(pepLen), 'Start', 'End']\n header.extend(['P{}'.format(i) for i in range(1, pepLen+1)])\n header.extend(['NCombinations'])\n outFile.write(','.join(header)+'\\n')\n for i in range(0, len(Seq), span):\n startPos = i+1\n endPos = startPos+(pepLen-1)\n kmer = Seq[i:i+pepLen]\n if len(kmer) == pepLen:\n #break\n print(startPos, 'to ', endPos, ' ', Seq[i:i+pepLen], i)\n ptmList =[]\n combPTM = []\n for j in range(startPos, endPos+1): ##change pointer \n key = (int(j), Seq[j-1])\n if key in posDict:\n ptmOut=[i for i in posDict.get(key)]\n combPTM.extend(ptmOut)\n ptms='|'.join(ptmOut)\n if ptms:\n ptmList.append('{}'.format(ptms))\n else:\n ptmList.append('{}'.format(str('')))\n else:\n ptmList.append('{}'.format(str('')))\n #print(ptmList) \n #print(combPTM)\n outFile.write('{},{},{},'.format(kmer, startPos, endPos))\n if combPTM: ##calculate combinations needed \n combins = len(combPTM)\n totalN=(combins*combins)\n combTrac += totalN\n outFile.write(','.join(ptmList)+',{}\\n'.format(totalN))\n else:\n outFile.write(','.join(ptmList)+',{}\\n'.format(1))\n #combTrac += 1\n outFile.close()\n print('OUT WRITTEN {} WITH TOTAL COMB {}'.format(out, combTrac)) \n\n\n##create combination vector\n#kmer = list('SLPTPPTREPK')\ndef combPeptides(posList, kmer):\n #posList = ['', '', '', '', '', 'Gly|N6-acK', 'N6-acK', '', '', '', '']\n #kmer = ['P', 'G', 'G', 'G', 'N', 'K', 'K', 'I', 'E', 'T', 'H'] \n #print(posList, kmer)\n posDict=[]\n for n, pos in enumerate(posList):\n if pos:\n for ptm in pos.split('|'):\n posDict.append((ptm, n))\n nComb = len(posDict)+1\n posIndex = []\n outKmer = set()\n for i in range(1, nComb):\n for sub in combinations(posDict, i): \n #print(sub)\n kmerCp = kmer.copy()\n posIndex.append(sub)\n for item in sub:\n ptm, pos = item\n #print(ptm, pos)\n makePTM=kmerCp[pos]\n if not re.search(\"\\\\[\", makePTM): #if a ptm has already been assigned at the same position do nothing\n motif='{}[{}]'.format(makePTM, ptm)\n kmerCp[pos] = motif\n outKmer.add(''.join(kmerCp))\n return outKmer\n\n\ndef makePeptidelist(out):\n pepOut=open('{}_PEPLIST.csv'.format(out), 'w')\n writer = csv.writer(pepOut)\n writer.writerow(['PEP', 'START', 'END', 'BASE'])\n nComb =0\n with open(out+'.csv') as pepFile:\n reader = csv.reader(pepFile)\n next(reader)\n for row in reader:\n pep, st, end= row[:3]\n nComb += 1\n writer.writerow([pep, st, end, '*'])\n ptmCheck = '\\t'.join(row[3:14])\n if ptmCheck:\n ptmList = row[3:14]\n print('MAKING PTM COMBINATIONS {}'.format(pep))### implement bool check for ptms here ?\n kmer = list(pep)\n outKmer = combPeptides(posList=ptmList, kmer=kmer)\n nComb += len(outKmer)\n for pep in outKmer:\n writer.writerow([pep, st, end, ''])\n print('BUILT {} COMBINATIONS'.format(nComb))\n print('FINISHED WRITE TO {}'.format(pepOut))\n pepOut.close()\n\n#makePeptidelist(out='outputs/peptideList.csv')\n\n#\"-ptm\", \"data/PTMList_DANIEL.txt\"\ndef main():\n parser = argparse.ArgumentParser('A script to make user defined peptide mers with an option for overlap, additionally tag ptms')\n parser.add_argument('-fasta', required=True, help='FASTA file of protein sequence to be processed')\n parser.add_argument('-ptm', 
required=False, help='A tab delimited PTM list should have pos aminoacid and ptm as first three columns')\n parser.add_argument('-mer', required=True, help='total length of the kmer', type=int)\n parser.add_argument('-overlap', required=True, help='overlap needed', type=int)\n parser.add_argument('-out', required=True, help='file to write output')\n args = parser.parse_args()\n print(args)\n fasta = args.fasta\n mer = args.mer\n overlap = args.overlap\n out = args.out\n if args.ptm is not None:\n ptm = args.ptm\n posDict, Seq = parseFasta(fasta)\n posDict=parsePTMs(posDict=posDict, ptm=ptm)\n makePeptides(posDict, mer, overlap, out, Seq)\n makePeptidelist(out)\n else:\n posDict, Seq = parseFasta(fasta)\n makePeptides(posDict, mer, overlap, out, Seq)\n\nif __name__ == '__main__':main()\n#python3 scripts/parseTau.py -fasta data/tauMain.fasta -ptm data/PTMList_DANIEL.txt -mer 11 -overlap 1 -out outputs/peptideList_11mer_1overlap\n#python3 scripts/parseTau.py -fasta data/tauMain.fasta -ptm data/PTMList_DANIEL.txt -mer 11 -overlap 5 -out outputs/peptideList_11mer_5overlap","sub_path":"scripts/parseProt.py","file_name":"parseProt.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574785874","text":"from bs4 import BeautifulSoup as BS\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass ParimatchParser:\n def __init__(self):\n self.url = 'https://www.parimatch.ru/live'\n self.vis_browser = True\n self.browser = None\n self.main_page_load = False\n self.games_found = False\n self.start_work_time = 0\n self.browser_match = None\n self.count = 0\n\n def open_browser(self):\n options = Options()\n options.headless = self.vis_browser\n self.browser = webdriver.Firefox(options=options)\n\n def open_browser_match(self):\n options = Options()\n options.headless = self.vis_browser\n self.browser_match = webdriver.Firefox(options=options)\n\n def get_main_page(self):\n if not self.browser:\n self.open_browser()\n self.browser.get(self.url)\n content = self.browser.page_source\n soup = BS(content, 'lxml')\n while not soup.select('main-markets'):\n time.sleep(0.1)\n content = self.browser.page_source\n soup = BS(content, 'lxml')\n if not soup.select('.live-group-item.sportcolor-bg-B'):\n self.games_found = False\n return\n element = self.browser.find_element_by_css_selector('.live-group-item.sportcolor-bg-B')\n self.games_found = True\n self.browser.execute_script(\"arguments[0].scrollIntoView();\", element)\n while True:\n live_blocks = element.find_elements_by_css_selector('.live-block-column.live-block-column_data')\n hrefs = []\n for live_block in live_blocks:\n if live_block.get_attribute(\"href\") == 'https://www.parimatch.ru/null':\n self.browser.execute_script(\"arguments[0].scrollIntoView();\", live_block)\n hrefs.append('https://www.parimatch.ru/null')\n if not 'https://www.parimatch.ru/null' in hrefs:\n self.start_work_time = time.time()\n self.main_page_load = True\n break\n\n def get_events(self):\n if not self.main_page_load:\n self.get_main_page()\n if time.time() - self.start_work_time > 180:\n self.get_main_page()\n if not self.games_found:\n return []\n content = self.browser.page_source\n soup = BS(content, 'lxml')\n with open('parimatch.html', 'w', encoding='utf8') as html_file:\n html_file.write(str(soup))\n main_block = 
soup.select('.live-group-item.sportcolor-bg-B')\n blocks_champions = main_block[0].select('.live-block-championship')\n events_info = []\n for block_champion in blocks_champions:\n champ = block_champion.select('.championship-name-title')[0]\n champ_title = champ.text\n l_blocks = block_champion.select('.live-block-column.live-block-column_data')\n for l_block in l_blocks:\n href = l_block['href']\n commands = l_block.select('.competitor-name')\n commands = commands[0].select('span')\n command1 = commands[0].text\n command2 = commands[1].text\n time_match = l_block.select('.live-block-sore')[0].text\n total_score = l_block.select('.live-score-box__total')\n if not total_score:\n total_score1 = 0\n total_score2 = 0\n else:\n total_score = total_score[0].select('span')\n total_score1 = total_score[0].text\n total_score2 = total_score[1].text\n scores = l_block.select('.live-score-box__set')\n scores1 = [score.select('span')[0].text for score in scores]\n scores2 = [score.select('span')[1].text for score in scores]\n event_info = {\n 'href': href,\n 'champ': champ_title,\n 'command1': command1,\n 'command2': command2,\n 'total_score1': int(total_score1),\n 'total_score2': int(total_score2),\n 'scores_1': scores1,\n 'scores_2': scores2,\n 'time': time_match,\n }\n events_info.append(event_info)\n return events_info\n\n def get_current_urls(self, browser):\n if browser:\n current_urls = []\n for page in browser.window_handles:\n browser.switch_to.window(page)\n current_urls.append(browser.current_url)\n return current_urls\n\n def value(self):\n content = self.browser_match.page_source\n soup = BS(content, 'lxml')\n event_markets = soup.select('.event-market')\n value_main = {}\n t_ot_m = []\n t_ot_s = []\n t_it_m_1 = []\n t_it_s_1 = []\n t_it_m_2 = []\n t_it_s_2 = []\n command1 = soup.select('.scoreboard__name')[0].text\n command2 = soup.select('.scoreboard__name')[1].text\n for event in event_markets:\n if event.select('.event-market__title')[0].text.replace(' ', '') == 'Тотал':\n total_all = event.select('.event-outcome-group')\n for total in total_all:\n _t_ot_m = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[0].text}\n _t_ot_s = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[1].text}\n t_ot_m.append(_t_ot_m)\n t_ot_s.append(_t_ot_s)\n if event.select(\".event-market__title\")[0].text.replace(' ',\n '') == f'Индивидуальныйтотал{command1}'.replace(' ',\n ''):\n total_all = event.select('.event-outcome-group')\n for total in total_all:\n _t_it_m_1 = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[0].text}\n _t_it_s_1 = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[1].text}\n t_it_m_1.append(_t_it_m_1)\n t_it_s_1.append(_t_it_s_1)\n if event.select(\".event-market__title\")[0].text.replace(' ',\n '') == f'Индивидуальныйтотал{command2}'.replace(' ',\n ''):\n total_all = event.select('.event-outcome-group')\n for total in total_all:\n _t_it_m_2 = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[0].text}\n _t_it_s_2 = {'points': total.select('.event-outcome-group-head')[0].text,\n 'coef': total.select('.event-outcome__value')[1].text}\n t_it_m_2.append(_t_it_m_2)\n t_it_s_2.append(_t_it_s_2)\n value_main['total_total'] = {'more': t_ot_m, 'smaller': t_ot_s}\n value_main['individ_total_1'] 
= {'more': t_it_m_1, 'smaller': t_it_s_1}\n value_main['individ_total_2'] = {'more': t_it_m_2, 'smaller': t_it_s_2}\n return value_main\n\n def loading_page(self,url):\n start_time_try = time.time()\n while True:\n if time.time() - start_time_try > 5:\n self.browser_match.get(url)\n start_time_try = time.time()\n try:\n elements = self.browser_match.find_elements_by_css_selector('.event-outcome__value')\n if elements:\n if not False in [False for el in elements if not el.text]:\n return\n except NoSuchElementException:\n print('except')\n pass\n\n def get_value(self, href):\n url = 'https://www.parimatch.ru' + href\n if not self.browser_match:\n self.open_browser_match()\n self.browser_match.get(url)\n self.loading_page(url)\n while True:\n try:\n value_main = self.value()\n return value_main\n except Exception:\n self.browser_match.get(url)\n self.loading_page(url)\n\n\nif __name__ == \"__main__\":\n parser = ParimatchParser()\n parser.get_events()\n while True:\n print(parser.get_events())\n\n","sub_path":"parimatch.py","file_name":"parimatch.py","file_ext":"py","file_size_in_byte":9111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113024650","text":"from os.path import join, expanduser\nfrom time import sleep\nimport subprocess\nimport sys\nsys.path.append(join(expanduser('~'), 'morphMod'))\n\nimport pbsGridWalker.grid as gr\nimport pbsGridWalker.tools.algorithms as tal\nimport pbsGridWalker.tools.fsutils as tfs\n\nimport morphModRoutes as mmr\nimport classifiers\nimport gctools\nimport gccommons\n\n# Tunable hyperparameters\nnumTrials = 100\nsegments = 3\n# Optional definitions for pbsGridWalker that depend on the number of segments\npointsPerJob = 20\nmaxJobs = 8\nqueue = 'shortq'\nexpectedWallClockTime = '03:00:00'\n\n# Constant hyperparameters\nevsDefaults = {'individual': 'compositeFixedProbabilities', 'evolver': 'cluneSimplifiedMorphologyControlIndividuals', 'communicator': 'chunkedUnixPipe',\n 'compositeClass0': 'integerVectorSymmetricRangeMutations', 'probabilityOfMutatingClass0': 0.2,\n 'lengthClass0': segments, 'initLowerLimitClass0': 0, 'initUpperLimitClass0': segments, 'lowerCapClass0': 0, 'upperCapClass0': segments,\n 'mutationAmplitudeClass0': 1,\n 'compositeClass1': 'trinaryWeights',\n 'lengthClass1': 2*(segments**2), 'initLowerLimitClass1': -1, 'initUpperLimitClass1': 1, 'lowerCapClass1': -1, 'upperCapClass1': 1,\n 'mutExplorationClass1': 0.8, 'mutInsDelRatioClass1': 1, 'mutationAmplitudeClass1': 1,\n 'genStopAfter': 600, 'populationSize': 50,\n 'initialPopulationType': 'random', 'secondObjectiveProbability': 1.,\n 'logParetoFront': 'yes', 'logBestIndividual': 'yes', 'logParetoFrontKeepAllGenerations': 'yes', 'logParetoFrontPeriod': 5, 'logParetoSize': 'yes',\n 'backup': 'no', 'trackAncestry': 'no',\n 'randomSeed': 0}\narrowbotsDefaults = {'segments': segments, 'sensorAttachmentType': 'variable',\n 'simulationTime': 10., 'timeStep': 0.1,\n 'integrateError': 'false', 'writeTrajectories': 'false'}\narrowbotInitialConditions = [[0]*segments]*segments # segmentsXsegments null matrix\narrowbotTargetOrientations = [ [1 if i==j else 0 for i in range(segments)] for j in range(segments) ] # segmentsXsegments identity matrix\n# Optional definitions for pbsGridWalker that are constant\ninvolvedGitRepositories = mmr.involvedGitRepositories\n# dryRun = False\n\n### Required pbsGridWalker definitions\ncomputationName = 'rateSwipe_N' + str(segments)\n\nnonRSGrid = gr.LinGrid('probabilityOfMutatingClass0', 0.0, 0.05, 0, 20) * 
\\\n gr.Grid1d('compositeClass0', ['integerVectorSymmetricRangeMutations', 'integerVectorRandomJumps'])\nparametricGrid = nonRSGrid*numTrials + gr.Grid1dFromFile('randomSeed', mmr.randSeedFile, size=len(nonRSGrid)*numTrials)\n\nfor par in parametricGrid.paramNames():\n\tevsDefaults.pop(par)\n\ndef prepareEnvironment(experiment):\n\tgccommons.prepareEnvironment(experiment)\n\ndef runComputationAtPoint(worker, params):\n\treturn gccommons.runComputationAtPoint(worker, params,\n\t\tevsDefaults, arrowbotsDefaults,\n\t\tarrowbotInitialConditions,\n\t\tarrowbotTargetOrientations)\n\ndef processResults(experiment):\n\timport os\n\timport numpy as np\n\timport pbsGridWalker.tools.plotutils as tplt\n\ttfs.makeDirCarefully('results', maxBackups=100)\n\n\t# We'll take a look at some parameters vs relative mutation rate at several stages (generation counts) along the evolutionary process\n\n\t# Linear stages\n\tstagesToConsider = 5\n\tstages = tal.splitIntegerRangeIntoStages(0, evsDefaults['genStopAfter'], stagesToConsider)\n\n\t##### Extracting and plotting the distance to the maximally modular morphology (MMM) for various values relative mutation rate #####\n\t# mmmmdist and similar abbreviations stand for \"minimal distance to the maximally modular morphology\" (across the Pareto front)\n\n\txlabel = r'$P_{mm}$'\n\tfieldNames = [ 'gen {}'.format(st) for st in stages ]\n\n\tdef generateMinMMMDistTimeSlices(gridPoint):\n\t\treturn [ gctools.minParetoFrontHammingDistanceToMMM(gen) for gen in stages ]\n\ttplt.plotComputationVariableAgainstParameter(experiment, 'mmmmd', generateMinMMMDistTimeSlices, 'probabilityOfMutatingClass0',\n\t fieldNames=fieldNames, xlabel=xlabel, ylabel=r'$\\mu$', marker='*')\n\n\tdef generateFitnessTimeSlices(gridPoint):\n\t\tbestIndividualData = np.loadtxt('bestIndividual{}.log'.format(gridPoint['randomSeed']))\n\t\tfitnessData = []\n\t\tfor genRec in range(bestIndividualData.shape[0]):\n\t\t\tgen = int(bestIndividualData[genRec,0])\n\t\t\tif gen in stages:\n\t\t\t\tfitnessData.append(bestIndividualData[genRec,1]) # WILL break if the best individual records are not in the order of increasing generation\n\t\treturn fitnessData\n\ttplt.plotComputationVariableAgainstParameter(experiment, 'error', generateFitnessTimeSlices, 'probabilityOfMutatingClass0',\n\t fieldNames=fieldNames, transform=lambda x: -1.*x, yscale='lin', xlabel=xlabel, ylabel=r'$\\log_{10} E$', strips='conf95', forcedYLabelPos=[0.05,1], marker='*')\n","sub_path":"rateSwipe.py","file_name":"rateSwipe.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591957570","text":"from PIL import Image\nimport os\nimport sys\n\n'''\nSplit a provided sprite sheet into images based on the number of rows\nand columns in the original. 
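For example, a 128x64 sheet with rows=2 and cols=4 yields eight 32x32 tiles
(clipWidth = width // cols, clipHeight = height // rows).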
Should be useful for anything created via piskelapp,\nwhere we have a standardized box size for all pieces of a spritesheet\n'''\n\nimageName, outputBaseName, rows, cols = sys.argv[1:]\nrows, cols = int(rows), int(cols)\nprint(imageName, rows, cols)\n\nimage = Image.open(imageName)\nwidth, height = image.size\n\npath = '.'\n\nclipWidth, clipHeight = width // cols, height // rows\n\ncounter = 1\nfor i in range(0, height, clipHeight):\n for j in range(0, width, clipWidth):\n a = image.crop((j, i, j+clipWidth, i+clipHeight))\n a.save(os.path.join(path, '%s-%s.png' % (outputBaseName, counter)))\n counter += 1\n\n","sub_path":"imageSplitter.py","file_name":"imageSplitter.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303939501","text":"# For this example to run, you also need the 'ica.py' file\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ica import fastica\n\n\ndef test():\n \"\"\"\n This is a combination of two unsupervised learning techniques, principal\n component analysis (PCA) and independent component analysis (ICA). PCA is\n a technique for dimensionality reduction, i.e. an algorithm to explain the\n observed variance in your data using less dimensions. ICA is a source\n separation technique, for example to unmix multiple signals that have been\n recorded through multiple sensors. Doing a PCA first and then an ICA can\n be useful if you have more sensors than signals. For more information see:\n the FastICA example from scikit-learn.\n \"\"\"\n\n data = np.random.random((5000, 100))\n u, s, v = linalg.svd(data)\n pca = np.dot(u[:, :10].T, data)\n results = fastica(pca.T, whiten=False)\n return results\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"profiling/examples/unsupervised_learning/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"518089263","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of integers\n def postorderTraversal(self, root):\n res = []\n if root is None:\n return res\n stack = []\n stack.append(root)\n prev = None\n while len(stack) > 0:\n cur = stack[-1]\n if prev is None or prev.left == cur or prev.right == cur:\n if cur.left is not None:\n stack.append(cur.left)\n elif cur.right is not None:\n stack.append(cur.right)\n elif cur.left == prev:\n if cur.right is not None:\n stack.append(cur.right)\n else:\n res.append(cur.val)\n stack.pop()\n prev = cur\n \n return res \n","sub_path":"src/main/python/lc/binary_tree_postorder_traversal.py","file_name":"binary_tree_postorder_traversal.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330235282","text":"#!/home/xlin/miniconda3/bin/python\n# -*- coding: utf-8 -*-\n \nimport logging\nimport os.path\nimport sys\nimport multiprocessing\nimport time\n\nimport pickle\nimport numpy as np\nfrom gensim.corpora import WikiCorpus\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom scipy import spatial\n\nfrom numpy import dot\nfrom numpy.linalg import norm\nimport random\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nfrom 
sklearn.cluster import KMeans\n\nimport scipy\nfrom scipy.sparse import lil_matrix\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse.linalg import spsolve\nfrom distutils.log import info\n\nfrom math import floor\n\ndef getSigma(dataArray):\n N = 0\n Dmax = -1.0e9\n Dmin = 1.0e9\n Dsum = 0.0\n Dmean = 0.0\n diffArray = []\n for diffNorm in dataArray:\n diffArray.append(diffNorm)\n if (Dmax < diffArray[N]):\n Dmax = diffArray[N]\n if (Dmin > diffArray[N]):\n Dmin = diffArray[N]\n Dsum = Dsum + diffArray[N]\n N = N + 1\n\n if (N < 2):\n print(\"Not enough data to analyze statistics.\")\n return Dmax, Dmin, Dmean, Dsum\n \n Dmean = Dsum / N\n Dsum = 0.0\n for ii in range(N):\n diffNorm = (diffArray[ii] - Dmean)\n Dsum = Dsum + diffNorm * diffNorm\n Dsum = np.sqrt(Dsum / (N - 1) ) \n \n return Dmin, Dmax, Dmean, Dsum\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n \n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(\"running %s\" % ' '.join(sys.argv))\n\n # check and process input arguments\n if len(sys.argv) != 3:\n print(globals()['__doc__'] % locals())\n sys.exit(1)\n\n wv = dict()\n vecFile = open(sys.argv[1], 'r')\n isLineOne = True\n counter = 0\n for aLine in vecFile:\n aLine = aLine.strip('\\n')\n lineToken = aLine.split(' ')\n if isLineOne:\n isLineOne = False\n N = int(lineToken[0].strip())\n D = int(lineToken[1].strip())\n continue\n aWord = lineToken[0].strip()\n vecDim = len(lineToken) - 1\n if (vecDim != D):\n print(\"inconsistent data %d => %d\" % (aWord, vecDim))\n sys.exit(1)\n aVec = np.zeros(D)\n for ii in range(D):\n lineToken[ii+1] = lineToken[ii+1].strip()\n aVec[ii] = float(lineToken[ii+1])\n wv[aWord] = aVec\n counter += 1\n vecFile.close()\n if (counter != N):\n print(\"inconsistent model %d => %d\" % (aWord, vecDim))\n sys.exit(1)\n\n \"\"\"\n node1list = [0, 5, 1, 0, 0, 4]\n node2list = [3, 10, 11, 10, 4, 6]\n for ii in range(len(node2list)):\n v1 = wv[str(node1list[ii])]\n v2 = wv[str(node2list[ii])]\n cosSim = dot(v1, v2)/norm(v1)/norm(v2)\n print(\"%d %d => %.4f\" % (node1list[ii], node2list[ii], cosSim))\n \"\"\"\n \n largePairs = []\n numOfLargePair = 10\n distSim = np.zeros(numOfLargePair)\n for ii in range(numOfLargePair):\n largePairs.append((\"None\", \"None\", -2.0))\n numOfNodesProcessed = 0\n for w1 in wv.keys():\n for w2 in wv.keys():\n if (w1 == w2):\n continue\n v1 = wv[w1]\n v2 = wv[w2]\n cosSim = dot(v1, v2)/ norm(v1) / norm(v2)\n binIdx = int(floor(cosSim * numOfLargePair))\n if ( (binIdx < 0) or (binIdx > numOfLargePair-1) ):\n continue\n distSim[binIdx] += 1\n checkedPair = False\n for ii in range(numOfLargePair):\n if ( ((w1 == largePairs[ii][0]) and (w2 == largePairs[ii][1])) or\n ((w1 == largePairs[ii][1]) and (w2 == largePairs[ii][0])) ):\n checkedPair = True\n break \n if (checkedPair):\n continue\n #if (cosSim > 0.1):\n # continue\n for ii in range(numOfLargePair):\n if (largePairs[ii][2] < cosSim):\n for ij in range(numOfLargePair-1, ii, -1):\n largePairs[ij] = largePairs[ij-1]\n largePairs[ii] = (w1, w2, cosSim)\n break\n numOfNodesProcessed += 1\n print(\"processed %d nodes\" % numOfNodesProcessed)\n \n print(\"Word pairs with largest cosine similarity:\")\n for ii in range(numOfLargePair):\n print(\"%s %s %.6f\" % (largePairs[ii][0], largePairs[ii][1], largePairs[ii][2]))\n print(\"Distribution:\")\n for ii in range(numOfLargePair):\n print(\"%d %d\" % (ii, distSim[ii]))\n \n nodeLabel = 
dict()\n labelFile = open(sys.argv[2], 'r')\n counter = 0\n maxGroup = -1\n for aLine in labelFile:\n aLine = aLine.strip('\\n')\n lineToken = aLine.split(' ')\n nodeID = int(lineToken[0].strip())\n ndLabel = int(lineToken[1].strip())\n nodeLabel[nodeID] = ndLabel\n if (ndLabel > maxGroup):\n maxGroup = ndLabel\n counter += 1\n labelFile.close()\n if (counter != N):\n print(\"inconsistent label %d : %d\" % (N, counter))\n sys.exit(1)\n\n sameGroupPairs = 0\n notSameGroupPairs = 0\n inGroupTrue = 0\n inGroupFalse = 0\n cosSim0 = 0.60\n \n for ii in nodeLabel.keys():\n for ij in nodeLabel.keys():\n if (nodeLabel[ii] == nodeLabel[ij]):\n sameGroupPairs += 1\n else:\n notSameGroupPairs += 1\n \n for w1 in wv.keys():\n for w2 in wv.keys():\n v1 = wv[w1]\n v2 = wv[w2]\n cosSim = dot(v1, v2)/norm(v1)/norm(v2)\n if (cosSim > cosSim0):\n if (nodeLabel[int(w1)] == nodeLabel[int(w2)]):\n inGroupTrue += 1\n else:\n inGroupFalse += 1\n \n print(\"Precision = %.2f%%\" % (inGroupTrue/(inGroupTrue+inGroupFalse)*100.0))\n print(\"Recall = %.2f%%\" % (inGroupTrue/sameGroupPairs*100.0))\n\n labels = []\n tokens = []\n for aWord in wv.keys():\n labels.append(aWord)\n tokens.append(wv[aWord]) \n myPerplexity = 6\n if (len(nodeLabel) > 100):\n myPerplexity = 30\n tsne_model = TSNE(perplexity=myPerplexity, n_components=2, init='pca', n_iter=5000, random_state=0)\n fittedValues = tsne_model.fit_transform(tokens)\n X = []\n Y = []\n counter = 0\n for val in fittedValues:\n X.append(val[0])\n Y.append(val[1])\n #X.append(wv[labels[counter]][0])\n #Y.append(wv[labels[counter]][1])\n #counter += 1\n \n kellyColors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4',\n '#46f0f0', '#f032e6', '#d2f53c', '#fabebe', '#008080', '#e6beff',\n '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1',\n '#000080', '#808080', '#000000', '#FFFFFF'] \n plt.figure(figsize=(16, 16)) \n scatterSize = 20 #200\n fontSize = 4 #40\n \n for ii in range(len(X)):\n idx = int(labels[ii])\n if (nodeLabel[idx] >= len(kellyColors)-1):\n continue\n idxColor = nodeLabel[idx] % (len(kellyColors)-1)\n colorRGB = kellyColors[idxColor]\n plt.scatter(X[ii], Y[ii], s=scatterSize, color=colorRGB)\n plt.annotate(labels[ii],\n xy=(X[ii], Y[ii]),\n xytext=(5, 2),\n size = fontSize,\n textcoords='offset points',\n ha='right',\n va='bottom',color=colorRGB)\n plt.show()\n\n kmeanResult = KMeans(n_clusters=42, random_state=0).fit(np.array(tokens))\n \n testNodes = [1, 20, 100, 300, 900]\n for ii in testNodes:\n print(\"Node #%d:\" % ii)\n print(\"\\tground truth: \", end='')\n nodeGroup = 0\n for aNode in nodeLabel.keys():\n if (aNode == ii):\n nodeGroup = nodeLabel[ii]\n break\n for aNode in nodeLabel.keys():\n if (nodeLabel[aNode] == nodeGroup):\n print(\"%d \" % aNode, end='')\n print(\"\")\n\n print(\"\\tpredicted: \", end='')\n nodeGroup = kmeanResult.labels_[ii]\n for aNode in range(N):\n if (kmeanResult.labels_[aNode] == nodeGroup):\n print(\"%d \" % aNode, end='')\n print(\"\")\n\n print(\"similarity of nodes within same group:\")\n counter = 0\n for nodeGroup in range(maxGroup+1):\n nodeList = []\n for aNode in nodeLabel.keys():\n if (nodeLabel[aNode] == nodeGroup):\n nodeList.append(aNode)\n dataArray = []\n for node1 in nodeList:\n v1 = wv[str(node1)]\n for node2 in nodeList:\n if (node1 == node2):\n continue\n v2 = wv[str(node2)]\n cosSim = dot(v1, v2)/ norm(v1) / norm(v2)\n dataArray.append(cosSim)\n Dmin, Dmax, Dmean, Dsum = getSigma(dataArray)\n counter += 1\n print(\"Group #%d: N = %d, min = %.2f, max = %.2f, mean = 
%.2f, sigma = %.2f\" % (counter, len(nodeList), Dmin, Dmax, Dmean, Dsum))\n \n \n \n \n \n \n \n \n ","sub_path":"struc2vec/src/clusterVectors.py","file_name":"clusterVectors.py","file_ext":"py","file_size_in_byte":9331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110756576","text":"import urllib.request\nimport json\n\nclient_id = \"privacy\"\nclient_secret = \"privacy\"\n\nlanguage = [\"ko\", \"ja\", \"zh-cn\", \"zh-tw\", \"hi\", \"en\", \"es\", \"fr\", \"de\", \"pt\", \"vi\", \"id\", \"fa\", \"ar\", \"mm\", \"th\", \"ru\", \"it\"]\n\nwith open('source.txt', 'r', encoding='utf8') as file:\n srcText = file.read()\n\nprint(\"Language: {}\".format(language))\nsrc = input(\"Language: \")\ntar = input(\"Language: \")\n\nencText = urllib.parse.quote(srcText)\ndata = \"source=\" + src + \"&target=\" + tar + \"&text=\" + encText\nurl = \"https://openapi.naver.com/v1/papago/n2mt\"\nrequest = urllib.request.Request(url)\nrequest.add_header(\"X-Naver-Client-Id\", client_id)\nrequest.add_header(\"X-Naver-Client-Secret\", client_secret)\n\nresponse = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\nrescode = response.getcode()\n\nif rescode == 200:\n response_body = response.read()\n\n res = json.loads(response_body.decode('utf-8'))\n from pprint import pprint\n pprint(res)\n\n with open('translation.txt', 'w', encoding='utf8') as file:\n file.write(res['message']['result']['translatedText'])\nelse:\n print(\"Error Code: \" + rescode)\n","sub_path":"Application/Project/Coding Interface/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433837162","text":"from src import scrape_messages\n\n\ndef create_message_list(convo):\n message_list = list(scrape_messages.get_convo_id(convo))\n message_classes = scrape_messages.get_message_classes(convo)\n messages = scrape_messages.get_message_content(convo)\n for c, m in zip(message_classes, messages):\n message_list.append(str(c) + str(m))\n return message_list\n\n\ndef create_split_convo_list(convos):\n convo_list = []\n for convo in convos:\n convo_list.append(create_message_list(convo))\n return convo_list\n\n\ndef write_split_convos_to_files(convos):\n for convo in convos:\n with open(convo[0] + '.htm', 'a+',\n encoding='utf-8') as output_file:\n for message in convo[1:]:\n output_file.write(str(message) + '\\n')\n output_file.close()\n","sub_path":"extra/split_messages.py","file_name":"split_messages.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"107178549","text":"import sympy\nfrom parsing import command\n\ndef equation(expr):\n\t\"\"\"Converts an equation of the form a+b = c+d into a form sympy can manipulate, in this case c+d-a-b.\"\"\"\n\tsplit = expr.find(\"=\")\n\tif split < 0:\n\t\traise SyntaxError(\"Invalid equation syntax.\")\n\tls = sympy.sympify(expr[:split].strip())\n\trs = sympy.sympify(expr[split+1:].strip())\n\treturn rs-ls\n\ndef solve_for(expr, variable):\n\ttry:\n\t\texpr = equation(expr)\n\t\ts = sympy.symbols(variable)\n\t\treturn str(sympy.solveset(expr, s))\n\texcept:\n\t\treturn \"Incorrect equation syntax.\"\nsolve_cmd = command(\"solve\", arguments=[\"expr\", (\"variable\", \"x\")], run=solve_for, help=\"Solves the specified equation for the specified variable.\")\n\ndef say_func(text):\n\treturn text\nsay = command(\"say\", 
arguments=[\"text\"], run=say_func)\n\ndef factor_polynomial(expr):\n\ttry:\n\t\texpr = sympy.sympify(expr)\n\t\treturn str(sympy.factor(expr))\n\texcept:\n\t\treturn \"Invalid expression syntax.\"\nfactor = command(\"factor\", arguments=[\"expr\"], run=factor_polynomial, help=\"Factors a polynomial with rational coefficients.\")\n\ndef eval_func(expr):\n\ttry:\n\t\texpr = sympy.sympify(expr)\n\t\treturn str(expr)\n\texcept:\n\t\treturn \"Invalid expression syntax.\"\neval_cmd = command(\"eval\", arguments=[\"expr\"], run = eval_func, help=\"Evaluate an algebraic expression.\")\n\ndef calc_func(expr):\n\ttry:\n\t\texpr = sympy.sympify(expr)\n\t\treturn str(float(expr))\n\texcept:\n\t\treturn \"Could not convert expression to float.\"\ncalc = command(\"calc\", arguments=[\"expr\"], run = calc_func, help=\"Evaluates an expression and returns the float evaluation.\")\n\ndef roots_func(expr):\n\ttry:\n\t\texpr = sympy.sympify(expr)\n\t\treturn str(sympy.roots(expr))\n\texcept:\n\t\treturn \"Invalid expression syntax.\"\nroots_cmd = command(\"roots\", arguments=[\"expr\"], run = roots_func, help=\"Return the roots of an equation.\")\n\ndef simplify_func(expr):\n\tprint(expr)\n\ttry:\n\t\texpr = sympy.sympify(expr)\n\t\treturn str(sympy.simplify(expr))\n\texcept:\n\t\treturn \"Invalid expression syntax.\"\nsimplify_cmd = command(\"simplify\", arguments=[\"expr\"], run=simplify_func, help=\"Siplifies the given expression.\")\n\nclass help_command(command):\n\tdef __init__(self, name=\"help\", arguments=[(\"topic\",)], run=None, syntax=None, help=\"Type help for help on a specific function.\", parser=None):\n\t\tsuper(help_command, self).__init__(name, arguments, run, syntax, help, parser)\n\n\tdef run(self, topic=None):\n\t\tprint(\"Topic is {}.\".format(topic))\n\t\tif not topic:\n\t\t\tmsg = \"Welcome to {name}, version {version}. 
\".format(**self.parser.info)\n\t\t\tmsg += \"Available commands: \"+\", \".join(self.parser.commands.keys())+\".\"\n\t\t\treturn msg\n\t\telse:\n\t\t\tcmd = self.parser.get_command_by_name(topic.lower())\n\t\t\tif not cmd:\n\t\t\t\treturn \"Command {} not found.\".format(topic.lower())\n\t\t\telse:\n\t\t\t\treturn cmd.help+\" \"+cmd.syntax\n\nhelp_cmd = help_command()\n\ncommands = {\n\t'help': help_cmd,\n\t'solve': solve_cmd,\n\t'factor': factor,\n\t'eval': eval_cmd,\n\t'calc': calc,\n\t'roots': roots_cmd,\n\t'simplify': simplify_cmd,\n}","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29871328","text":"import libprojector\n\nPROJECTION_EQUIRECTANGULAR = 'equirectangular'\nPROJECTION_CUBEMAP = 'cubemap'\n\n\nclass BaseProj(object):\n \n def __init__(self, image_width, options):\n self.image_width = image_width\n self.options = options\n\n def get_projection(self):\n raise NotImplementedError\n\n\nclass EquirectangularProj(BaseProj):\n \n def get_projection(self):\n width = int(self.image_width)\n height = int(self.image_width / 2)\n return libprojector.SphericalProjection(width, height)\n\n\nclass CubemapProj(BaseProj):\n \n def get_projection(self):\n side_width = int(self.image_width / 6)\n border_padding = self.options.get('border_padding', 0)\n return libprojector.CubemapProjection(side_width, border_padding)\n\n\nPROJECTION_CLASSES = dict((\n (PROJECTION_EQUIRECTANGULAR, EquirectangularProj),\n (PROJECTION_CUBEMAP, CubemapProj),\n))\n","sub_path":"projector/projections.py","file_name":"projections.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381021505","text":"import os,cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nfrom PIL import Image\r\nfrom numpy import *\r\n\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.cross_validation import train_test_split\r\n\r\nfrom keras import backend as K\r\nK.set_image_dim_ordering('tf')\r\n\r\nfrom keras.utils import np_utils\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.optimizers import SGD,RMSprop,adam\r\n\r\n\r\npath1='D:\\Pomo Disease dataset\\Input'\r\npath2='D:\\Pomo Disease dataset\\Input_resized'\r\n\r\nlisting = os.listdir(path1) \r\nnum_samples=size(listing)\r\nprint (num_samples)\r\n\r\nimg_rows=128\r\nimg_cols=128\r\nnum_channel=1\r\nnum_epoch=20\r\n\r\nfor file in listing:\r\n im = Image.open(path1 + '\\\\' + file) \r\n img = im.resize((img_rows,img_cols))\r\n gray = img.convert('L') #need to do some more processing here \r\n gray.save(path2 +'\\\\' + file, \"JPEG\")\r\n \r\nimlist = os.listdir(path2)\r\n\r\nim1 = array(Image.open('D:\\Pomo Disease dataset\\Input_resized' + '\\\\'+ imlist[0])) # open one image to get size\r\nm,n = im1.shape[0:2] # get the size of the images\r\nimnbr = len(imlist) # get the number of images\r\n\r\n# create matrix to store all flattened images\r\n \r\nimmatrix = array([array(Image.open('D:\\Pomo Disease dataset\\Input_resized'+ '\\\\' + im2)).flatten()\r\n for im2 in imlist],'f')\r\n\r\nlabel=np.ones((num_samples,),dtype = int)\r\n\r\nlabel[0:20]=0\r\nlabel[20:43]=1\r\nlabel[43:49]=2\r\nlabel[49:58]=3 \r\nlabel[58:73]=4\r\n\r\ndata,Label = shuffle(immatrix,label, 
random_state=2)\r\ntrain_data = [data,Label]\r\n\r\nimg=immatrix[4].reshape(img_rows,img_cols)\r\nplt.imshow(img)\r\nplt.imshow(img,cmap='gray')\r\nprint (train_data[0].shape)\r\nprint (train_data[1].shape)\r\n\r\n#%%\r\n\r\n#batch_size to train\r\nbatch_size = 30\r\n# number of output classes\r\nnb_classes = 5\r\n\r\n\r\nnb_epoch=20\r\n# number of convolutional filters to use\r\nnb_filters = 32\r\n# size of pooling area for max pooling\r\nnb_pool = 2\r\n# convolution kernel size\r\nnb_conv = 3\r\n\r\n#%%\r\n(X, y) = (train_data[0],train_data[1])\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)\r\n\r\nX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,1)\r\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols,1)\r\n\r\nX_train = X_train.astype('float32')\r\nX_test = X_test.astype('float32')\r\n\r\nX_train /= 255\r\nX_test /= 255\r\n\r\nprint('X_train shape:', X_train.shape)\r\nprint(X_train.shape[0], 'train samples')\r\nprint(X_test.shape[0], 'test samples')\r\n\r\nY_train = np_utils.to_categorical(y_train, nb_classes)\r\nY_test = np_utils.to_categorical(y_test, nb_classes)\r\n\r\n#%%\r\n# Defining the model\r\n\r\n\t\t\t\t\t\r\nmodel = Sequential()\r\n\r\nmodel.add(Convolution2D(32, (3,3),border_mode='same',input_shape=(img_rows, img_cols,1))) #CHANGE IN CODE\r\nconvout1=Activation('relu')\r\nmodel.add(convout1)\r\nmodel.add(Convolution2D(32, (3, 3)))\r\nconvout2=Activation('relu')\r\nmodel.add(convout2)\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.5))\r\n\r\nmodel.add(Convolution2D(64, (3, 3)))\r\nconvout3=Activation('relu')\r\nmodel.add(convout3)\r\n#model.add(Convolution2D(64, 3, 3))\r\n#model.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.5))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(nb_classes)) #CHANGE IN PARA\r\nmodel.add(Activation('softmax'))\r\n\r\n#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n#model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=[\"accuracy\"])\r\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=[\"accuracy\"])\r\n\r\n#%%\r\nhist = model.fit(X_train, Y_train, batch_size=16, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, Y_test))\r\n\r\nscore = model.evaluate(X_test, Y_test,verbose=0)\r\nprint('Test score:', score[0])\r\nprint('Test accuracy:', score[1])\r\n\r\nprint(model.predict_classes(X_test[1:6]))\r\nprint(Y_test[1:6])\r\n\r\n\r\n# Viewing model_configuration\r\n\r\nmodel.summary()\r\nmodel.get_config()\r\nmodel.layers[0].get_config()\r\nmodel.layers[0].input_shape\t\t\t\r\nmodel.layers[0].output_shape\t\t\t\r\nmodel.layers[0].get_weights()\r\nnp.shape(model.layers[0].get_weights()[0])\r\nmodel.layers[0].trainable\r\n\r\n# visualizing losses and accuracy\r\ntrain_loss=hist.history['loss']\r\nval_loss=hist.history['val_loss']\r\ntrain_acc=hist.history['acc']\r\nval_acc=hist.history['val_acc']\r\nxc=range(num_epoch)\r\n\r\nplt.figure(1,figsize=(7,5))\r\nplt.plot(xc,train_loss)\r\nplt.plot(xc,val_loss)\r\nplt.xlabel('num of Epochs')\r\nplt.ylabel('loss')\r\nplt.title('train_loss vs val_loss')\r\nplt.grid(True)\r\nplt.legend(['train','val'])\r\n#print plt.style.available # use bmh, classic,ggplot for big pictures\r\nplt.style.use(['classic'])\r\n\r\nplt.figure(2,figsize=(7,5))\r\nplt.plot(xc,train_acc)\r\nplt.plot(xc,val_acc)\r\nplt.xlabel('num of 
Epochs')\r\nplt.ylabel('accuracy')\r\nplt.title('train_acc vs val_acc')\r\nplt.grid(True)\r\nplt.legend(['train','val'],loc=4)\r\n#print plt.style.available # use bmh, classic,ggplot for big pictures\r\nplt.style.use(['classic'])\r\n\r\n#%%\r\nfrom sklearn.metrics import classification_report,confusion_matrix\r\n\r\nY_pred = model.predict(X_test)\r\nprint(Y_pred)\r\ny_pred = np.argmax(Y_pred, axis=1)\r\nprint(y_pred)\r\n#or\r\ny_pred = model.predict_classes(X_test)\r\nprint(y_pred)\r\n\r\n#%%\r\n#plotting probability\r\np=model.predict_proba(X_test) # to predict probability\r\nprint(p)\r\n\r\n#%%\r\ntarget_names = ['class 0(Alternaria Alternata)', 'class 1(Anthracnose)', 'class 2(Bacterial Blight)','class 3(Cercospora Leaf Spot)','class 4(Healthy Leaves)']\r\nprint(classification_report(np.argmax(Y_test,axis=1), y_pred,target_names=target_names))\r\nprint(confusion_matrix(np.argmax(Y_test,axis=1), y_pred))\r\n\r\n#%%\r\n# saving weights\r\n\r\nfname = \"weights-Test-CNN.hdf5\"\r\nmodel.save_weights(fname,overwrite=True)\r\n#%%\r\nfname = \"weights-Test-CNN.hdf5\"\r\nmodel.load_weights(fname)\r\n\r\n\r\n","sub_path":"SampleCodes/Deep_Learning_Pomo.py","file_name":"Deep_Learning_Pomo.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335294780","text":"# Multiply & Add Layer Test\nimport sys\nimport os\nfrom pathlib import Path\ntry:\n    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))\n    from layers import Multiply, Add\nexcept ImportError:\n    raise ImportError(\"Library Module Can Not Be Found\")\n\napple = 100\napplecount = 2\norange = 150\norangecount = 3\ndiscount = 0.9\n\nmultiply_appleprice = Multiply()\nmultiply_orangeprice = Multiply()\nadd_appleorangeprice = Add()\nmultiply_discountprice = Multiply()\n\n# forward propagation\nappleprice = multiply_appleprice.forward(apple, applecount)\nprint(f'appleprice = {appleprice}')\n\norangeprice = multiply_orangeprice.forward(orange, orangecount)\nprint(f'orangeprice = {orangeprice}')\n\nappleorangeprice = add_appleorangeprice.forward(appleprice, orangeprice)\nprint(f'appleorangeprice = {appleorangeprice}')\n\ndiscountprice = multiply_discountprice.forward(appleorangeprice, discount)\nprint(f'discountprice = {discountprice}')\n\n# backward propagation\nddiscountprice = 1\n\ndappleorangeprice, ddiscount = multiply_discountprice.backward(ddiscountprice)\nprint(f'dappleorangeprice = {dappleorangeprice}, ddiscount = {ddiscount}')\n\ndappleprice, dorangeprice = add_appleorangeprice.backward(dappleorangeprice)\nprint(f'dappleprice = {dappleprice}, dorangeprice = {dorangeprice}')\n\ndapple, dapplecount = multiply_appleprice.backward(dappleprice)\nprint(f'dapple = {dapple}, dapplecount = {dapplecount}')\n\ndorange, dorangecount = multiply_orangeprice.backward(dorangeprice)\nprint(f'dorange = {dorange}, dorangecount = {dorangecount}')
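\n\n# (Added, illustrative) a quick analytic check of the chain rule above:\n# price = (apple*applecount + orange*orangecount) * discount, so\n# dapple should equal applecount * discount = 2 * 0.9 = 1.8 and\n# dorange should equal orangecount * discount = 3 * 0.9 = 2.7.\n","sub_path":"04.deep-learning/02.neural-network/07.backpropagation/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"234689969","text":"import numpy as np\nfrom Modules.Models.KernelModel import KernelModel\n\n\nclass KernelSVM(KernelModel):\n    def __init__(self, kernel=None,\n                 informations=True, lamda=1.0, max_iter=10000, tol=1e-6,\n                 preprocessing=None, normalisation=None):\n        # Instantiation of the super class\n        super().__init__(informations=informations, kernel=kernel)\n\n        # Name of 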
the class\n        self.name = \"KernelSVM\"\n\n        # Hyperparameter for the regulariser in the optimisation\n        self.lamba = lamda\n\n        # Gradient Descent parameters\n        self.max_iter = int(max_iter)\n        self.tol = tol\n        self.proj_cst = 0.1\n        self.preprocessing = preprocessing\n        self.normalisation = normalisation\n\n    def fit(self, data_train, labels, alpha_init=[]):\n        \"\"\"\n        ASSUMPTION: CONSTANT LEARNING RATE!!!!\n\n        :param data_train:\n        :param labels:\n        :param alpha_init:\n        :return:\n        \"\"\"\n\n        # Computation of K\n        self.K_train_func = lambda data: self.kernel.call(data_train, data)\n        self.K_train = self.K_train_func(data_train)\n\n        # Preprocessing of K_train\n        if self.preprocessing is not(None):\n            self.K_train = self.preprocessing(self.K_train)\n        if self.normalisation is not(None):\n            self.K_train = self.normalisation(self.K_train)\n\n        if self.informations:\n            print('K train mean: {}; std: {}'.format(self.K_train.mean(), self.K_train.std()))\n\n        # Bijection between values of y and {-1, 1}\n        self.Fromlabels = {min(labels): -1, max(labels): 1}\n        self.Tolabels = {-1: min(labels), 1: max(labels)}\n        self.y_train = np.array([self.Fromlabels[y_i] for y_i in labels]).reshape((-1, 1))\n\n        # Initialisation of alpha\n        self.n_examples = len(self.K_train)\n        if alpha_init == []:\n            self.alpha = np.zeros(self.n_examples)\n        else:\n            self.alpha = np.array(alpha_init)\n\n        # Initialisation of alpha_t-1 and ite\n        ite = 0\n        self.alpha_previous = 100 * np.ones(self.n_examples)\n\n        # Condition to stop the gradient descent\n        diff_alphas = np.abs(self.alpha.reshape(-1) -\\\n                             self.alpha_previous.reshape(-1)).max()\n\n        # Projected gradient procedure (iterate while the update is still large)\n        while (ite < self.max_iter) and (diff_alphas > self.tol):\n\n            # Compute the gradient\n            grad = self.gradient()\n            grad_norm = np.abs(grad).max()\n\n            # Compute the current loss\n            loss = self.trainLoss()\n\n            # Solve best alpha and project them\n            invert = np.identity(self.n_examples)\n            try:\n                alpha_star = np.linalg.solve(\n                    a=self.K_train + 1e-7 * invert,\n                    b=self.y_train.reshape(-1))\n            except np.linalg.LinAlgError:\n                print(self.K_train.mean(), self.K_train.std())\n                raise\n            proj_alpha = self.project_alpha(alpha_star)\n\n            # alpha_n = proj_y_n\n            self.alpha_previous = self.alpha\n            self.alpha = self.alpha + self.proj_cst * (proj_alpha.reshape(-1) -\\\n                                                       self.alpha)\n\n            # Refresh the stopping criterion and advance the iteration counter\n            # (both were missing, so the loop could never progress or stop)\n            diff_alphas = np.abs(self.alpha.reshape(-1) -\\\n                                 self.alpha_previous.reshape(-1)).max()\n            ite += 1\n\n            if self.informations and ite % 1 == 0:\n                try:\n                    print(\"Iterations done: {}, Loss: {:.4f}, Gradient: {}\".format(ite, loss, grad_norm))\n                except:\n                    print(loss, grad_norm)\n\n        self.alpha = self.project_alpha(self.alpha)\n        return self\n\n    def project_alpha(self, alpha):\n        \"\"\"\n        Projects alpha such that alpha is admissible:\n\n            0.0 <= y_i x alpha_i <= 1.0 / (2 x lamda x n)\n\n        :param alpha:\n        :return:\n        \"\"\"\n\n        # Compute y_i x \\alpha_i\n        prod_sign = np.sign(alpha.reshape(-1) * self.y_train.reshape(-1))\n        alpha_sign = np.sign(alpha.reshape(-1))\n\n        # Useless operation but shows that necessarily proj_alpha=0\n        # a_max = 0.1 * np.zeros(self.n_examples)\n        # a_max[prod_sign < 0.0] = 0.0\n        #\n        # # Clip to margin C\n        # a_max[prod_sign > 0.0] = 1.0 / (2.0 * self.lamba * self.n_examples)\n\n        a_min = np.zeros(self.n_examples)\n        a_max = np.zeros(self.n_examples)\n\n        a_min[(prod_sign > 0.0) & (alpha_sign > 0.0)] = 0.0\n        a_max[(prod_sign > 0.0) & (alpha_sign > 0.0)] = 1.0 / (2.0 * self.lamba * self.n_examples)\n\n        a_min[(prod_sign > 0.0) & (alpha_sign < 0.0)] = -1.0 / (2.0 * self.lamba * self.n_examples)\n        a_max[(prod_sign > 0.0) & (alpha_sign < 0.0)] = 0.0\n\n        return np.clip(alpha, a_min=a_min, a_max=a_max)
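\n\n    # (Added, illustrative) fit() leaves each coordinate of alpha inside the\n    # box [0, 1/(2*lamda*n)] up to sign, i.e. the dual-feasible region of a\n    # C-SVM with C = 1/(2*lamda*n). A quick post-fit sanity check could be\n    # ('model' stands for a fitted KernelSVM instance; not part of the class):\n    #\n    #     C = 1.0 / (2.0 * model.lamba * model.n_examples)\n    #     assert np.all(np.abs(model.alpha) <= C + 1e-12)\n\n    def 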
trainLoss(self):\n \"\"\"Compute the loss for the training set.\"\"\"\n return self.alpha.T.dot(self.y_train) -\\\n 0.5 * self.alpha.T.dot(self.K_train.dot(self.alpha))\n\n def gradient(self):\n \"\"\"Compute the gradient for the current self.alpha.\"\"\"\n return self.K_train.dot(self.alpha) - self.y_train\n\n def predict(self, data_test, average_size=3):\n \"\"\"Predict a class for dataset.\"\"\"\n\n # Computation of K\n K_pre = self.K_train_func(data_test)\n\n # Preprocessing of K_test\n if self.preprocessing is not(None):\n K_pre = self.preprocessing(K_pre)\n\n if self.normalisation is not(None):\n K_test = self.kernel.call(data_test, data_test)\n K_test = self.normalisation(self.preprocessing(K_test))\n K_pre = self.normalisation(K_pre, K_train=self.K_train,\n K_test=K_test)\n\n # Extract alpha_predict\n alpha_predict = self.alpha.reshape(-1)\n\n # Prediction\n y_pred = np.where(alpha_predict.T.dot(K_pre) >= 0.0, 1, -1)\n label_pred = np.array([self.Tolabels[y_i] for y_i in y_pred])\n label_pred = label_pred.reshape((-1, 1))\n\n return label_pred\n","sub_path":"Modules/Models/KernelSVM.py","file_name":"KernelSVM.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381645113","text":"import os, dirDiff\n\nblocksize = 1024 * 1024\n\ndef intersect(seq1, seq2):\n\treturn [item for item in seq1 if item in seq2]\n\t\n\ndef compareTrees(dir1, dir2, diffs):\n\tnames1 = os.listdir(dir1)\n\tnames2 = os.listdir(dir2)\n\tdirDiff.compareDirs(dir1, dir2, diffs, names1, names2)\n\t\n\tcommon = intersect(names1, names2)\n\tmissed = common[:]\n\tfor name in common:\n\t\tpath1 = os.path.join(dir1, name)\n\t\tpath2 = os.path.join(dir2, name)\n\t\tif os.path.isfile(path1) and os.path.isfile(path2):\n\t\t\tmissed.remove(name)\n\t\t\tfile1 = open(path1, 'rb')\n\t\t\tfile2 = open(path2, 'rb')\n\t\t\twhile True:\n\t\t\t\tbytes1 = file1.read(blocksize)\n\t\t\t\tbytes2 = file2.read(blocksize)\n\t\t\t\tif not bytes1 and not bytes2:\n\t\t\t\t\tbreak\n\t\t\t\tif bytes1 != bytes2:\n\t\t\t\t\tdiffs.append(path1 + ' <---> ' + path2)\n\t\t\t\t\tbreak\n\t\telif os.path.isdir(path1) and os.path.isdir(path2):\n\t\t\tmissed.remove(name)\n\t\t\tcompareTrees(path1, path2, diffs)\n\t\telse:\n\t\t\tdiffs.append('unique to ' + dir1)\n\t\t\tdiffs.append('---> ' + path1)\n\t\t\tdiffs.append('unique to ' + dir2)\n\t\t\tdiffs.append('---> ' + path2)\n\t\t\t\n\nif __name__ == '__main__':\n\tdir1, dir2 = dirDiff.getargs()\n\tdiffs = []\n\tcompareTrees(dir1, dir2, diffs)\n\tif not diffs:\n\t\tprint('No diff found')\n\telse:\n\t\tfor diff in diffs: print(diff)\n\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n","sub_path":"old/Python/OLD/OLD/dirDiff/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531387250","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__='Uglyboy'\n\nimport sqlite3\nimport os\n\nfrom .Singleton import Singleton\n\nclass DB(Singleton):\n def __init__(self):\n if not hasattr(self, '_conn'):\n self._conn = sqlite3.connect(os.path.split(os.path.realpath(__file__))[0] + \"/Database.db\")\n \n def __del__(self):\n self._commit()\n self._conn.close()\n\n def _execute(self,sql):\n if not hasattr(self, '_cursor'):\n self._cursor=self._conn.cursor()\n return self._cursor.execute(sql)\n\n def _commit(self):\n if hasattr(self, '_cursor'):\n self._cursor.close()\n delattr(self,'_cursor')\n 
self._conn.commit()\n \n ### Manage Table\n def create(self,sql_create):\n self._execute(sql_create)\n self._commit()\n\n def drop(self, table):\n self._execute(\"drop table \" + table + \";\")\n self._commit()\n\n def truncate(self,table):\n self._execute(\"delete from \" + table + \";\")\n if self.check_table(\"sqlite_sequence\"):\n self._execute(\"delete from sqlite_sequence where name = '\" + table +\"';\")\n self._commit()\n\n def check_table(self,table):\n cursor = self._execute(\"select * from sqlite_master where type='table' and name = '\" + table + \"';\")\n for row in cursor:\n if row[1] == table:\n return True\n return False\n \n def show(self,table,condition=None):\n sql = \"select * from \" + table\n if condition is not None:\n sql = sql + \" where \" + condition\n sql = sql + \";\"\n cursor = self._execute(sql)\n for row in cursor:\n print(row)\n \n \n ### Get List Data\n def _str_condition_in(self,list_name,list):\n _str = list_name + \" in ('\"\n for item in list:\n _str = _str + item + \"','\"\n _str = _str[:-2] + \") \"\n return _str\n \n def _cursor_to_list(self,cursor):\n _list = []\n for row in cursor:\n if len(row) == 1:\n _list.append(row[0])\n else:\n _list.append(row)\n return _list\n\n def _list_from_db(self,db_name,list_name,condition):\n sql = \"select \" + list_name + \" from \" + db_name + \" where \" + condition + \";\"\n #print(sql)\n cursor = self._execute(sql)\n return self._cursor_to_list(cursor)\n","sub_path":"ding/util/SQLite.py","file_name":"SQLite.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69124537","text":"import sys\n\nn = int(sys.stdin.readline())\n\ntime = list()\nprice = list()\n\nfor _ in range(n):\n\tt, p = map(int, sys.stdin.readline().split())\n\ttime.append(t)\n\tprice.append(p)\n\ndp = [[0 for x in range(n)] for y in range(n)] \nfor i in range(n):\n\tif i >= time[0] - 1:\n\t\tdp[0][i] = price[0]\n\nfor i in range(1, n):\n\tfor j in range(n):\n\t\tif j >= time[i] + i - 1 and i + time[i] <= n and j >= i:\n\t\t\tdp[i][j] = max(dp[i - 1][i - 1] + price[i], dp[i - 1][j])\n\t\telse :\n\t\t\tdp[i][j] = dp[i - 1][j]\n\nprint(dp[n - 1][n - 1])","sub_path":"2020_01_02/BOJ14501_JH.py","file_name":"BOJ14501_JH.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58163380","text":"import datetime\nimport glob\n\nimport pydicom\nfrom pydicom.dataset import Dataset, FileDataset, FileMetaDataset\nfrom pydicom.sequence import Sequence\n\nimport config\n\n\ndef get_file_meta(dcms):\n dcm = dcms[0]\n file_meta = FileMetaDataset()\n file_meta.FileMetaInformationVersion = dcm.file_meta.FileMetaInformationVersion\n file_meta.MediaStorageSOPClassUID = \"RT Structure Set Storage\"\n file_meta.MediaStorageSOPInstanceUID = \"Anonymous\" # TODO\n file_meta.TransferSyntaxUID = dcm.file_meta.TransferSyntaxUID\n file_meta.ImplementationClassUID = dcm.file_meta.ImplementationClassUID\n file_meta.ImplementationVersionName = dcm.file_meta.ImplementationVersionName\n return file_meta\n\n\ndef add_referenced_frame_of_reference_sequence(ds, dcms):\n dcm = dcms[0]\n\n # Referenced Frame of Reference Sequence\n refd_frame_of_ref_sequence = Sequence()\n ds.ReferencedFrameOfReferenceSequence = refd_frame_of_ref_sequence\n\n # Referenced Frame of Reference Sequence: Referenced Frame of Reference 1\n refd_frame_of_ref1 = Dataset()\n refd_frame_of_ref1.FrameOfReferenceUID = 
dcm.FrameOfReferenceUID\n\n # RT Referenced Study Sequence\n rt_refd_study_sequence = Sequence()\n refd_frame_of_ref1.RTReferencedStudySequence = rt_refd_study_sequence\n\n # RT Referenced Study Sequence: RT Referenced Study 1\n rt_refd_study1 = Dataset()\n rt_refd_study1.ReferencedSOPClassUID = ds.file_meta.MediaStorageSOPClassUID\n rt_refd_study1.ReferencedSOPInstanceUID = (\n \"2.25.152307708682568459392858274513677418485\" # TODO\n )\n\n # RT Referenced Series Sequence\n rt_refd_series_sequence = Sequence()\n rt_refd_study1.RTReferencedSeriesSequence = rt_refd_series_sequence\n\n # RT Referenced Series Sequence: RT Referenced Series 1\n rt_refd_series1 = Dataset()\n rt_refd_series1.SeriesInstanceUID = dcm.SeriesInstanceUID\n\n # Contour Image Sequence\n contour_image_sequence = Sequence()\n rt_refd_series1.ContourImageSequence = contour_image_sequence\n\n for dcm in dcms:\n contour_image = Dataset()\n contour_image.ReferencedSOPClassUID = dcm.SOPClassUID\n contour_image.ReferencedSOPInstanceUID = dcm.SOPInstanceUID\n contour_image_sequence.append(contour_image)\n\n rt_refd_series_sequence.append(rt_refd_series1)\n rt_refd_study_sequence.append(rt_refd_study1)\n refd_frame_of_ref_sequence.append(refd_frame_of_ref1)\n\n return ds\n\n\ndef add_structure_set_roi_sequence(ds):\n # Structure Set ROI Sequence\n structure_set_roi_sequence = Sequence()\n ds.StructureSetROISequence = structure_set_roi_sequence\n\n # Structure Set ROI Sequence: Structure Set ROI 1\n structure_set_roi1 = Dataset()\n structure_set_roi1.ROINumber = \"27\"\n structure_set_roi1.ReferencedFrameOfReferenceUID = dcm.FrameOfReferenceUID\n structure_set_roi1.ROIName = \"Vacbag\"\n structure_set_roi1.ROIGenerationAlgorithm = \"AUTOMATIC\"\n structure_set_roi_sequence.append(structure_set_roi1)\n\n return ds\n\n\ndef add_roi_contour_sequence(ds, dcms):\n # ROI Contour Sequence\n roi_contour_sequence = Sequence()\n ds.ROIContourSequence = roi_contour_sequence\n\n # ROI Contour Sequence: ROI Contour 1\n roi_contour1 = Dataset()\n roi_contour1.ROIDisplayColor = [128, 128, 255]\n\n # Contour Sequence\n contour_sequence = Sequence()\n roi_contour1.ContourSequence = contour_sequence\n\n for dcm in dcms:\n z = dcm.SliceLocation\n\n # Contour Sequence: Contour 1\n contour1 = Dataset()\n\n # Contour Image Sequence\n contour_image_sequence = Sequence()\n contour1.ContourImageSequence = contour_image_sequence\n\n # Contour Image Sequence: Contour Image 1\n contour_image1 = Dataset()\n contour_image1.ReferencedSOPClassUID = \"CT Image Storage\"\n contour_image1.ReferencedSOPInstanceUID = (\n \"2.25.152306916480171479263212437161921183733.1\" # TODO\n )\n contour_image_sequence.append(contour_image1)\n\n contour1.ContourGeometricType = \"CLOSED_PLANAR\"\n contour1.ContourData = [\n -100.5,\n -100.5,\n z,\n -99.9,\n -100.5,\n z,\n ] # TODO - GET CONTOUR DATA FROM INFERENCE\n contour1.NumberOfContourPoints = len(contour1.ContourData) // 3\n contour_sequence.append(contour1)\n\n roi_contour1.ReferencedROINumber = \"27\"\n roi_contour_sequence.append(roi_contour1)\n\n\ndef add_rt_roi_observations_sequence(ds):\n # RT ROI Observations Sequence\n rtroi_observations_sequence = Sequence()\n ds.RTROIObservationsSequence = rtroi_observations_sequence\n\n # RT ROI Observations Sequence: RT ROI Observations 1\n rtroi_observations1 = Dataset()\n rtroi_observations1.ObservationNumber = \"27\"\n rtroi_observations1.ReferencedROINumber = \"27\"\n rtroi_observations1.RTROIInterpretedType = \"ORGAN\"\n rtroi_observations1.ROIInterpreter = \"\"\n 
rtroi_observations_sequence.append(rtroi_observations1)\n\n return ds\n\n\ndcm_paths = glob.glob(config.DATA_PATH + \"/*\")\n\ndcms = [pydicom.dcmread(path, force=True) for path in dcm_paths]\n\ntry:\n dcms = sorted(dcms, key=lambda dcm: dcm.SliceLocation)\nexcept AttributeError:\n dcms = sorted(dcms, key=lambda dcm: dcm.SOPInstanceUID)\n\ndcm = dcms[0]\n\nds = Dataset()\nds.file_meta = get_file_meta(dcms)\nds.is_implicit_VR = True\nds.is_little_endian = True\nds.fix_meta_info(enforce_standard=True)\n\ndt = datetime.datetime.now()\n\nds.InstanceCreationDate = dt.strftime(\"%Y%m%d\")\nds.InstanceCreationTime = dt.strftime(\"%H%M%S.%f\")\nds.InstanceCreatorUID = \"Anonymous\" # TODO\nds.SOPInstanceUID = ds.file_meta.MediaStorageSOPInstanceUID\nds.SOPClassUID = ds.file_meta.MediaStorageSOPClassUID\nds.StudyDate = dcm.StudyDate\nds.StudyTime = dcm.StudyTime\nds.AccessionNumber = dcm.AccessionNumber\nds.Modality = \"RTSTRUCT\"\nds.Manufacturer = dcm.Manufacturer\nds.ReferringPhysicianName = dcm.ReferringPhysicianName\nds.InstitutionalDepartmentName = \"Anonymous\" # TODO\nds.ManufacturerModelName = dcm.ManufacturerModelName\nds.PatientName = dcm.PatientName\nds.PatientID = dcm.PatientID\nds.PatientBirthDate = dcm.PatientBirthDate\nds.PatientSex = dcm.PatientSex\nds.StudyInstanceUID = dcm.StudyInstanceUID\nds.StudyID = \"Anonymous\" # TODO\nds.SeriesNumber = \"1\" # TODO\nds.StructureSetLabel = \"STRCTRLABEL\"\nds.StructureSetName = \"STRCTRNAME\"\nds.StructureSetDate = ds.InstanceCreationDate\nds.StructureSetTime = ds.InstanceCreationTime\n\nds = add_referenced_frame_of_reference_sequence(ds, dcms)\nds = add_structure_set_roi_sequence(ds)\n\n# ds = add_roi_contour_sequence(ds, dcms)\nds = add_rt_roi_observations_sequence(ds)\n\nfor x in ds:\n print(x)\n\nds.save_as(\"Test\")\n","sub_path":"prototyping/auto-segmentation/mc_old_4/create_rs_file.py","file_name":"create_rs_file.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80880289","text":"import os\nimport random\n\ndirectory = os.getcwd()+\"/data/barcodes/\"\n\n\nfiles_img = os.listdir(directory+\"images/\")\nrandom.shuffle(files_img)\n\nprint(files_img)\n\ntest_sample = int(0.1*len(files_img))\n\ntrain_files = files_img[test_sample:]\ntest_files = files_img[:test_sample]\n\n\nprint(\"**************************\")\nprint(\"Train sample: {}/{} \".format(len(train_files), len(files_img)))\nprint(\"Test sample: {}/{} \".format(len(test_files), len(files_img)))\nprint(\"**************************\")\n\n\nwith open(directory+\"train.txt\", \"w\") as ftrain, open(directory+\"val.txt\", \"w\") as fval:\n for ifile in train_files:\n fname = directory+\"images/\"+ifile+\"\\n\"\n ftrain.write(fname)\n\n for ifile in test_files:\n fname = directory+\"images/\"+ifile+\"\\n\"\n fval.write(fname)\n\n\n\n\n\n\n\n\n","sub_path":"split_train_test.py","file_name":"split_train_test.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192027748","text":"\n\nfrom xai.brain.wordbase.nouns._advancement import _ADVANCEMENT\n\n#calss header\nclass _ADVANCEMENTS(_ADVANCEMENT, ):\n\tdef __init__(self,): \n\t\t_ADVANCEMENT.__init__(self)\n\t\tself.name = \"ADVANCEMENTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"advancement\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_advancements.py","file_name":"_advancements.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"208499370","text":"\"\"\"\nFunctions to create masks for images (2D arrays) \n\n\"\"\"\n\n# Import numpy\nimport numpy as np\n\ndef angularmask(image, start, stop, xcenter, ycenter):\n \"\"\"\n Purpose: Mask out an angular region from start to stop using\n (xcenter, ycenter) as origin. All pixels outside angular region\n are masked out (FALSE).\n\n Parameters\n ----------\n image: 2D array representing the image\n start: starting angle in radians\n stop: end angle in radians\n xcenter: x co-ordinate for origin\n ycenter: y co-ordinate for origin\n\n Masks (blocks) a set of pixels (array elements) within angular\n range from start to stop, evaluating the angle from\n (xcenter,ycenter).\n\n Angles defined as:\n \n |\n pi/2 --> pi | 0--> pi/2\n | \n --------+----------\n -pi/2 --> -pi | 0 --> -pi/2\n |\n |\n \n Angles increase in counter (anti) clockwise direction\n\n\n Returns\n -------\n result: boolean array, same dimensions as image\n TRUE - Unmasked pixel\n FALSE - Masked pixel \n \"\"\"\n\n # Create 2D array of positions w.r.t to origin\n xSize, ySize = image.shape\n y,x = np.ogrid[-ycenter:ySize-ycenter,-xcenter:xSize-xcenter]\n angle_array = np.arctan2(y,x) \n\n mask = None\n if stop > start :\n mask = (angle_array >= start) & (angle_array <= stop)\n else:\n mask = ~((angle_array >= stop) & (angle_array <= start))\n\n return mask\n\n\n\ndef circularmask(image, radius, xcenter, ycenter):\n \"\"\"\n Purpose: Mask out a circular region centered at (xcenter,ycenter)\n with radius of 'radius'. All pixels outside circular region are\n masked out (FALSE).\n\n Parameters\n ----------\n image: 2D array representing the image\n radius: radius of masked out region\n xcenter: x co-ordinate for origin\n ycenter: y co-ordinate for origin\n\n Returns\n -------\n result: boolean array, same dimensions as image\n TRUE - Unmasked pixel\n FALSE - Masked pixel \n\n \"\"\"\n xSize, ySize = image.shape\n y,x = np.ogrid[-ycenter:ySize-ycenter,-xcenter:xSize-xcenter]\n radii_array = np.hypot(x,y) \n \n mask = (radii_array < radius)\n\n return mask\n \n\n\n\n\n\n\n# Test code for mask\nif __name__ == \"__main__\" :\n import matplotlib.pyplot as plt\n\n testImage = np.ones([1024,1024])\n\n mask = angularmask(testImage, \n 0.125*np.pi,0.25*np.pi, \n 512,512)\n plt.imshow(testImage*mask,origin='lower')\n plt.draw()\n \n mask = circularmask(testImage, 256,\n 512,512)\n\n plt.imshow(testImage*mask,origin='lower')\n plt.draw()\n \n","sub_path":"Mask.py","file_name":"Mask.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131991630","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom gi import require_version\n\nrequire_version('Gtk', '3.0')\nfrom gi.repository import Gtk as gtk\n\n\nclass LogBookWindow(gtk.Window):\n '''\n main window for logbook\n '''\n\n def __init__(self):\n gtk.Window.__init__(self)\n gtk.Window.set_title(self, \"Dat issen Test\")\n gtk.Window.set_default_size(self, 600, 400)\n self.set_border_width(10)\n self.mainbox = gtk.Box(orientation=gtk.Orientation.HORIZONTAL)\n self.add(self.mainbox)\n\n self.leftbox = gtk.Box(orientation=gtk.Orientation.VERTICAL)\n self.mainbox.pack_start(self.leftbox, True, True, 0)\n\n self.rightbox = 
gtk.Box(orientation=gtk.Orientation.VERTICAL)\n        self.mainbox.pack_start(self.rightbox, True, True, 0)\n\n        # create label\n        self.label = gtk.Label()\n        self.label.set_markup(\"Your entry\")\n        self.rightbox.pack_start(self.label, True, True, 0)\n\n        self.entry = gtk.Entry()\n        self.leftbox.pack_start(self.entry, True, True, 0)\n\n        # create button1\n        self.button1 = gtk.Button(label=\"Save\")\n        self.button1.connect('clicked', self.change_label, self.entry)\n        self.leftbox.pack_start(self.button1, True, True, 0)\n\n        # create button2\n        self.button2 = gtk.Button(label=\"Quit\")\n        self.button2.connect(\"clicked\", gtk.main_quit)\n        self.leftbox.pack_start(self.button2, True, True, 0)\n\n    def change_label(self, widget, entry):\n        # 'clicked' passes the button first; 'entry' is the user data given\n        # to connect() above\n        self.label.set_markup(entry.get_text())\n\n\nlogBookWindow = LogBookWindow()\nlogBookWindow.connect('delete-event', gtk.main_quit)\nlogBookWindow.show_all()\ngtk.main()\n","sub_path":"LogBook.py","file_name":"LogBook.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254942084","text":"import json\nimport csv\n\n\nwith open('conflict_data_full_lined.json') as file:\n    data = json.load(file)\n\nwith open('UCDPconflictdata.csv', 'w', newline='') as file:\n    csvwriter = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n    \n    csvwriter.writerow(['country', 'year', 'type_of_violence', 'dyad_name', 'latitude', 'longitude', 'region', 'deaths_a', 'deaths_b', 'best', 'high', 'low'])\n    for line in data: \n        if int(line['year']) >= 2000 : # Making the year lines into integers, and selecting for the last two decades\n            csvwriter.writerow([line['country'], line['year'], line['type_of_violence'], line['dyad_name'], line['latitude'], line['longitude'], line['region'], line['deaths_a'], line['deaths_b'], line['best'], line['high'], line['low']]) \n\n\n    ","sub_path":"Conflicts.py","file_name":"Conflicts.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560915277","text":"\"\"\" Using the previously setup mongodb collection\nanswer the following questions:\n\n\n- How many total Characters are there?\n- How many of each specific subclass?\n- How many total Items?\n- How many of the Items are weapons? How many are not?\n- How many Items does each character have? (Return first 20 rows)\n- How many Weapons does each character have? 
(Return first 20 rows)\n- On average, how many Items does each Character have?\n- On average, how many Weapons does each character have?\n\"\"\"\nimport pymongo\nimport json\nclient = pymongo.MongoClient(\n \"mongodb+srv://foobarfoobar:foobarfoobar@cluster0.vvgzp.gcp.mongodb.net/?retryWrites=true&w=majority\")\ndb = client.test\n# rpgmdb.rpgdata # collection\nc = db[\"rpg_data\"]\n\n\nc.estimated_document_count()\n#collection = db.test_collection\ntable_names_list = c.distinct(\"model\")\n\n# first set of questions:\n# list containing all characters\ncharacters = list(c.find({\"model\": \"charactercreator.character\"}))\nprint(f\"The number of characters is {len(characters)}\")\n\nchar_types = []\nfor t in table_names_list:\n if (\"charactercreator\" in t) and (\".character\" not in t):\n char_types.append(t)\n\nfor subclass in char_types:\n sc = list(c.find({'model': subclass}))\n print(f\"The number of {subclass}s is {len(sc)}\")\n\nall_items = list(c.find({\"model\": \"armory.item\"}))\nprint(f\"The number of total Items is: {len(all_items)}\")\nall_weapons = list(c.find({\"model\": \"armory.weapon\"}))\nnum_weapons = len(all_weapons)\nprint(f\"\\n {num_weapons} of total Items are weapons. {len(all_items) - num_weapons} are not.\")\n\n\"\"\" number of items for each of the first 20 characters\"\"\"\nfor i in range(0, 20):\n print(f\"The character {characters[i]['fields']['name']} has \"\n f\"{ len(characters[i]['fields']['inventory'])} items\"\n )\n\n\"\"\"For the first 20 characters, print the number of Weapons \"\"\"\n\nall_weapons_ids = [x['pk'] for x in all_weapons]\n\nfor i in range(0, 20):\n char_items = characters[i]['fields']['inventory']\n char_weapon_count = 0\n\n for item in char_items: # check if each item in char inv is a weapon\n if item in all_weapons_ids:\n char_weapon_count += 1\n print(f\"The character {characters[i]['fields']['name']} has \"\n f\"{char_weapon_count} weapons\"\n )\n\ntotal_weapon_count = 0 # initialize counter\nfor i in range(0, len(characters)): # for all characters\n char_items = characters[i]['fields']['inventory']\n for item in char_items: # check if each item in char inv is a weapon\n if item in all_weapons_ids:\n total_weapon_count += 1\n\nprint(f\"\"\"the average of the number of items per character is the total number of items\ndivided by the number of characters! {len(all_items) / len(characters)} \\n\nSimilarly the average number of weapons per charaacter is : {total_weapon_count / len(characters)}\"\"\")\n","sub_path":"module4-acid-and-database-scalability-tradeoffs/mongo_rpg_queries.py","file_name":"mongo_rpg_queries.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294455656","text":"#/home/wdcai/anaconda2/bin/\n\nimport os\nimport sys\nfrom math import pi\nimport matplotlib.pyplot as plt\n\ndef RadarPlot(vec_values, vec_names, y_tick_values, y_tick_names, area_color, ofname_fig):\n\n N = len(vec_names)\n\n x_as = [n / float(N) * 2 * pi for n in range(N)]\n\n # Because our chart will be circular we need to append a copy of the first \n # value of each list at the end of each list with data\n vec_values += vec_values[:1]\n x_as += x_as[:1]\n \n # Set figure size\n plt.figure(figsize=(5,4))\n\n # Set color of axes\n plt.rc('axes', linewidth=1, edgecolor=\"#888888\")\n\n # Create polar plot\n ax = plt.subplot(111, polar=True)\n\n # Set clockwise rotation. 
That is:\n ax.set_theta_offset(pi / 2)\n ax.set_theta_direction(-1)\n\n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set yticks\n plt.yticks(y_tick_values, y_tick_names)\n\n # Plot data\n ax.plot(x_as, vec_values, linewidth=2, linestyle='solid', zorder=3)\n\n # Fill area\n ax.fill(x_as, vec_values, area_color, alpha=0.3)\n\n # Set axes limits\n ylim_min = min(y_tick_values)\n ylim_max = max(y_tick_values)\n plt.ylim(ylim_min, ylim_max)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi\n\n if angle_rad == 0:\n ha, distance_ax = \"center\", (1 + (y_tick_values[-1]-y_tick_values[-2])/8)\n elif 0 < angle_rad < pi:\n ha, distance_ax = \"left\", 1\n elif angle_rad == pi:\n ha, distance_ax = \"center\", 1\n else:\n ha, distance_ax = \"right\", 1\n \n ax.text(angle_rad, y_tick_values[-2] + distance_ax, vec_names[i], size=14, horizontalalignment=ha, verticalalignment=\"center\")\n\n # Show polar plot\n plt.savefig(ofname_fig, dpi=1000)\n plt.show()\n","sub_path":"Python/CommonModule/CommonPlot.py","file_name":"CommonPlot.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438464120","text":"# https://atcoder.jp/contests/abc145/tasks/abc145_e\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n n,T=map(int,input().split())\n AB=[tuple(map(int,input().split())) for _ in range(n)]\n AB.sort()\n\n dp=[0]*T\n ans=-INF\n for i in range(n):\n a,b=AB[i]\n ndp=dp[:]\n for t in range(T):\n if(t+a>>\")\r\n if plrOneInput == \"roll\":\r\n rolledOne = random.randint(1,6)\r\n print(\"Player 1 rolled\", rolledOne)\r\n print(\"It is now player Two's turn \\nType 'roll' to roll your dice , type 'quit' to end your turn or 'exit' to end the game.\")\r\n plrTwoInput = input(\">>>\")\r\n if plrTwoInput == \"roll\":\r\n rolledTwo = random.randint(1,6)\r\n print(\"Player 2 rolled\", rolledTwo)\r\n if rolledOne > rolledTwo:\r\n playerOneScore += 1\r\n print(\"Player 1 wins a point!\",\"\\nPlayer 1 :\", playerOneScore, \"\\nplayer 2:\", playerTwoScore)\r\n elif rolledOne == rolledTwo:\r\n print(\"Womp Womp , it's a tie!\")\r\n else:\r\n playerTwoScore += 1\r\n print(\"Player 2 wins a point!\",\"\\nPlayer 1 :\", playerOneScore, \"\\nplayer 2:\", playerTwoScore)\r\n elif plrTwoInput == \"quit\":\r\n playerOneScore += 1\r\n print(\"Player 2 forfeited this round! Player 1 wins a point!\", \"\\nPlayer 1 :\", playerOneScore, \"\\nplayer 2:\", playerTwoScore )\r\n else:\r\n plrTwoInput == \"exit\"\r\n print(\"Welp Game Ended! \\n Thanks for Playing!\")\r\n break\r\n time.sleep(2)\r\n\r\n \"\"\"trying player 2 and the quits and exit under player 1's elif\"\"\"\r\n elif plrOneInput == \"quit\":\r\n playerTwoScore += 1\r\n print(\"Player 1 forfeited this round! Player 2 wins a point!\",\"\\nPlayer 1 :\", playerOneScore, \"\\nplayer 2:\", playerTwoScore)\r\n\r\n else:\r\n plrOneInput == \"exit\"\r\n print(\"Welp Game Ended! 
\\n Thanks for Playing!\")\r\n break\r\n time.sleep(2)","sub_path":"2playertrial2.py","file_name":"2playertrial2.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413782646","text":"\"\"\"\nThis script tests the impact of additive noise on the estimation\nof multifractal properties.\n\nPrecisely, let X_t be a multifractal random process; and let N_t be the \nnoise. We analyze here the signal Y_t = X_t + sigma*N_t\naiming to see for which values of sigma the log-cumulants of Y_t are \nsufficiently close to the log-cumulants of X_t.\n\"\"\"\n\nimport mfanalysis as mf\nimport os\nimport numpy as np\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nfrom colored_noise import powerlaw_psd_gaussian\nfrom scipy.signal import welch\nfrom scipy.stats import linregress\n\nplt.rcParams.update({'mathtext.default': 'regular', 'font.size': 16})\n\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 18, 8\n\n#-------------------------------------------------------------------------------\n# Functions\n#-------------------------------------------------------------------------------\ndef get_data_from_mat_file(filename):\n contents = loadmat(filename)\n return contents['data'][0]\n\ndef plot_psd(signal, fs = 100, name = '', f1 = 1.0, f2 = 10.0, nperseg = 1024):\n f, px = welch(signal, fs, scaling = 'spectrum', nperseg=nperseg)\n plt.figure()\n plt.loglog(f, px)\n plt.xlabel('frequency [Hz]')\n plt.ylabel('Power spectrum - ' + name)\n if (f1 is not None) and (f2 is not None):\n ff = f[ np.logical_and(f>=f1, f<=f2) ].copy()\n PP = px[ np.logical_and(f>=f1, f<=f2) ].copy()\n log_ff = np.log10(ff)\n log_PP = np.log10(PP)\n slope, intercept, r_value, p_value, std_err = linregress(log_ff,log_PP)\n log_PP_fit = slope*log_ff + intercept\n PP_fit = 10.0**(log_PP_fit)\n plt.loglog(ff, PP_fit, label = 'beta=%f'%(slope))\n plt.legend()\n plt.grid()\n\n#-------------------------------------------------------------------------------\n# Load and normalize data, generate noise\n#-------------------------------------------------------------------------------\n# multifractal random walk (c_1=0.75, c_2=-0.05, N=32768)\n# data_file = 'example_data/mrw07005n32768.mat'\ndata_file = 'example_data/S010.mat'\n\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\ndata_file = os.path.join(current_dir, data_file)\ndata = get_data_from_mat_file(data_file)\n\n# normalize signal\ndata = data / data.std()\n\n# generate noise\nNOISE_TYPE = 'colored'\nnoise_params = {'beta':1.5, 'len':len(data), 'n_sigma':40}\n\nif NOISE_TYPE == 'white':\n noise = np.random.normal(loc = 0, scale = 1.0, size=noise_params['len']) # np.sin(np.arange(len(data))) \nelif NOISE_TYPE == 'colored':\n noise = powerlaw_psd_gaussian(noise_params['beta'], noise_params['len'])\n noise = noise/noise.std()\n\nelif NOISE_TYPE == 'smooth':\n nn = np.arange(noise_params['len'])/noise_params['len'] - 0.5\n noise = nn**3.0\n noise = noise/noise.std()\n\nelif NOISE_TYPE == 'dirac':\n noise = np.random.binomial(1, 5/noise_params['len'], noise_params['len'])\n noise *= 5\n #noise = noise/noise.std()\n\n\n# vector of sigma^2 (variances)\nsigma2 = np.linspace(0., 0.1, noise_params['n_sigma']) # np.array([0., 0.000001]) \n\n\n#-------------------------------------------------------------------------------\n# MFA parameters\n#-------------------------------------------------------------------------------\np_list = [2.0, np.inf]\n\n# Multifractal analysis 
object\nmfa = mf.MFA()\nmfa.wt_name = 'db3'\n# mfa.p = np.inf\nmfa.j1 = 4\nmfa.j2 = 9\nmfa.n_cumul = 3\nmfa.gamint = 1.5 # !!!!!!!!!!!!!!!!!!!!!!!!\nmfa.verbose = 1\nmfa.wtype = 0\n\nmfa.q = np.arange(-8, 9)\n\n# get cumulants\nmfa.analyze(data)\ncp = mfa.cumulants.log_cumulants\nprint(\"Noiseless cumulants: \")\nprint(\"c1 = \", cp[0])\nprint(\"c2 = \", cp[1])\n\n\n#-------------------------------------------------------------------------------\n# Run simulations\n#-------------------------------------------------------------------------------\nc1_list = np.zeros((len(p_list),noise_params['n_sigma']))\nc2_list = np.zeros((len(p_list),noise_params['n_sigma']))\nC1j_list = np.zeros((len(p_list),noise_params['n_sigma'], mfa.j2 - mfa.j1 + 1))\nC2j_list = np.zeros((len(p_list),noise_params['n_sigma'], mfa.j2 - mfa.j1 + 1))\n\nDq_list = np.zeros((len(p_list),noise_params['n_sigma'], len(mfa.q)))\nhq_list = np.zeros((len(p_list),noise_params['n_sigma'], len(mfa.q)))\n\n\n\nfor p_idx, p in enumerate(p_list):\n mfa.p = p\n for idx, ss2 in enumerate(sigma2):\n signal = data + np.sqrt(ss2)*noise \n mfa.analyze(signal)\n cp = mfa.cumulants.log_cumulants\n c1_list[p_idx, idx] = cp[0]\n c2_list[p_idx, idx] = cp[1]\n\n C1j_list[p_idx, idx, :] = mfa.cumulants.values[0, mfa.j1-1:mfa.j2]\n C2j_list[p_idx, idx, :] = mfa.cumulants.values[1, mfa.j1-1:mfa.j2]\n \n\n Dq_list[p_idx, idx, :] = mfa.spectrum.Dq\n hq_list[p_idx, idx, :] = mfa.spectrum.hq\n\n if idx % 15 == 0:\n print(\"--- simulation \", idx)\n\n\n\n#-------------------------------------------------------------------------------\n# Plots\n#-------------------------------------------------------------------------------\n\nsigmas_to_plot = np.percentile(sigma2, [0, 25, 50, 75, 100], interpolation = 'nearest')\nsigma_indexes = []\nj_list = np.arange(mfa.j1, mfa.j2+1)\n\n\nfor ss2 in sigmas_to_plot:\n index = np.argsort( np.abs(sigma2 - ss2) )[0]\n sigma_indexes.append(index)\n\n\n# Log-cumulants\n\nif NOISE_TYPE == 'white':\n title = 'White noise'\n\nelif NOISE_TYPE == 'colored':\n title = 'Colored noise - beta = %0.2f'%noise_params['beta']\n\nelif NOISE_TYPE == 'smooth':\n title = 'Smooth trend'\n\nelif NOISE_TYPE == 'dirac':\n title = 'Dirac train'\n\nplt.figure(1)\nfor p_idx, p in enumerate(p_list):\n plt.subplot(1, 2, 1)\n plt.plot(sigma2, c1_list[p_idx, :], 'o-' ,label = ('p = %0.1f'%p))\n plt.ylabel('$c_1$')\n plt.xlabel('$\\sigma^2$')\n plt.legend()\n plt.title(title)\n plt.grid(True)\n plt.subplot(1, 2, 2)\n # plt.figure(2)\n plt.plot(sigma2, c2_list[p_idx, :], 'o-' ,label = ('p = %0.1f'%p))\n plt.ylabel('$c_2$')\n plt.xlabel('$\\sigma^2$')\n plt.legend()\n plt.title(title)\n plt.grid(True)\n\n\n\n# MF spectrum\nplt.figure(3)\n\nfor p_idx, p in enumerate(p_list):\n\n # plt.figure(3 + p_idx)\n plt.subplot(1, len(p_list), p_idx+1)\n plt.title('p = %0.1f'%p_list[p_idx])\n\n for s_idx, sigma in enumerate(sigmas_to_plot):\n Dq = Dq_list[p_idx, sigma_indexes[s_idx], :]\n hq = hq_list[p_idx, sigma_indexes[s_idx], :]\n\n plt.plot(hq, Dq, 'o-',label = ('$\\sigma^2$ = %0.2f'%sigma))\n\n plt.xlabel('$h$')\n plt.ylabel('$\\mathcal{D}(h)$')\n\n plt.grid(True)\n plt.legend()\n\n\n# # C1j\n# for p_idx, p in enumerate(p_list):\n\n# plt.figure(3 + len(p_list) + p_idx)\n# plt.title('p = %0.1f'%p_list[p_idx])\n\n# for s_idx, sigma in enumerate(sigmas_to_plot):\n# C1j = C1j_list[p_idx, sigma_indexes[s_idx], :]\n\n# plt.plot(j_list, C1j, 'o-',label = ('$\\sigma^2$ = %0.2f'%sigma))\n\n# plt.xlabel('$j$')\n# plt.ylabel('$C_1(j)$')\n\n# plt.grid(True)\n# 
plt.legend()\n\n# # C2j\n# for p_idx, p in enumerate(p_list):\n\n# plt.figure(3 + 2*len(p_list) + p_idx)\n# plt.title('p = %0.1f'%p_list[p_idx])\n\n# for s_idx, sigma in enumerate(sigmas_to_plot):\n# C2j = C2j_list[p_idx, sigma_indexes[s_idx], :]\n\n# plt.plot(j_list, C2j, 'o-',label = ('$\\sigma^2$ = %0.2f'%sigma))\n\n# plt.xlabel('$j$')\n# plt.ylabel('$C_2(j)$')\n\n# plt.grid(True)\n# plt.legend()\n\n\nplt.show()","sub_path":"impact_of_noise.py","file_name":"impact_of_noise.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525462951","text":"#coding=utf-8\n#图片检测 - Dlib版本\nimport cv2\nimport dlib\n\npath = \"./ori_img/Ronald1.jpg\"\nimg = cv2.imread(path)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#人脸分类器\ndetector = dlib.get_frontal_face_detector()\n# 获取人脸检测器\npredictor = dlib.shape_predictor(\n \"./shape_predictor_68_face_landmarks.dat\")\n\ndets = detector(gray, 1)\nfor face in dets:\n # 在图片中标注人脸,并显示\n left = face.left()\n top = face.top()\n right = face.right()\n bottom = face.bottom()\n cv2.rectangle(img, (left, top), (right, bottom), (255 ,0, 0), 2)\n cv2.imshow(\"image\", img)\n cv2.waitKey(0)\n\n shape = predictor(img, face) # 寻找人脸的68个标定点\n i = 1\n # 遍历所有点,打印出其坐标,并圈出来\n for pt in shape.parts():\n pt_pos = (pt.x, pt.y)\n cv2.circle(img, pt_pos, 1, ( 0,255, 0), 2)\n cv2.putText(img, str(i),pt_pos,cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1, cv2.LINE_AA)\n i+=1\n # cv2.waitKey(0)\n cv2.imshow(\"image\", img)\n\nimgsmall = img[ (top-50):(bottom+50),(left-50):(right+50),:]\ncv2.imshow(\"image_small\", imgsmall)\ncv2.waitKey(0)\ncv2.imwrite(\"./out_img/Ronald1_68points.jpg\",imgsmall)\ncv2.destroyAllWindows()\n","sub_path":"detectionDlib.py","file_name":"detectionDlib.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"420292130","text":"from AdvancedAlgorithms.usecase.graph import Graph\nfrom AdvancedAlgorithms.entity.vertex import Vertex\nfrom AdvancedAlgorithms.entity.edge import Edge\n\nclass DFS:\n\n def __init__(self):\n self.__is_visited = dict()\n self.__queue = list()\n self.__search_list = list()\n def run(self, graph):\n self.__queue.clear()\n self.__search_list.clear()\n vertices = graph.vertices\n self.__initial_visited(vertices)\n self.__queue.append(vertices[0])\n self.__is_visited[vertices[0].ID] = True\n while len(self.__queue) != 0:\n vertex = self.__queue.pop(len(self.__queue) - 1)\n self.__is_visited[vertex.ID] = True\n self.__search_list.append(vertex)\n for edge in vertex.edges:\n if not self.__is_visited[edge.destination.ID]:\n self.__queue.append(edge.destination)\n \n return self.__search_list\n \n def __initial_visited(self, vertices):\n self.__is_visited.clear()\n for vertex in vertices:\n self.__is_visited[vertex.ID] = False","sub_path":"AdvancedAlgorithms/algorithms/dfs/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9155736","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sogubaby/.virtualenvs/scrapy_middlewares/lib/python3.7/site-packages/scrapy_cabinet/utils.py\n# Compiled at: 2019-11-14 02:52:46\n# Size of source mod 2**32: 2726 bytes\nimport sys\nfrom importlib import import_module\nfrom typing import 
{"seq_id":"9155736","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sogubaby/.virtualenvs/scrapy_middlewares/lib/python3.7/site-packages/scrapy_cabinet/utils.py\n# Compiled at: 2019-11-14 02:52:46\n# Size of source mod 2**32: 2726 bytes\nimport sys\nfrom importlib import import_module\nfrom typing import AnyStr\nfrom loguru import logger\nlog_config = {'handlers': [\n  {'sink':sys.stdout, \n   'format':'{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}', \n   'colorize':True, \n   'level':'DEBUG', \n   'filter':lambda x: str(x['level']) == 'DEBUG'},\n  {'sink':sys.stdout, \n   'format':'{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}', \n   'colorize':True, \n   'level':'INFO', \n   'filter':lambda x: str(x['level']) == 'INFO'},\n  {'sink':sys.stdout, \n   'format':'{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}', \n   'colorize':True, \n   'level':'WARNING', \n   'filter':lambda x: str(x['level']) == 'WARNING'},\n  {'sink':sys.stdout, \n   'format':'{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}', \n   'colorize':True, \n   'level':'CRITICAL', \n   'filter':lambda x: str(x['level']) == 'CRITICAL'},\n  {'sink':sys.stdout, \n   'format':'{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}', \n   'colorize':True, \n   'level':'ERROR', \n   'filter':lambda x: str(x['level']) == 'ERROR'}]}\nlogger.configure(**log_config)\nLOGGER = logger\n\ndef load_object(path):\n    \"\"\"Load an object given its absolute object path, and return it.\n\n    object can be a class, function, variable or an instance.\n    path e.g.: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n    \"\"\"\n    try:\n        dot = path.rindex('.')\n        if 'constants' in path.lower():\n            if 'type' in path.lower():\n                # For constants/type paths, split one dot earlier so that the\n                # module part of the path stays importable.\n                dot = path.rindex('.', 0, dot)\n    except ValueError:\n        raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n    module, name = path[:dot], path[dot + 1:]\n    mod = import_module(module)\n    try:\n        obj = getattr(mod, name)\n    except AttributeError:\n        try:\n            dot = name.rindex('.')\n            name, type = name[:dot], name[dot + 1:]\n            obj = getattr(mod, name)\n            obj = obj[type]\n        except Exception:\n            raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n    return obj","sub_path":"pycfiles/scrapy_cabinet-0.0.4.macosx-10.9-x86_64.tar/utils.cpython-37.py","file_name":"utils.cpython-37.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
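# A short usage sketch for load_object above; the stdlib path is only an
# illustration (any importable dotted path resolves the same way):
#
#   cls = load_object('collections.OrderedDict')
#   d = cls(a=1)
#
# For paths containing both 'constants' and 'type', one extra attribute level
# is resolved by subscription, roughly:
#
#   load_object('pkg.constants.TYPES.foo')
#   # -> getattr(import_module('pkg.constants'), 'TYPES')['foo']
#
# where 'pkg.constants.TYPES.foo' is a hypothetical example path.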
{"seq_id":"645310290","text":"import subprocess\nimport glob\n\n\n# Use __init__ (not __new__) so that raise TestFailed(...) constructs a real\n# exception instance instead of returning None from __new__.\nclass TestFailed(Exception):\n    def __init__(self, m):\n        super(TestFailed, self).__init__(m)\n        self.message = m\n    def __str__(self):\n        return self.message\n\ndef get_answer(file):\n    f = open(file,'r')\n    answ = f.read()\n    f.close()\n    return int(answ)\n\ndef test_input(input, answer):\n    line = \"python lazy_crypto.py < %s\" %input\n    try:\n        p = subprocess.Popen(line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        ans = int(p.stdout.read()) # int() raises ValueError if the program printed anything non-numeric\n    except Exception as e:\n        raise TestFailed(\"Fail: Execution error\")\n    print(\"[\" + input + \"]: \" + (\"PASS\" if answer == ans else \"FAIL\"))\n    \nfiles = glob.glob(\"*.in\")\nfiles.sort()\nfor test in files:\n    test_input(test, get_answer(test.split('.')[0]+'.ans'))\n","sub_path":"impl_tests/lazy_crypto_test.py","file_name":"lazy_crypto_test.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"572033054","text":"# A startup file to register the ArnoldYeti Op, if this is in\n# $KATANA_RESOURCES/Plugins, it will be exec'd automatically. If it's\n# in $KATANA_RESOURCES/Startup, it would need to be init.py, or called\n# from an existing init.py script.\n\ndef registerArnoldYetiInOp():\n    \"\"\"\n    Registers the ArnoldYeti Op using the NodeTypeBuilder.\n    This is a helper class that takes care of registering a suitable\n    node graph node type, and configuring it to call the supplied method\n    in order to build the Op chain that is used to represent the\n    node in the Op Graph.\n    \"\"\"\n\n    from Katana import Nodes3DAPI\n    from Katana import FnAttribute, FnGeolibServices\n\n    MODE_RIBBON = 0\n    MODE_THICK = 1\n    MODE_ORIENTED = 2\n    modeDict = { 'ribbon' : MODE_RIBBON,\n                 'thick' : MODE_THICK,\n                 'oriented' : MODE_ORIENTED }\n\n    def buildOpChain( node, interface ):\n        \"\"\"\n        Configures the Ops to represent the current state of the node.\n        The calling mechanism in Node3D takes care of ensuring that\n        the underlying Op network is only updated if Op Args and types\n        have changed.\n\n        @type node: C{Nodes3DAPI.NodeTypeBuilder.SubdividedSpace}\n        @type interface: C{Nodes3DAPI.NodeTypeBuilder.BuildChainInterface}\n        @param node: The node to build the Op and OpArgs from (ie: an instance\n        of our node).\n        @param interface: The interface that will configure the Ops in the\n        underlying Op network.\n        \"\"\"\n\n        # Ensure the runtime doesn't error for us if there are no inputs on the\n        # Node - as we want to allow it to exist in isolation. The default is 1\n        interface.setMinRequiredInputs( 0 )\n\n        # We first need the current time for parameter value queries\n        frameTime = interface.getGraphState().getTime()\n\n        # Pass these along through the ops so that they have shutter open/close and number of samples\n        systemArgs = interface.getGraphState().getOpSystemArgs()\n\n        # Get the parameters from our node\n        locationParam = node.getParameter( \"location\" )\n        filenameParam = node.getParameter( \"filename\" )\n        proxyParam = node.getParameter( \"proxy\" )\n        samplesParam = node.getParameter( \"samples\" )\n        imageSearchPathParam = node.getParameter( \"imageSearchPath\" )\n        disableBoundingboxParam = node.getParameter( \"disableBoundingbox\" )\n        lengthParam = node.getParameter( \"length\" )\n        densityParam = node.getParameter( \"density\" )\n        minPixelWidthParam = node.getParameter( \"min_pixel_width\" )\n        modeParam = node.getParameter( \"mode\" )\n        widthParam = node.getParameter( \"width\" )\n        verboseParam = node.getParameter( \"verbose\" )\n        threadsParam = node.getParameter( \"threads\" )\n        makeInteractiveParam = node.getParameter(\"makeInteractive\")\n\n        if not locationParam or not filenameParam:\n            raise RuntimeError( \"Missing node parameters, requires 'location' and 'filename'\" )\n        \n        # Copy array param values out to appropriate attributes\n        samples = []\n        for ci in range(0, samplesParam.getNumChildren()):\n            samples = samples + [ samplesParam.getChildByIndex(ci).getValue(frameTime) ]\n        if samples is not None and len(samples) > 0:\n            opSamplesParam = FnAttribute.FloatAttribute(samples)\n        imageSearchPath = \"\"\n        if imageSearchPathParam.getNumChildren() >= 1:\n            # Start the search path from the first entry (index 0).\n            imageSearchPath = imageSearchPathParam.getChildByIndex(0).getValue(frameTime)\n        for ci in range(1, imageSearchPathParam.getNumChildren()):\n            imageSearchPath = imageSearchPath + \":\" + imageSearchPathParam.getChildByIndex(ci).getValue(frameTime)\n        opImageSearchPathParam = FnAttribute.StringAttribute(imageSearchPath)\n\n        # Build the Op args from our node\n        argsGb = FnAttribute.GroupBuilder()\n        argsGb.set( \"filename\", FnAttribute.StringAttribute(filenameParam.getValue(frameTime)) )\n        argsGb.set( \"proxy\", 
FnAttribute.StringAttribute(proxyParam.getValue(frameTime)) )\n if samples is not None and len(samples) > 0:\n argsGb.set( \"samples\", opSamplesParam )\n argsGb.set( \"length\", FnAttribute.FloatAttribute(lengthParam.getValue(frameTime)) )\n argsGb.set( \"density\", FnAttribute.FloatAttribute(densityParam.getValue(frameTime)) )\n argsGb.set( \"min_pixel_width\", FnAttribute.FloatAttribute(minPixelWidthParam.getValue(frameTime)) )\n argsGb.set( \"width\", FnAttribute.FloatAttribute(widthParam.getValue(frameTime)) )\n argsGb.set( \"imageSearchPath\", opImageSearchPathParam )\n argsGb.set( \"verbose\", FnAttribute.IntAttribute(verboseParam.getValue(frameTime)) )\n argsGb.set( \"threads\", FnAttribute.IntAttribute(threadsParam.getValue(frameTime)) )\n argsGb.set( \"frame\", FnAttribute.IntAttribute(int(round(frameTime))) )\n argsGb.set( \"mode\", FnAttribute.IntAttribute(modeParam.getValue(frameTime)) )\n argsGb.set( \"system\", systemArgs )\n argsGb.set( \"disableBoundingbox\", FnAttribute.IntAttribute(disableBoundingboxParam.getValue(frameTime)) )\n argsGb.set( \"makeInteractive\", FnAttribute.StringAttribute(makeInteractiveParam.getValue(frameTime)) )\n\n argsGb.set( \"xform\", interface.getTransformAsAttribute() )\n exclusiveAttrName, exclusiveAttr = interface.getExclusiveToNameAndAttribute()\n if exclusiveAttr is not None:\n argsGb.set( \"exclusiveTo\", exclusiveAttr )\n\n # We want to use the StaticSceneCreate Op to build the parent\n # hierarchy, so that our op only has to worry about generating its\n # children. Its args are somewhat complex, but fortunately, there\n # is a helper class that makes it all much easier.\n\n rootLocation = locationParam.getValue( frameTime )\n\n sscb = FnGeolibServices.OpArgsBuilders.StaticSceneCreate()\n sscb.addSubOpAtLocation( rootLocation, \"ArnoldYeti_In\", argsGb.build() )\n\n interface.appendOp( \"StaticSceneCreate\", sscb.build() )\n\n def getScenegraphLocation(node, frameTime):\n locationParam = node.getParameter( \"location\" )\n return locationParam.getValue(0.0)\n\n # Here we need to define the parameters for the node, and register the op\n # chain creation callback function\n\n nodeBuilder = Nodes3DAPI.NodeTypeBuilder( \"ArnoldYeti_In\" )\n \n # If we wanted to merge with incoming scene, we could simply allow the\n # node to have an input. Unless you delete locations in your Op, any\n # existing locations will pass-through. 
It is encouraged, though, to avoid\n    # long chains of Ops, as it makes multi-threading and caching less\n    # efficient, so for 'Generator' Ops, no input is preferable.\n    # nodeBuilder.setInputPortNames( (\"in\",) )\n\n    # Parameters can be described by a group attribute\n    paramGb = FnAttribute.GroupBuilder()\n    paramGb.set( \"location\", FnAttribute.StringAttribute(\"/root/world/geo/yetiProc\") )\n    paramGb.set( \"filename\", FnAttribute.StringAttribute(\"\") )\n    paramGb.set( \"proxy\", FnAttribute.StringAttribute(\"\") )\n    paramGb.set( \"density\", FnAttribute.FloatAttribute( 1.0 ) )\n    paramGb.set( \"length\", FnAttribute.FloatAttribute( 1.0 ) )\n    paramGb.set( \"imageSearchPath\", FnAttribute.StringAttribute([ ], 1) )\n    paramGb.set( \"min_pixel_width\", FnAttribute.FloatAttribute( 0.0 ) )\n    paramGb.set( \"width\", FnAttribute.FloatAttribute( 1.0 ) )\n    paramGb.set( \"mode\", FnAttribute.IntAttribute( MODE_RIBBON ) )\n    paramGb.set( \"samples\", FnAttribute.FloatAttribute([ ], 1) )\n    paramGb.set( \"threads\", FnAttribute.IntAttribute( 0 ) )\n    paramGb.set( \"verbose\", FnAttribute.IntAttribute( 2 ) )\n    paramGb.set( \"disableBoundingbox\", FnAttribute.IntAttribute( 1 ) )\n\n    nodeBuilder.addTransformParameters(paramGb)\n    nodeBuilder.addMakeInteractiveParameter(paramGb)\n    nodeBuilder.addInteractiveTransformCallbacks(paramGb)\n\n    nodeBuilder.setParametersTemplateAttr( paramGb.build(), forceArrayNames = ('samples', 'imageSearchPath') )\n\n    nodeBuilder.setHintsForNode( { 'help' : 'Create an Arnold procedural node suitable for invoking Peregrine Labs\' Yeti.' } )\n    \n    nodeBuilder.setHintsForParameter( \"location\", { 'widget' : 'newScenegraphLocation' } )\n    nodeBuilder.setHintsForParameter( \"filename\",\n                                      { 'widget' : 'assetIdInput',\n                                        'sequenceListing' : False,\n                                        'fileTypes':'fur',\n                                        'help' : 'Required; path to Yeti fur cache file' } )\n    nodeBuilder.setHintsForParameter( \"proxy\",\n                                      { 'widget' : 'assetIdInput',\n                                        'sequenceListing' : False,\n                                        'fileTypes':'abc',\n                                        'help' : 'Optional; path to Yeti fur Alembic proxy file' } )\n    nodeBuilder.setHintsForParameter( \"density\",\n                                      { 'help' : 'Density scale for curve population' } )\n    nodeBuilder.setHintsForParameter( \"length\",\n                                      { 'help' : 'Length scale for curves' } )\n    nodeBuilder.setHintsForParameter( \"imageSearchPath\",\n                                      { 'widget' : 'sortableArray',\n                                        'help' : 'Optional; colon-separated paths to images for curve operations' } )\n    nodeBuilder.setHintsForParameter( \"min_pixel_width\",\n                                      { 'help' : 'Arnold min-pixel-width; typically between 0 and 1, can help reduce aliasing' } )\n    nodeBuilder.setHintsForParameter( \"mode\",\n                                      { 'widget' : 'mapper',\n                                        'options' : modeDict,\n                                        'help' : 'Rendering mode of the curves; camera-facing ribbons, thick cylinders, or ribbons oriented by normal vectors.' } )\n    nodeBuilder.setHintsForParameter( \"width\",\n                                      { 'help' : 'Width/radius scale factor for curves' } )\n    nodeBuilder.setHintsForParameter( \"samples\",\n                                      { 'widget' : 'sortableArray',\n                                        'help' : 'Optional; frame-relative motion sample times (e.g. -0.25, 0.25).' 
} )\n    nodeBuilder.setHintsForParameter( \"threads\",\n                                      { 'help' : 'Number of threads for curve generation' } )\n    nodeBuilder.setHintsForParameter( \"verbose\",\n                                      { 'help' : 'Log level for Yeti' } )\n    nodeBuilder.setHintsForParameter('disableBoundingbox',\n                                     {'widget':'checkBox', \n                                      'help' : 'Disable calculating the Yeti fur bounding box'})\n\n    # Register our Op build function\n    nodeBuilder.setBuildOpChainFnc( buildOpChain )\n\n    # Make this available for widgets and parameter expressions\n    nodeBuilder.setGetScenegraphLocationFnc( getScenegraphLocation )\n\n    # Create the new Node3D type\n    nodeBuilder.build()\n\n\nregisterArnoldYetiInOp()\n\n","sub_path":"LightChaserAnim/katana_plugins/ArnoldYeti_In/RegisterArnoldYetiInNode.py","file_name":"RegisterArnoldYetiInNode.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"259602959","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('keyword_ideas', '0003_auto_20150627_1927'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='keyword',\n            name='mean_external_links',\n            field=models.FloatField(null=True, blank=True),\n        ),\n        migrations.AddField(\n            model_name='keyword',\n            name='median_external_links',\n            field=models.FloatField(null=True, blank=True),\n        ),\n    ]\n","sub_path":"keyword_ideas/migrations/0004_auto_20150628_0950.py","file_name":"0004_auto_20150628_0950.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"376871471","text":"import os \nimport subprocess\nimport httplib2\n\n\ndef mount(container , path , core , parms):\n    '''\n    container - the Swift container to expose through s3backer\n    path - local directory where the container is mounted\n    core - CPU core that the s3backer process is pinned to via taskset\n    parms - a dict of additional s3backer options; an empty value means a bare flag\n    '''\n    pp_dict=parms\n    pp_list=[]\n    for k,v in pp_dict.items():\n        if v:\n            pp_list.append('='.join([k,v]))\n        else:\n            pp_list.append(k)\n    \n    subprocess.call(' '.join([\"taskset\",\"-c\",core,\"s3backer\",' '.join(pp_list),container,path]), shell=True) \n    '''\n    taskset -c core s3backer \\\n    --baseURL= \\\n    --accessId= \\ \n    --accessKey= \\\n    --listBlocks \\\n    --blockSize= \\\n    --size= \\\n    --blockCacheSize= \\\n    --blockCacheThreads= \\\n    container path\n\n    #--debug\n    '''\n\n    return\n\n\ndef loop_map(pv,loop_num):\n    loop_dev=\"/dev/loop\"+str(loop_num)\n    raw_file=\"/srv/\"+pv+\"/file\" \n    subprocess.call([\"losetup\", loop_dev, raw_file])\n    \n    return loop_dev\n\n\n\ndef lvm_pv_binding(dev):\n    '''pvcreate /dev/a /dev/b'''\n    subprocess.call([\"pvcreate\", dev])\n    \n\ndef lvm_vg_binding(vg,pvs):\n    '''\n    vgcreate -s %PE_SIZE %VG_NAME %PV_list\n    vg - specified volume group\n    pvs - a list of pv\n    '''\n    PE_SIZE=\"128M\"\n    VG_NAME=vg\n    subprocess.call([' '.join([\"vgcreate\",\"-s\",PE_SIZE,VG_NAME,' '.join(pvs)])],shell=True)\n    return\n\n    \n","sub_path":"lib/simulator/s3backer.py","file_name":"s3backer.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"59516137","text":"# -*- coding: utf-8 -*-\nimport time\nimport numpy as np\nfrom .databackend import DataBackend, DataBackendHandler\nfrom .data import SETTINGS\nfrom collections import deque\nfrom copy import deepcopy\nfrom .alarms import AlarmType\n\nclass TimeDataInputManager:\n    def 
__init__(self, arraysize, data_range):\n self.ymin,self.ymax=data_range\n self.arraysize=arraysize\n self.data=np.arange(self.ymin,self.ymax, (self.ymax-self.ymin)/self.arraysize)\n def get_range(self):\n return (self.ymin,self.ymax)\n\nclass DataInputs:\n \n def __init__(self, xmax, freq):\n self.running=True\n \n self.inputs = {}\n self.inputs[DataBackend.MAXPAW]=0\n self.inputs[DataBackend.TIME]=0\n self.inputs[DataBackend.IE]=0\n self.inputs[DataBackend.PEP]=0\n self.inputs[DataBackend.PEP_ALARM]=False\n self.inputs[DataBackend.FR]=0\n self.inputs[DataBackend.PPLAT]=0\n self.inputs[DataBackend.VM]=0\n self.inputs[DataBackend.PCRETE]=0\n self.inputs[DataBackend.PCRETE_ALARM]=False\n self.inputs[DataBackend.VTE]=0\n self.inputs[DataBackend.VTE_ALARM]=False\n\n self.changed=False\n \n self.index=0\n self.index_zero_time = 0\n self.freeze=False\n self.unfreeze=False\n self.xmax=xmax\n self.freq=freq\n self.arraysize=xmax*freq\n\n self.pressure=TimeDataInputManager(self.arraysize, (-20,100))\n self.flow=TimeDataInputManager(self.arraysize, (-80,80))\n self.volume=TimeDataInputManager(self.arraysize, (0,600))\n\n \n def settings_changed(self,reset=True):\n val = self.changed\n self.changed=False\n return val\n\n def get_index(self):\n return self.index\n\n def make_index(self,timestamp):\n if(timestamp-self.index_zero_time >= self.xmax or self.unfreeze):\n self.index=0\n self.index_zero_time=timestamp\n self.unfreeze=False\n else:\n diff=timestamp-self.index_zero_time\n self.index=int(diff*self.freq)\n def timedata_freeze(self, freeze=True):\n self.freeze=freeze\n self.unfreeze=True\n\nclass SettingManager():\n\n def __init__(self, controller, setting):\n self.vmin = setting.vmin\n self.vmax = setting.vmax\n self.step = setting.step\n self.bigStep = setting.bigStep\n self.default = setting.default\n self.key = setting.key\n self.value = setting.default\n self.controller = controller\n self.synchronized = False\n self.widget=None\n\n def change(self, value):\n # NB: go through the controller to ensure correct data management\n self.controller.change_setting(self.key, value)\n\n def sync(self):\n if(self.widget is not None):\n self.widget.refresh()\n\nclass DataController:\n\n class Handler(DataBackendHandler):\n def __init__(self, parent):\n self.parent=parent\n\n def update_inputs(self, **kwargs):\n self.parent.lastControllerDataTime = time.time()\n if kwargs is not None:\n for key, value in kwargs.items():\n if(key in self.parent.inputs.inputs):\n oldval=self.parent.inputs.inputs[key]\n if oldval != value:\n self.parent.inputs.changed=True\n self.parent.inputs.inputs[key]=value\n # Assume that this function is only called when RESP is received/treated\n # (Only wrong with DatabackendDummy now)\n if len(self.parent.historyDataQueue) == 8:\n self.parent.historyDataQueue.popleft()\n self.parent.historyDataQueue.append(deepcopy(self.parent.inputs))\n self.parent.inputs.inputs[DataBackend.MAXPAW] = 0\n self.parent.checkHistoryForAlarms()\n \n def update_timedata(self,timestamp, pressure, flow, volume):\n self.parent.lastControllerDataTime = time.time()\n if pressure > self.parent.inputs.inputs[DataBackend.MAXPAW]:\n self.parent.inputs.inputs[DataBackend.MAXPAW] = pressure\n if not self.parent.inputs.freeze:\n prevIndex = self.parent.inputs.index\n prevPressure = self.parent.inputs.pressure.data[self.parent.inputs.index]\n prevFlow = self.parent.inputs.flow.data[self.parent.inputs.index]\n prevVolume = self.parent.inputs.volume.data[self.parent.inputs.index]\n\n 
self.parent.inputs.inputs[DataBackend.TIME] = timestamp\n self.parent.inputs.make_index(timestamp)\n\n index = self.parent.inputs.index\n dataLen = len(self.parent.inputs.pressure.data)\n if self.parent.inputs.index < prevIndex:\n index += len(self.parent.inputs.pressure.data)\n # linear interpolation between prev and current index\n for idx in range(prevIndex + 1, index + 1):\n coeff = (idx - prevIndex) / (self.parent.inputs.index - prevIndex)\n self.parent.inputs.pressure.data[idx % dataLen] = coeff * pressure + (1 - coeff) * prevPressure\n self.parent.inputs.flow.data[idx % dataLen] = coeff * flow + (1 - coeff) * prevFlow\n self.parent.inputs.volume.data[idx % dataLen] = coeff * volume + (1 - coeff) * prevVolume\n\n def received_setting(self, key, value):\n self.parent.lastControllerDataTime = time.time()\n if key in self.parent.settings:\n if (key == DataBackend.IE):\n value /= 10\n self.parent.settings[key].value = value\n self.parent.settings[key].synchronized = True\n self.parent.settings[key].sync()\n\n def received_alarm(self, alarmTab):\n if( isinstance(alarmTab, list)):\n for i in alarmTab:\n self.parent.activeAlarms[i] = alarmTab[i]\n\n def alarmPerteCtrl(self, isActive):\n self.parent.activeAlarms[AlarmType.LOST_CPU] = isActive\n\n \n def __init__(self, backend, mainLoop):\n self.backend=backend\n self.mainLoop = mainLoop\n self.inputs=None\n self.repost_stop_exp = False\n self.repost_stop_ins = False\n self.repost_stop_exp_posted = False\n self.repost_stop_ins_posted = False\n self.historyDataQueue = deque()\n #calculate alarms are just to display the background of Mesure in red\n # 8 is to use the enum alarmtype\n self.calculateAlarms = [False] * 8\n self.activeAlarms = [False] * 16\n self.lastControllerDataTime = 0\n self.controllerAlarm = None\n\n self.reset_settings()\n\n def reset_settings(self):\n self.settings = {setting.key: SettingManager(self, setting) for setting in SETTINGS.values()}\n\n def init_inputs(self, xmax, freq):\n self.inputs=DataInputs(xmax,freq)\n self.handler = DataController.Handler(self)\n self.backend.set_handler(self.handler)\n \n def post_stop_exp(self, time_ms):\n # repost 100 msec before timeout end to avoid breath restart\n if self.repost_stop_exp:\n self.backend.stop_exp(500)\n self.mainLoop.after(time_ms - 100, self.post_stop_exp, time_ms)\n else:\n self.repost_stop_exp_posted = False\n\n def stop_exp(self, on):\n if (on):\n self.repost_stop_exp = True\n self.backend.stop_exp(500)\n if self.repost_stop_exp_posted == False: # if we don't already have a running timer\n self.repost_stop_exp_posted = True\n self.mainLoop.after(400, self.post_stop_exp, 500)\n else:\n self.backend.stop_exp(0)\n self.repost_stop_exp = False\n\n def post_stop_ins(self, time_ms):\n # repost 100 msec before timeout end to avoid breath restart\n if self.repost_stop_ins:\n self.backend.stop_ins(500)\n self.mainLoop.after(time_ms - 100, self.post_stop_ins, time_ms)\n else:\n self.repost_stop_ins_posted = False\n\n def stop_ins(self, on):\n if (on):\n self.repost_stop_ins = True\n self.backend.stop_ins(500)\n if self.repost_stop_ins_posted == False: # if we don't already have a running timer\n self.repost_stop_ins_posted = True\n self.mainLoop.after(400, self.post_stop_ins, 500)\n else:\n self.backend.stop_ins(0)\n self.repost_stop_ins = False\n\n def pause_bip(self):\n self.backend.pause_bip(120 * 1000)\n\n def change_setting(self, key, value):\n setting = self.settings[key]\n if setting.vmin <= value <= setting.vmax:\n setting.value = value\n setting.synchronized = 
False\n if (key == DataBackend.IE):\n value = int(value * 10)\n self.backend.set_setting(key, value)\n\n def get_setting(self, key):\n \"\"\"\n Returns the local value of a setting and whether it is synchronized\n with the controller as a pair (value, synchronized). Values might be\n desynchronized during connection or when a new change is not acked yet.\n \"\"\"\n return (self.settings[key].value, self.settings[key].value == self.backend.setings[key])\n\n def checkHistoryForAlarms(self):\n ## check the 8 last cycles data, from last one to older one\n ## switch to 0 when no need to check in older cycles\n Pmax_cycles = 2\n Pmin_startFailing = -1\n VTmin_cycles = 3\n VTmax_cycles = 3\n VMmin_cycles = 3\n PEPmax_cycles = 3\n PEPmin_cycles = 3\n for inp in reversed(self.historyDataQueue):\n if Pmax_cycles != 0:\n if inp.inputs[DataBackend.MAXPAW] >= max(self.settings[DataBackend.PMAX].value, inp.inputs[DataBackend.PEP] + 10):\n Pmax_cycles -= 1\n if Pmax_cycles == 0:\n self.calculateAlarms[AlarmType.PRESSION_MAX] = True\n else:\n Pmax_cycles = 0\n self.calculateAlarms[AlarmType.PRESSION_MAX] = False\n if Pmin_startFailing != 0:\n if inp.inputs[DataBackend.PCRETE] <= max(self.settings[DataBackend.PMIN].value, inp.inputs[DataBackend.PEP] + 2):\n if Pmin_startFailing == -1:\n Pmin_startFailing = inp.inputs[DataBackend.TIME]\n else:\n if Pmin_startFailing - inp.inputs[DataBackend.TIME] > 15:\n self.activeAlarms[AlarmType.PRESSION_MIN] = True\n else:\n Pmin_startFailing = 0\n self.calculateAlarms[AlarmType.PRESSION_MIN] = False\n if VTmin_cycles != 0:\n if inp.inputs[DataBackend.VTE] <= self.settings[DataBackend.VTMIN].value:\n VTmin_cycles -= 1\n if VTmin_cycles == 0:\n self.calculateAlarms[AlarmType.VOLUME_COURANT_MIN] = True\n else:\n VTmin_cycles = 0\n self.calculateAlarms[AlarmType.VOLUME_COURANT_MIN] = False\n if VTmax_cycles != 0:\n if inp.inputs[DataBackend.VTE] >= self.settings[DataBackend.VTMAX].value:\n VTmax_cycles -= 1\n if VTmax_cycles == 0:\n self.calculateAlarms[AlarmType.VOLUME_COURANT_MAX] = True\n else:\n VTmax_cycles = 0\n self.calculateAlarms[AlarmType.VOLUME_COURANT_MAX] = False\n if VMmin_cycles != 0:\n if inp.inputs[DataBackend.VM] <= self.settings[DataBackend.VMMIN].value:\n VMmin_cycles -= 1\n if VMmin_cycles == 0:\n self.calculateAlarms[AlarmType.VOLUME_MINUTE] = True\n else:\n VMmin_cycles = 0\n self.calculateAlarms[AlarmType.VOLUME_MINUTE] = False\n if PEPmax_cycles != 0:\n if inp.inputs[DataBackend.PEP] >= self.settings[DataBackend.PEP].value + 2:\n PEPmax_cycles -= 1\n if PEPmax_cycles == 0:\n self.calculateAlarms[AlarmType.PEP_MAX] = True\n else:\n PEPmax_cycles = 0\n self.calculateAlarms[AlarmType.PEP_MAX] = False\n if PEPmin_cycles != 0:\n if inp.inputs[DataBackend.PEP] <= self.settings[DataBackend.PEP].value - 2:\n PEPmin_cycles -= 1\n if PEPmin_cycles == 0:\n self.calculateAlarms[AlarmType.PEP_MIN] = True\n else:\n PEPmin_cycles = 0\n self.calculateAlarms[AlarmType.PEP_MIN] = False\n\n def GetAlarmState(self, alarmtype):\n return self.activeAlarms[alarmtype]\n","sub_path":"monitor/datacontroller.py","file_name":"datacontroller.py","file_ext":"py","file_size_in_byte":12994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474023485","text":"\ndef _uppercase_formatter(annotation):\n return '[{}]'.format(annotation[\"tag\"].upper())\n\n\ndef mask_annotations(doc_text, annotations, replacement_formatter = _uppercase_formatter):\n \"\"\"Utility function to replace sensitive PHI spans with a placeholder.\"\"\"\n # 
Amount of characters by which start point of annotation is adjusted\n # Positive shift if replacement is longer than original annotation\n # Negative shift if replacement is shorter\n shift = 0\n\n original_text_pointer = 0\n text_rewritten = ''\n annotations_rewritten = []\n\n for annotation in annotations:\n replacement = replacement_formatter(annotation)\n part = doc_text[original_text_pointer:annotation[\"start\"]]\n\n start = annotation[\"start\"] + shift\n end = start + len(replacement)\n shift += len(replacement) - len(annotation[\"text\"])\n\n text_rewritten += part + replacement\n original_text_pointer = annotation[\"end\"]\n\n text_rewritten += doc_text[original_text_pointer:]\n return text_rewritten\n","sub_path":"deidentify/util/replace_phi.py","file_name":"replace_phi.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230280117","text":"\"\"\"Prepare images for training\"\"\"\nimport argparse\nimport sys\nimport logging\nimport os\nfrom os.path import join, isdir\n\nimport cv2\nimport numpy as np\nimport pylab as pl\nfrom keras.preprocessing.image import Iterator\nfrom sklearn.model_selection import train_test_split\nfrom scipy.stats import truncnorm\n\nlog = logging.getLogger(__name__)\n\n\ndef make_iterators(data_path, image_size, batch_size,\n test_size=0.25, transform=True):\n \"\"\"Create training and validation image iterators\n\n Collects all samples, splits them into training and validation sets,\n and creates an iterator for each.\n\n Assumes file structure under data_path as follows:\n\n data_path//png/.png\n\n where classname and samplename can be anything.\n This trains a binary classifier, so currently only supports two classes.\n\n Parameters\n ----------\n data_path : str\n path to dataset (structured as assessment dataset)\n image_size : tuple\n image size to resize to for training\n batch_size : int\n number of images in each training batch\n test_size : float\n proportion of the data to set aside for testing\n transform : bool\n whether to randomly scale and rotate the images\n\n Returns\n -------\n train_iter : ImageIterator\n trainingset iterator\n val_iter : ImageIterator\n validationset iterator\n class_map : dict\n maps 0/1 model outputs to class names\n \"\"\"\n # find the class names\n clses = [fn for fn in os.listdir(data_path) if isdir(join(data_path, fn))]\n assert len(clses) == 2, \"Only supports 2 classes, got %d\" % len(clses)\n\n # find all the files\n filenames = []\n labels = []\n for i, class_name in enumerate(clses):\n img_path = join(data_path, class_name, 'png')\n fns = [join(img_path, fn) for fn in os.listdir(img_path)\n if fn.endswith('.png') or fn.endswith('.jpg')]\n filenames.extend(fns)\n labels.extend([i]*len(fns))\n\n # split data and make iterators\n train_fns, val_fns, train_labels, val_labels = train_test_split(\n filenames, labels, test_size=test_size, stratify=labels,\n )\n train_iter = ImageIterator(\n train_fns, train_labels, image_size, batch_size, transform=transform\n )\n val_iter = ImageIterator(\n val_fns, val_labels, image_size, batch_size, transform=transform\n )\n\n # store the class names for during prediction\n class_map = {i: name for i, name in enumerate(clses)}\n return train_iter, val_iter, class_map\n\n\ndef sample_trunc_normal(mean, std, minimum, maximum):\n \"\"\"Generates truncated normal samples\n\n Parameters\n ----------\n mean : float\n mean of the normal distribution\n std : float\n standard deviation of 
the normal distribution\n minimum : float\n truncate below this value\n maximum : float\n truncate above this value\n\n Returns\n -------\n value : float\n sample from specified distribution\n \"\"\"\n x = (minimum - mean) / (std + 1e-10)\n y = (maximum - mean) / (std + 1e-10)\n return truncnorm.rvs(x, y, loc=mean, scale=std)\n\n\nclass ImageIterator(Iterator):\n \"\"\"Iterator that generates batches of images and labels for training\"\"\"\n\n def __init__(self, filenames, labels, image_size=(256, 256),\n batch_size=32, shuffle=True, seed=None, transform=True):\n \"\"\"Initialize image iterator\n\n Parameters\n ----------\n filenames : list\n paths to image filenames\n labels : list\n list of class labels for each image\n image_size : tuple\n resize images to this size (width, height)\n batch_size : int\n number of images to return per iteration\n shuffle : bool\n whether to shuffle images\n seed : int\n random seed to use\n transform : bool\n whether to apply random rotation and scaling to each image\n \"\"\"\n self.filenames = filenames\n self.labels = labels\n self.image_size = tuple(image_size)\n self.num_samples = len(self.filenames)\n self.transform = transform\n log.info('Found %d images.', self.num_samples)\n super(ImageIterator, self).__init__(\n self.num_samples, batch_size, shuffle, seed\n )\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Load next batch of images\n\n Parameters\n ----------\n index_array : list\n dataset indices to include in batch\n\n Returns\n -------\n imgs : ndarray\n images for current batch in single array\n labels : ndarray\n ground truth for current batch in single array\n \"\"\"\n # prepare arrays to hold batch data\n imgs = np.zeros(\n (len(index_array),) + self.image_size + (3,), np.float32\n )\n labels = np.zeros((len(index_array), 1), np.float32)\n\n # build batch of image/label data\n for i, j in enumerate(index_array):\n imgs[i] = self.prepare_image(self.filenames[j])\n labels[i] = self.labels[j]\n return imgs, labels\n\n def prepare_image(self, filename):\n \"\"\"Prepare image for training\n\n Resize image and randomly scale and rotate\n\n Parameters\n ----------\n filename : str\n image to prepare\n\n Returns\n -------\n image : nparray\n prepared image\n \"\"\"\n image = cv2.imread(filename, cv2.IMREAD_COLOR)\n image = image.astype(np.float32) / 255.\n\n w, h = self.image_size\n scale = h / np.min(image.shape[:2])\n\n if self.transform:\n # sample transform parameters\n angle = sample_trunc_normal(0, 15, -45, 45)\n scale *= sample_trunc_normal(1, 0.1, .8, 1.2)\n x = sample_trunc_normal(0.5, 0.15, 0.3, 0.7)\n y = sample_trunc_normal(0.5, 0.15, 0.3, 0.7)\n else:\n # default no-op params\n angle = 0\n y = .5\n x = .5\n\n y, x = h*y, w*x\n\n log.debug(\n \"Sampled vars: angle %2.2f, scale %2.2f, center %2.3f, %2.3f\",\n angle, scale, x, y\n )\n\n # determine rotation matrices\n rot = cv2.getRotationMatrix2D((x, y), angle, scale)\n # apply shift to rotation center\n rot[0, 2] += (w/2)*scale - x\n rot[1, 2] += (h/2)*scale - y\n # warp image\n image = cv2.warpAffine(\n image, rot, self.image_size, None,\n cv2.INTER_LINEAR, cv2.BORDER_REFLECT\n )\n return image\n\n def next(self):\n \"\"\"Returns the next batch.\n \"\"\"\n with self.lock:\n index_array = next(self.index_generator)\n return self._get_batches_of_transformed_samples(index_array)\n\n\nif __name__ == '__main__':\n # pylint: disable=invalid-name\n parser = argparse.ArgumentParser(\n description='Display ImageIterator samples')\n parser.add_argument(\n '-d', 
'--debug', action='store_true',\n help='enable debug logging',\n )\n parser.add_argument(\n '-s', '--image_size', type=int, nargs=2, default=(256, 256),\n help='training image size (w*h)',\n )\n parser.add_argument(\n '-t', '--transform', action='store_true',\n help='randomly rotate and scale input',\n )\n parser.add_argument(\n 'data_path', type=str,\n help='data directory from the assessment, with bowl and vase images',\n )\n\n args = parser.parse_args()\n loglvl = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(\n stream=sys.stdout,\n level=loglvl,\n format='[%(asctime)s: %(levelname)s] %(message)s'\n )\n\n # display some data samples\n log.info(\"Make iterators\")\n train_iter, _, class_map = make_iterators(\n args.data_path, args.image_size, 8,\n test_size=2, transform=args.transform\n )\n for imgs, labels in train_iter:\n pl.figure()\n for i in range(8):\n pl.subplot(2, 4, i+1)\n pl.imshow(cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB))\n pl.title(\"class: %s\" % class_map[int(labels[i])])\n pl.axis('off')\n pl.tight_layout()\n pl.show()\n pl.close('all')\n","sub_path":"pots/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530185178","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAdjust seed line\n\n@author: dcortex\n\"\"\"\nfrom os.path import join, basename\nfrom os import chdir\nimport numpy as np\nimport nibabel as nib\nfrom sklearn.neighbors import NearestNeighbors\nimport networkx as nx\nfrom scipy.interpolate import splprep, splev\n\ndef sort_points(seeds):\n \"\"\"\n Returns sorted 'seeds' (np.ndarray) by their nearest neighbors\n \"\"\"\n if len(seeds)==0: return np.array([])\n clf = NearestNeighbors(n_neighbors=2).fit(seeds)\n G = clf.kneighbors_graph() # sparse N x N matrix\n \n T = nx.from_scipy_sparse_matrix(G)\n \n paths = [list(nx.dfs_preorder_nodes(T, i)) for i in range(len(seeds))]\n\n mindist = np.inf\n minidx = 0\n \n for i in range(len(seeds)):\n p = paths[i] # order of nodes\n ordered = seeds[p] # ordered nodes\n # find cost of that order by the sum of euclidean distances between points (i) and (i+1)\n cost = (((ordered[:-1] - ordered[1:])**2).sum(1)).sum()\n if cost < mindist:\n mindist = cost\n minidx = i\n seeds = seeds[paths[minidx]]\n # Medial a lateral\n if seeds[0][1] < seeds[-1][1]: seeds = seeds[::-1]\n return seeds\n\ndef smooth_curve(x, y, s=0.5):\n tck, u = splprep([x, y], s=s)\n smooth_points = np.array(splev(u, tck)).T\n return smooth_points\n\ndef get_seeds_from_nii(f_name, subject, side='l', smooth=False, save=True, s=0.1, \n save_folder='~/Descargas',):\n lines_volume = nib.load(f_name).get_fdata()\n \n seeds_dict = {}\n for slice_n in range(10,16):\n \n sx, sy = np.array(np.nonzero(lines_volume[:,:,slice_n]))\n seeds = np.array([sx,sy]).T\n \n if len(seeds)==0: continue\n \n seeds = sort_points(seeds)\n if smooth: seeds = smooth_curve(*seeds.T, s=s)\n \n # Add z coordinate\n ones = np.ones([len(seeds), 1])\n seeds = np.concatenate((seeds, slice_n*ones), axis=1)[1:,:] \n # remove first entry because of reasons ^\n \n if save:\n seeds_name = f'{subject}_{side}_{slice_n}_seeds'\n if smooth: seeds_name += '_smooth'\n \n np.savetxt(join(save_folder, basename(seeds_name)+'.txt'), seeds)\n print(f'\\n Created file: {basename(seeds_name)}.txt in: {save_folder}') \n\n seeds_dict[seeds_name] = seeds\n \n return seeds_dict\n 
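# A rough usage sketch for the functions above; the file path follows the
# commented-out example further down ('minc/{subject}_{side}_outline.nii' with
# subject '37A') and is illustrative only:
#
#   seeds = get_seeds_from_nii("minc/37A_l_outline.nii", subject="37A",
#                              side="l", smooth=True, save=False)
#
# sort_points itself can be sanity-checked on a toy set of 2-D points:
#
#   pts = np.array([[0, 0], [5, 0], [1, 0], [4, 0]])
#   sort_points(pts)   # roughly: rows reordered so consecutive rows are nearest neighbours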
\n#_____________________________________________________________________________ \n\n\nif __name__ == \"__main__\":\n import sys\n import subprocess\n from os.path import dirname\n \n #subject = sys.argv[1] # subject = '37A\n #side = 'l'\n \n #f_name = f'minc/{subject}_{side}_outline.nii' \n f_name = sys.argv[1]\n out_dir = sys.argv[2]\n prefix = sys.argv[3]\n try:\n n_seeds = sys.argv[4]\n except IndexError:\n n_seeds = 150\n print(f'\\n Using {n_seeds} seeds')\n\n #subject = basename(f_name).split('_')[0]\n side = basename(f_name).split('_')[1]\n seeds = get_seeds_from_nii(f_name, subject=prefix, side=side, smooth=True, save=True,\n s=10, save_folder=out_dir, )\n\n #out_dir = dirname(f_name)\n for seeds_name in list(seeds.keys()):\n \n convert1 = (f\"tckconvert {join(out_dir, seeds_name)}.txt\"\n f\" {join(out_dir, seeds_name)}.tck \") \n # -voxel2scanner ../{subject}_x2.nii \n resample = (f\"tckresample -num_points {n_seeds} -nthreads 0\"\n f\" {join(out_dir, seeds_name)}.tck\"\n f\" {join(out_dir, seeds_name)}_resampled.tck\")\n convert2 = (f\"tckconvert {join(out_dir, seeds_name)}_resampled.tck\"\n f\" {join(out_dir, seeds_name)}_resampled_[].txt\")\n rename = (f\"mv {join(out_dir, seeds_name)}_resampled_0000000.txt\"\n f\" {join(out_dir, seeds_name)}_resampled.txt\")\n for my_command in [convert1, resample, convert2, rename]:\n process = subprocess.Popen(my_command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n \n print((f\"\\n Created file: {join(out_dir, seeds_name)}_resampled\"\n f\"(txt & tck) in: {out_dir}\\n\"))\n\n#!tckconvert {seeds_name}.txt {seeds_name}.tck -voxel2scanner ../{subject}_x2.nii\n#!tckresample -num_points 150 -nthreads 0 {seeds_name}.tck {seeds_name}_resampled.tck\n#!tckconvert {seeds_name}_resampled.tck {seeds_name}_resampled_[].txt\n \n sys.exit()\n\n\n\n\n\n\n\n ","sub_path":"get_seeds.py","file_name":"get_seeds.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530320097","text":"import os\nimport argparse\nimport torch\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom time import time_ns\n\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nfrom densenet import densenet121\nfrom dataset import load_dataset\nfrom state import save_checkpoint, load_checkpoint\n\n\ndef train(model, optimizer, train_loader):\n model.train()\n for (x, y) in train_loader:\n x = x.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n\n optimizer.zero_grad()\n\n out = model(x)\n out = F.log_softmax(out, dim=1)\n loss = F.nll_loss(out, y)\n\n loss.backward()\n optimizer.step()\n\n\ndef test(model, val_loader):\n model.eval()\n correct = 0.\n with tqdm(total=len(val_loader.dataset)) as progress_bar:\n with torch.no_grad():\n for (x, y) in val_loader:\n x = x.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n\n out = model(x)\n pred = F.log_softmax(out, dim=1).max(1)[1]\n\n correct += pred.eq(y).cpu().sum().item()\n progress_bar.update(x.size(0))\n accuracy = (100. 
* correct) / len(val_loader.dataset)\n    return accuracy\n\n\ndef worker(device_id, args):\n    rank_id = args.nr * args.gpus + device_id\n    dist.init_process_group(\n        backend='nccl',\n        init_method='env://',\n        world_size=args.world_size,\n        rank=rank_id\n    )\n    torch.cuda.set_device(device_id)\n\n    train_set, val_set = load_dataset(args.dataset, args.dataroot)\n    train_sampler = torch.utils.data.distributed.DistributedSampler(\n        train_set,\n        num_replicas=args.world_size,\n        rank=rank_id,\n        shuffle=True\n    )\n    train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size,\n            shuffle=False, num_workers=3, pin_memory=True, sampler=train_sampler)\n    val_loader = torch.utils.data.DataLoader(dataset=val_set, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)\n\n    model = densenet121(pretrained=True, num_classes=args.num_classes, memory_efficient=True).cuda(device_id)\n    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=True)\n    model = nn.parallel.DistributedDataParallel(model, device_ids=[device_id])\n    state = load_checkpoint(args.cp_file, device_id, model, optimizer)\n\n    cudnn.benchmark = True\n\n    start_epoch = state.epoch + 1\n    for epoch in range(start_epoch, args.max_epochs):\n        t0 = time_ns()\n\n        train(model, optimizer, train_loader)\n\n        t1 = time_ns()\n        delta = (t1 - t0) / (10 ** 9)\n        print(f\"Device {device_id} - Train time: {delta} sec\")\n\n        if device_id == 0:\n            accuracy = test(model, val_loader)\n            print(f\"Accuracy: {accuracy}%\")\n\n        if epoch in [int(args.max_epochs * 0.5), int(args.max_epochs * 0.75)]:\n            optimizer.param_groups[0]['lr'] /= 10.\n\n        # Record the finished epoch before checkpointing, so a resumed run\n        # continues from the next epoch instead of repeating this one.\n        state.epoch = epoch\n        if epoch % args.save_interval == 0 and device_id == 0:\n            save_checkpoint(state, args.cp_file)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('DDP training')\n    parser.add_argument('--dataset',\n                        help='dataset',\n                        type=str,\n                        default='CIFAR10')\n    parser.add_argument('--dataroot',\n                        help='dataroot',\n                        type=str,\n                        default='./data')\n    parser.add_argument('--batch_size',\n                        help='total batch size',\n                        type=int,\n                        default=64)\n    parser.add_argument('--max_epochs',\n                        help='maximum number of training epoches.',\n                        type=int,\n                        default=200)\n    parser.add_argument('--save_interval',\n                        help='save interval in epochs',\n                        type=int,\n                        default=10)\n    parser.add_argument('--lr',\n                        help='lr.',\n                        type=float,\n                        default=0.1)\n    parser.add_argument('--num_classes',\n                        help='number of classes.',\n                        type=int,\n                        default=10)\n    parser.add_argument('--cp_file',\n                        help='checkpoint file',\n                        type=str,\n                        default='./checkpoints/CIFAR10.pt')\n\n    parser.add_argument('-n', '--nodes',\n                        default=1,\n                        type=int,\n                        metavar='N')\n    parser.add_argument('-g', '--gpus',\n                        default=2,\n                        type=int,\n                        help='number of gpus per node')\n    parser.add_argument('-nr', '--nr',\n                        default=0,\n                        type=int,\n                        help='ranking within the nodes')\n\n    args = parser.parse_args()\n    args.world_size = args.gpus * args.nodes\n    args.batch_size = args.batch_size // args.gpus\n    os.environ['MASTER_ADDR'] = 'localhost'\n    os.environ['MASTER_PORT'] = '8080'\n    mp.spawn(worker, nprocs=args.gpus, args=(args,))\n","sub_path":"train_ddp.py","file_name":"train_ddp.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
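# A possible launch for the DDP script above, assuming a single machine with
# two GPUs (the script itself sets MASTER_ADDR/MASTER_PORT and spawns one
# process per GPU):
#
#   python train_ddp.py --dataset CIFAR10 --batch_size 64 -n 1 -g 2 -nr 0
#
# With -n nodes and -g gpus per node, world_size = nodes * gpus, and the
# per-process batch size becomes batch_size // gpus.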
{"seq_id":"101298109","text":"##############################################################################\n#\n# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nfrom __future__ import print_function\n\nimport sys\nimport pkg_resources\nfrom logging import Formatter\nfrom slapos.recipe.librecipe import BaseSlapRecipe\n\nclass NoSQLTestBed(BaseSlapRecipe):\n\n  def _install(self):\n    self.parameter_dict = self.computer_partition.getInstanceParameterDict()\n    try:\n      entry_point = next(pkg_resources.iter_entry_points(\n          group='slapos.recipe.nosqltestbed.plugin',\n          name=self.parameter_dict.get('plugin', 'kumo')))\n      plugin_class = entry_point.load()\n\n      testbed = plugin_class()\n    except Exception:\n      # Log the traceback and fall through to returning None.\n      print(Formatter().formatException(sys.exc_info()))\n      return None\n\n    software_type = self.parameter_dict.get('slap_software_type', 'default')\n    if software_type is None or software_type == 'RootSoftwareInstance':\n      software_type = 'default'\n    if \"run_%s\" % software_type in dir(testbed) and \\\n       callable(getattr(testbed, \"run_%s\" % software_type)):\n      return getattr(testbed, \"run_%s\" % software_type)(self)\n    else:\n      raise NotImplementedError(\"Unsupported software type %s\" % software_type)\n\n","sub_path":"slapos/recipe/nosqltestbed/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"247842195","text":"from datetime import datetime\nimport json\nfrom nose.tools import *\nimport base64\nimport os\n\nfrom tests import test_app, check_content_type, setup_func, api_prefix, check_error_code, testfiles_path\n\nendpoint = '%suploads/entityphotos/' % api_prefix\n\n\n\n@with_setup(setup_func)\ndef test_entityphoto_endpoints():\n    \"\"\"\n    Tests CRUD functionality of the entityphoto endpoints.\n    \"\"\"\n    # Create test user and login JWT\n    d = dict(nickname=\"Jule\", email=\"juliane@wurm.de\", password=\"secret123!\", installAdmin=True)\n    rv = test_app.post('%susers/' % api_prefix, data=d)\n    check_content_type(rv.headers)\n    eq_(rv.status_code, 201)\n    resp = json.loads(rv.data)['data']\n    eq_(resp['nickname'], \"Jule\")\n    eq_(resp['email'], \"juliane@wurm.de\")\n    ok_(datetime.now() >= datetime.strptime(resp['time_created'][:-6], '%a, %d %b %Y %H:%M:%S'))\n    encoded_credentials = base64.b64encode('juliane@wurm.de:secret123!')\n    h = [('Authorization', 'Basic %s' % encoded_credentials)]\n    rv = test_app.get('%slogin/' % api_prefix, headers=h)\n    eq_(rv.status_code, 200)\n    login_jwt = 
json.loads(rv.data)['data']['loginJWT']\n h = [('loginJWT', login_jwt)]\n\n # Create test entity\n d = dict(name=\"Nahetal\", confirmed=True)\n rv = test_app.post('%sareas/' % api_prefix, data=d, headers=h)\n check_content_type(rv.headers)\n eq_(rv.status_code, 201)\n resp = json.loads(rv.data)['data']\n eq_(resp['name'], \"Nahetal\")\n ok_(datetime.now() >= datetime.strptime(resp['time_created'][:-6], '%a, %d %b %Y %H:%M:%S'))\n entity_id = resp['id']\n\n # GET (empty) collection\n # rv = test_app.get(endpoint)\n # check_content_type(rv.headers)\n # resp = json.loads(rv.data)['data']\n # eq_(rv.status_code, 200)\n # eq_(len(resp), 0)\n\n # POST with missing parameter\n # d = dict()\n # rv = test_app.post(endpoint, data=d, headers=h)\n # check_content_type(rv.headers)\n # eq_(rv.status_code, 400)\n\n # POST\n d = dict(\n text='Schones Nahetal ist schon!',\n entity_id=entity_id,\n entityphoto=open(os.path.join(testfiles_path,'w3000h2000.jpg'), 'r+b'))\n rv = test_app.post(endpoint, data=d, headers=h)\n check_content_type(rv.headers)\n eq_(rv.status_code, 201)\n resp = json.loads(rv.data)['data']\n eq_(resp['text'], \"Schones Nahetal ist schon!\")\n ok_(datetime.now() >= datetime.strptime(resp['time_created'][:-6], '%a, %d %b %Y %H:%M:%S'))","sub_path":"server/tests/testentityphotosets.py","file_name":"testentityphotosets.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"53484783","text":"import xml.etree.ElementTree as ElementTree\nimport os\nfrom diesel.templates import template_call, load_templates_from_lisp\n\nfrom collections import defaultdict as ddict\n\n\nclass Morph:\n def __init__(self, word, cat, to):\n self.word = word\n self.cat = cat\n self.to = to\n\n def __str__(self):\n return \"{}({})->{}\".format(self.cat, self.word, self.to)\n\n def __repr__(self):\n return str(self)\n\n def __hash__(self):\n return hash(str(self))\n\n\nclass LexClass:\n def __init__(self, onttype, words, templates=None):\n self.onttype = onttype\n self.words = set(words)\n if templates is None:\n self.templates = []\n else:\n self.templates = templates[:]\n\n def add_template(self, template):\n self.templates.append(template)\n\n # dangerous assumption: all classes with the same onttype and words also have the same template\n\n def __str__(self):\n return \"{}[{}]\".format(self.onttype, sorted(list(self.words))[0])\n\n def __repr__(self):\n return \"{}[{}]\".format(self.onttype, \",\".join(sorted(list(self.words))))\n\n def __hash__(self):\n return hash(repr(self))\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n\nclass LexWord:\n def __init__(self, word, pos, morphs, lexclasses):\n self.word = word\n self.pos = pos\n self.morphs = morphs\n self.lexclasses = lexclasses\n\n def __str__(self):\n return self.word + \".\" + self.pos\n\n def __repr__(self):\n return str(self)\n\n def __hash__(self):\n return hash(str(self))\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def has_cat(self, cat):\n return any([m.cat == cat for m in self.morphs])\n\n\nclass Lexicon:\n def __init__(self, lexwords, templates, lexclasses, morphs):\n self.lexwords = lexwords\n self.templates = templates\n self.lexclasses = list(lexclasses)[:]\n for l in self.lexclasses:\n if l.templates:\n l.templates = [self.get_template(t) for t in l.templates]\n self.morphs = list(morphs)[:]\n\n self.de_morph = ddict(list)\n for m in morphs:\n self.de_morph[m.to].append(m)\n\n self.lexindex = {}\n for l in 
self.lexwords.values():\n if l.word in self.lexindex:\n self.lexindex[l.word].append(l)\n else:\n self.lexindex[l.word] = []\n self.lexindex[l.word].append(l)\n\n def morph(self, word, cat=None, detailed=False):\n candidates = self.de_morph.get(word, [])\n if cat is not None:\n candidates = [c for c in candidates if c.cat == cat]\n if detailed:\n return candidates[:]\n if len(candidates) == 0:\n return [word] # return the word if no morphs are found\n return list(set([c.word for c in candidates]))\n\n def lookup_literal(self, word, pos=None):\n word = word.lower()\n if pos is None:\n return self.lexindex.get(word, [])[:]\n else:\n q = \"{}.{}\".format(word, pos)\n if q in self.lexwords:\n return [self.lexwords[q]]\n return []\n\n def lookup(self, word, pos=None, literal=False):\n word = word.lower()\n if literal:\n return self.lookup_literal(word, pos)\n morphs = self.morph(word, detailed=True)\n base_words = {c.word for c in morphs}\n base_words.add(word)\n candidates = set()\n for b in base_words:\n candidates.update(self.lookup_literal(b, pos))\n if morphs:\n return [l for l in candidates if any([m in l.morphs or m.word == word for m in morphs])]\n else:\n return list(candidates)\n\n def get_template(self, call):\n name, args = template_call(call)\n t = self.templates.get(name, None)\n if not t:\n print(\"did not find {}\".format(name))\n else:\n for p, arg in args:\n t = t.apply(p, arg)\n return t\n\n\ndef load_lexicon_file(lexiconpath, name, lexclasses=None, morphs=None):\n fname = os.path.join(lexiconpath, name)\n data = ElementTree.parse(fname)\n root = data.getroot()\n word = root.attrib[\"name\"]\n new_words = []\n if lexclasses is None:\n lexclasses = {}\n if morphs is None:\n morphs = {}\n for pos_elt in root:\n pos = pos_elt.attrib[\"name\"]\n wmorphs = []\n classes = []\n for elt in pos_elt:\n if elt.tag == \"MORPH\":\n cat = elt.attrib.get(\"cat\")\n to = elt.attrib.get(\"to\", word)\n frm = elt.attrib.get(\"from\", word)\n m = Morph(frm, cat, to)\n if m in morphs:\n m = morphs[m]\n else:\n morphs[m] = m\n wmorphs.append(m)\n elif elt.tag == \"CLASS\":\n cls = LexClass(elt.attrib[\"onttype\"], elt.attrib[\"words\"].split(\",\"))\n if cls in lexclasses:\n cls = lexclasses[cls]\n else:\n for templ in elt:\n if templ.tag == \"FRAME\":\n cls.add_template(templ.attrib[\"desc\"])\n lexclasses[cls] = cls\n classes.append(cls)\n new_words.append(LexWord(word, pos, wmorphs, classes))\n return new_words, lexclasses, morphs\n\n\ndef load_lexicon(lexiconpath, templpath):\n files = [f for f in os.listdir(lexiconpath) if f.startswith(\"W_\")]\n lexclasses = {}\n morphs = {}\n lexicon = {}\n for name in files:\n lexwords, lexclasses, morphs = load_lexicon_file(lexiconpath, name, lexclasses, morphs)\n for x in lexwords:\n lexicon[str(x)] = x\n # initialize_templates_xml(frames.values(), dslpath)\n templates = load_templates_from_lisp(templpath)\n return Lexicon(lexicon, templates, lexclasses.values(), morphs.values())\n","sub_path":"diesel/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341951976","text":"data = open('train_data.txt', 'r').read().strip().split('\\n')\nout = open('all.txt', 'w')\n\ntrain_corpus = []\nfor line in data:\n content = line.split('\\t')\n title = content[0]\n condidates = [title] + [content[-1][:8]] + [content[-1][8:16]] + [content[-1][16:24]] + [content[-1][24:]]\n # import pdb\n # pdb.set_trace()\n\n \n for i in range(4):\n 
train_corpus.append(\"{}@{}\\t{}\".format(title, condidates[i], condidates[i+1]))\n\nfor i in range(len(train_corpus)):\n out.write('{}\\n'.format(train_corpus[i]))","sub_path":"data_process/convert_to_megts_data/to_megts_data.py","file_name":"to_megts_data.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106367004","text":"#!/usr/bin/env python\nimport os,sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport argparse\nimport numpy as np\nimport csv\n\nfrom multiagent.environment import MultiAgentEnv\nfrom multiagent.policy import InteractivePolicy\nimport multiagent.scenarios as scenarios\n\nfrom agents.coop_reinforce import CoopReinforce\n\nTEST_INDEX = 10 # test after every 10 training episodes\nNUM_TESTS = 10\n\nclass CoopInteractive():\n def __init__(self, scenario_file, episodes, good_agents, adversary_agents, verbose):\n # load scenario from script\n scenario = scenarios.load(scenario_file).Scenario()\n # create world\n world = scenario.make_world()\n # create multiagent environment\n env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None, shared_viewer = False)\n # render call to create viewer window (necessary only for interactive policies)\n if verbose:\n env.render()\n \"TODO: determine which agents are good and bad and give them different policies\"\n agent = good_agents(env, env.n, 0.1)\n\n # find directory to save results\n current_directory = os.path.dirname(__file__)\n parent_directory = os.path.split(current_directory)[0]\n parent_directory = os.path.split(parent_directory)[0]\n\n scenario_name = os.path.splitext(scenario_file)[0]\n filename = f'{parent_directory}/results/{scenario_name}_{agent.name}.csv'\n print(filename)\n with open(filename, 'w', newline = '') as csvfile:\n writer = csv.writer(csvfile, delimiter = ',')\n\n # execution loop\n for i in range(1, episodes):\n if i % TEST_INDEX == 0:\n scores = []\n for t in range(NUM_TESTS):\n value = self.run(env, agent, True, verbose)\n scores.append(value)\n\n avg_scores = np.mean(scores, axis=0)\n avg_scores = np.mean(avg_scores)\n #avg_scores_string = [\"%.3f\" % avg for avg in avg_scores]\n\n print(f'TEST %d:\\t Avg Agent Rewards = %.3f' %(int(i / TEST_INDEX), avg_scores))\n writer.writerow([i / TEST_INDEX, avg_scores])\n else:\n self.run(env, agent, False, verbose)\n agent.learn()\n\n def run(self, env, agent, istest, verbose):\n num_agents = env.n\n obs_n = env.reset()\n step_count = 0\n done_n = []\n scores = np.zeros(num_agents)\n while step_count < 200 and sum(done_n) == 0:\n # query for action from each agent's policy\n act_n = agent.action(obs_n)\n # step environment\n obs_n, reward_n, done_n, _ = env.step(act_n)\n step_count += 1\n #print(f'scores: {scores} rewards: {reward_n}')\n for i in range(num_agents):\n scores[i] += reward_n[i]\n\n if not istest:\n agent.update(reward_n)\n # render all agent views\n if verbose:\n env.render()\n\n # display rewards\n #for agent in env.world.agents:\n # print(agent.name + \" reward: %0.3f\" % env._get_reward(agent))\n return scores\n","sub_path":"multiagent-particle-envs/bin/coop_interactive.py","file_name":"coop_interactive.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"573146513","text":"\r\n# Jaden Stock\r\n# Smith-Waterman alignment algorithm for local alignment\r\n\r\nimport random\r\n\r\n#MYOD1_HUMAN 
(human)\r\nP15172 = \"MELLSPPLRDVDLTAPDGSLCSFATTDDFYDDPCFDSPDLRFFEDLDPRLMHVGALLKPEEHSHFPAAVHPAPGAREDEHVRAPSGHHQAGRCLLWACKACKRKTTNADRRKAATMRERRRLSKVNEAFETLKRCTSSNPNQRLPKVEILRNAIRYIEGLQALLRDQDAAPPGAAAAFYAPGPLPPGRGGEHYSGDSDASSPRSNCSDGMMDYSGPPSGARRRNCYEGAYYNEAPSEPRPGKSAAVSSLDCLSSIVERISTESPAAPALLLADVPSESPPRRQEAAAPSEGESSGDPTQSPDAAPQCPAGANPNPIYQVL\" \r\n#TAL1_HUMAN (human)\r\nP17542 = \"MTERPPSEAARSDPQLEGRDAAEASMAPPHLVLLNGVAKETSRAAAAEPPVIELGARGGPGGGPAGGGGAARDLKGRDAATAEARHRVPTTELCRPPGPAPAPAPASVTAELPGDGRMVQLSPPALAAPAAPGRALLYSLSQPLASLGSGFFGEPDAFPMFTTNNRVKRRPSPYEMEITDGPHTKVVRRIFTNSRERWRQQNVNGAFAELRKLIPTHPPDKKLSKNEILRLAMKYINFLAKLLNDQEEEGTQRAKTGKDPVVGAGGGGGGGGGGAPPDDLLQDVLSPNSSCGSSLDGAASPDSYTEEPAPKHTARSLHPAMLPAADGAGPR\"\r\n#MYOD1_MOUSE (Mouse)\r\nP10085 = \"MELLSPPLRDIDLTGPDGSLCSFETADDFYDDPCFDSPDLRFFEDLDPRLVHMGALLKPEEHAHFPTAVHPGPGAREDEHVRAPSGHHQAGRCLLWACKACKRKTTNADRRKAATMRERRRLSKVNEAFETLKRCTSSNPNQRLPKVEILRNAIRYIEGLQALLRDQDAAPPGAAAFYAPGPLPPGRGSEHYSGDSDASSPRSNCSDGMMDYSGPPSGPRRQNGYDTAYYSEAARESRPGKSAAVSSLDCLSSIVERISTDSPAAPALLLADAPPESPPGPPEGASLSDTEQGTQTPSPDAAPQCPAGSNPNAIYQVL\" \r\n#MYOD1_CHICK (Chicken)\r\nP16075 = \"MDLLGPMEMTEGSLCSFTAADDFYDDPCFNTSDMHFFEDLDPRLVHVGGLLKPEEHPHTRAPPREPTEEEHVRAPSGHHQAGRCLLWACKACKRKTTNADRRKAATMRERRRLSKVNEAFETLKRCTSTNPNQRLPKVEILRNAIRYIESLQALLREQEDAYYPVLEHYSGESDASSPRSNCSDGMMEYSGPPCSSRRRNSYDSSYYTESPNDPKHGKSSVVSSLDCLSSIVERISTDNSTCPILPPAEAVAEGSPCSPQEGGNLSDSGAQIPSPTNCTPLPQESSSSSSSNPIYQVL\" \r\n#MYODA_XENLA (African clawed frog)\r\nP13904 = \"MELLPPPLRDMEVTEGSLCAFPTPDDFYDDPCFNTSDMSFFEDLDPRLVHVTLLKPEEPHHNEDEHVRAPSGHHQAGRCLLWACKACKRKTTNADRRKAATMRERRRLSKVNEAFETLKRYTSTNPNQRLPKVEILRNAIRYIESLQALLHDQDEAFYPVLEHYSGDSDASSPRSNCSDGMMDYNSPPCGSRRRNSYDSSFYSDSPNDSRLGKSSVISSLDCLSSIVERISTQSPSCPVPTAVDSGSEGSPCSPLQGETLSERVITIPSPSNTCTQLSQDPSSTIYHV\" \r\n#MYOD1_DANRE (Zebrafish)\r\nQ90477 = \"MELSDIPFPIPSADDFYDDPCFNTNDMHFFEDLDPRLVHVSLLKPDEHHHIEDEHVRAPSGHHQAGRCLLWACKACKRKTTNADRRKAATMRERRRLSKVNDAFETLKRCTSTNPNQRLPKVEILRNAISYIESLQALLRSQEDNYYPVLEHYSGDSDASSPRSNCSDGMMDFMGPTCQTRRRNSYDSSYFNDTPNADARNNKNSVVSSLDCLSSIVERISTETPACPVLSVPEGHEESPCSPHEGSVLSDTGTTAPSPTSCPQQQAQETIYQVL\" \r\n#Q8IU24_BRABE (Amphioxus)\r\nQ8IU24 = \"MEFVELSSCRFDATPTFCDRPAAPNATVLPGEHFPVPNGSYEDQGDGHVLAPGPSFHGPGRCLLWACKACKKKTVPIDRRKAATMRERRRLVKVNEAFDILKKKSCANPNQRLPKVEILRNAISYIEQLHKLLRDSKENSSGEVSDTSAPSPGSCSDGMAAHSPHSFCTDTSGNSSWEQGDGQPGNGYENQSCGNTVSSLDCLSLIVQSISTIEGEENNNASNTPR\"\r\n#MYOD_DROME (Fruit fly)\r\nP22816 = \"MTKYNSGSSEMPAAQTIKQEYHNGYGQPTHPGYGFSAYSQQNPIAHPGQNPHQTLQNFFSRFNAVGDASAGNGGAASISANGSGSSCNYSHANHHPAELDKPLGMNMTPSPIYTTDYDDENSSLSSEEHVLAPLVCSSAQSSRPCLTWACKACKKKSVTVDRRKAATMRERRRLRKVNEAFEILKRRTSSNPNQRLPKVEILRNAIEYIESLEDLLQESSTTRDGDNLAPSLSGKSCQSDYLSSYAGAYLEDKLSFYNKHMEKYGQFTDFDGNANGSSLDCLNLIVQSINKSTTSPIQNKATPSASDTQSPPSSGATAPTSLHVNFKRKCST\"\r\n#LIN32_CAEEL (C. 
elegans)\r\nQ10574 = \"MSWEQYQMYVPQCHPSFMYQGSIQSTMTTPLQSPNFSLDSPNYPDSLSNGGGKDDKKKCRRYKTPSPQLLRMRRSAANERERRRMNTLNVAYDELREVLPEIDSGKKLSKFETLQMAQKYIECLSQILKQDSKNENLKSKSG\" \r\n#SYFM_HUMAN (Human)\r\nO95363 = \"MVGSALRRGAHAYVYLVSKASHISRGHQHQAWGSRPPAAECATQRAPGSVVELLGKSYPQDDHSNLTRKVLTRVGRNLHNQQHHPLWLIKERVKEHFYKQYVGRFGTPLFSVYDNLSPVVTTWQNFDSLLIPADHPSRKKGDNYYLNRTHMLRAHTSAHQWDLLHAGLDAFLVVGDVYRRDQIDSQHYPIFHQLEAVRLFSKHELFAGIKDGESLQLFEQSSRSAHKQETHTMEAVKLVEFDLKQTLTRLMAHLFGDELEIRWVDCYFPFTHPSFEMEINFHGEWLEVLGCGVMEQQLVNSAGAQDRIGWAFGLGLERLAMILYDIPDIRLFWCEDERFLKQFCVSNINQKVKFQPLSKYPAVINDISFWLPSENYAENDFYDLVRTIGGDLVEKVDLIDKFVHPKTHKTSHCYRITYRHMERTLSQREVRHIHQALQEAAVQLLGVEGRF\"\r\n\r\n#array of sequences that will be used\r\nsequences = [P15172, P17542, P10085, P16075, P13904, Q90477, Q8IU24, P22816, Q10574, O95363]\r\nsequences_abrv = [\"P15172\", \"P17542\", \"P10085\", \"P16075\", \"P13904\", \"Q90477\", \"Q8IU24\", \"P22816\", \"Q10574\", \"O95363\"]\r\n\r\ndef read_score_matrix(file_name):\r\n\tsigma = {}\r\n\tf = open(file_name, \"r\")\r\n\r\n\t#first get the characters in the dict\r\n\twhile True:\r\n\t\tline = f.next()\r\n\t\tif not line.startswith(\"#\"):\r\n\t\t\tchars = line.split()\r\n\t\t\tbreak\r\n\r\n\t#now we fill in the matrix\r\n\tfor line in f:\r\n\t\tif line.startswith(\"#\"):\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tsentence = line.split()\r\n\t\t\tsigma[sentence[0]] = {}\r\n\t\t\tfor i in range(1, len(sentence)):\r\n\t\t\t\tsigma[sentence[0]][chars[i-1]] = int(sentence[i])\r\n\treturn sigma\r\n\r\n#return the sequences from lines that do not start with a #\r\ndef read_sequences(file_name):\r\n\tf = open(file_name, \"r\")\r\n\tsequences = {}\r\n\tfor line in f:\r\n\t\tif not line.startswith(\"#\"):\r\n\t\t\tline = line.split(\":\")\r\n\t\t\tsequences[line[0]] = line[1] #do I need to get rid of the new line?\r\n\treturn sequences\r\n\r\n\r\n#permutes an array using the technique discussed in class\r\n#input: an array\r\n#output: a new, permuted array\r\ndef permute_array(array):\r\n\tpermuted_array = list(array)\r\n\tfor i in range(len(array)):\r\n\t\ti = len(array)-i-1\r\n\t\tj = random.randint(0,i)\r\n\t\tpermuted_array[i], permuted_array[j] = permuted_array[j], permuted_array[i]\r\n\treturn permuted_array\r\n\r\n\r\n# Smith-Waterman Local sequence alignment algorithm\r\n# input: Two strings S and T as well and a score dictionary. 
Optional tags for printing the alignment matrix, \r\n# \t\tprinting the aligned substrings, printing the score, and an optional output_file\r\n# effects: if any of the print_matrix, print_strings, print_score tags are set to true, the corresponding object will be printed, \r\n#\t\tif an output_file is given they will print to that.\r\n# output: the score of the optimal local alignment\r\ndef local_alignment(S, T, sigma, print_matrix = False, print_strings = True, print_score = True, output_file = None):\r\n\t#first we will create our 2D array and fill it with values using the recurrence relation\r\n\tv = [[0 for j in range(len(T)+1)] for i in range(len(S)+1)]\r\n\tfor j in range(1, len(T)+1):\r\n\t\tfor i in range(1, len(S)+1):\r\n\t\t\tv[i][j] = max([0, v[i-1][j-1]+sigma[S[i-1]][T[j-1]], v[i-1][j]+sigma[S[i-1]][\"*\"], v[i][j-1]+sigma[\"*\"][T[j-1]]])\r\n\r\n\t#if print_matrix is true we print out the alignment matrix\r\n\tif print_matrix:\r\n\t\tmatrix_string = \"\"\r\n\t\tt_string = \" *\"\r\n\t\tfor i in range(len(T)):\r\n\t\t\tt_string += \" \"\r\n\t\t\tt_string += T[i]\r\n\t\tmatrix_string += t_string\r\n\t\tmatrix_string += \"\\n\"\r\n\t\tfor i in range(len(S)+1):\r\n\t\t\tnumber_string = \"\"\r\n\t\t\tif(i == 0):\r\n\t\t\t\tnumber_string += \" *\"\r\n\t\t\telse:\r\n\t\t\t\tnumber_string += \" \"\r\n\t\t\t\tnumber_string += S[i-1]\r\n\t\t\tfor j in range(len(T)+1):\r\n\t\t\t\tif(len(str(v[i][j])) == 1):\r\n\t\t\t\t\tnumber_string += \" \"\r\n\t\t\t\t\tnumber_string += str(v[i][j])\r\n\t\t\t\telif(len(str(v[i][j])) == 2):\r\n\t\t\t\t\tnumber_string += \" \"\r\n\t\t\t\t\tnumber_string += str(v[i][j])\r\n\t\t\tmatrix_string += number_string\r\n\t\t\tmatrix_string += \"\\n\"\r\n\t\tif output_file == None:\r\n\t\t\tprint(matrix_string + \"\\n\")\r\n\t\telse:\r\n\t\t\toutput_file.write(matrix_string + \"\\n\")\r\n\r\n\t# now we find the cell of v which holds the maximum value\r\n\tmax_value = 0\r\n\tmax_i, max_j = 0, 0\r\n\tfor i in range(1,len(S)+1):\r\n\t\tfor j in range(1, len(T)+1):\r\n\t\t\tif (v[i][j] > max_value):\r\n\t\t\t\tmax_value = v[i][j]\r\n\t\t\t\tmax_i, max_j = i, j\r\n\r\n\t#now if the print flag was set to true then we want to construct the aligned strings and print them out\r\n\tif(print_strings == True):\r\n\t\t#if S and T are one of the default sequences we can find which ones they are\r\n\t\ts_abbreviation = \"\"\r\n\t\tt_abbreviation = \"\"\r\n\t\tfor i in range(len(sequences)):\r\n\t\t\tif S == sequences[i]:\r\n\t\t\t\ts_abbreviation = sequences_abrv[i]\r\n\t\t\t\ts_abbreviation += \":\"\r\n\t\t\tif T == sequences[i]:\r\n\t\t\t\tt_abbreviation = sequences_abrv[i]\r\n\t\t\t\tt_abbreviation += \":\"\r\n\r\n\t\t#We trace back through the 2-D array to find the optimal alignment\r\n\t\topt_path = [(max_i,max_j)]\r\n\t\tcur_i = max_i\r\n\t\tcur_j = max_j\r\n\t\twhile(v[cur_i][cur_j] != 0):\r\n\t\t\t#construct an array of possible steps before the cur pointers that lead to the cur pointers.\r\n\t\t\treverse_path_positions = []\r\n\t\t\tif(v[cur_i-1][cur_j]+sigma[S[cur_i-1]][\"*\"] == v[cur_i][cur_j]):\r\n\t\t\t\treverse_path_positions += [(cur_i-1,cur_j)]\r\n\r\n\t\t\tif(v[cur_i][cur_j-1]+sigma[\"*\"][T[cur_j-1]] == v[cur_i][cur_j]):\r\n\t\t\t\treverse_path_positions += [(cur_i,cur_j-1)]\r\n\r\n\t\t\tif(v[cur_i-1][cur_j-1]+sigma[S[cur_i-1]][T[cur_j-1]] == v[cur_i][cur_j]):\r\n\t\t\t\treverse_path_positions += [(cur_i-1,cur_j-1)]\r\n\r\n\t\t\t#pick a random, valid path direction\r\n\t\t\treverse_path_positions = permute_array(reverse_path_positions)\r\n\t\t\ta,b = reverse_path_positions[0]\r\n\t\t\topt_path += [(a,b)]\r\n\t\t\tcur_i = a\r\n\t\t\tcur_j = b\r\n\r\n\t\t#finally, we construct the strings in the alignment to be printed out.\r\n\t\tlocal_aligned_S = \"\"\r\n\t\tlocal_aligned_T = \"\"\r\n\t\topt_path = opt_path[::-1] #reverse the path and throw away the first element because it corresponds to a 0 in the alignment matrix\r\n\t\topt_path = opt_path[1:]\r\n\t\tstart_i, start_j = opt_path[0] #the start index of each string, will be printed later.\r\n\t\tfor i in range(len(opt_path)):\r\n\t\t\ta,b = opt_path[i]\r\n\t\t\tif i==0:\r\n\t\t\t\tlocal_aligned_S += S[a-1]\r\n\t\t\t\tlocal_aligned_T += T[b-1]\r\n\t\t\telse:\r\n\t\t\t\tc,d = opt_path[i-1]\r\n\t\t\t\tif((a == c+1) and (b == d+1)):\r\n\t\t\t\t\tlocal_aligned_S += S[a-1]\r\n\t\t\t\t\tlocal_aligned_T += T[b-1]\r\n\t\t\t\telif((a == c) and (b == d+1)):\r\n\t\t\t\t\tlocal_aligned_S += \"-\"\r\n\t\t\t\t\tlocal_aligned_T += T[b-1]\r\n\t\t\t\telif((a == c+1) and (b == d)):\r\n\t\t\t\t\tlocal_aligned_S += S[a-1]\r\n\t\t\t\t\tlocal_aligned_T += \"-\"\r\n\r\n\t\t#create the middle line. This just tells you which characters are aligned.\r\n\t\tmiddle_line = \"\"\r\n\t\tfor i in range(len(local_aligned_S)):\r\n\t\t\tif(local_aligned_S[i] == local_aligned_T[i]):\r\n\t\t\t\tmiddle_line += local_aligned_S[i]\r\n\t\t\telif((local_aligned_S[i] == \"-\") or (local_aligned_T[i] == \"-\")):\r\n\t\t\t\tmiddle_line += \" \"\r\n\t\t\telif(sigma[local_aligned_S[i]][local_aligned_T[i]] > 0):\r\n\t\t\t\tmiddle_line += \"+\"\r\n\t\t\telse:\r\n\t\t\t\tmiddle_line += \" \"\r\n\t\t\r\n\t\t#now we want to only print out 60 characters of each string at a time\r\n\t\tcounter = 0\r\n\t\twhile(len(local_aligned_S) > 0):\r\n\t\t\tif output_file == None:\r\n\t\t\t\tprint(s_abbreviation + \"\\t\" + str(start_i + counter) + \"\\t\" + local_aligned_S[:60] + \"\\n\")\r\n\t\t\t\tprint(\"\\t\\t\" + middle_line[:60] + \"\\n\")\r\n\t\t\t\tprint(t_abbreviation + \"\\t\" + str(start_j + counter) + \"\\t\" + local_aligned_T[:60] + \"\\n\")\r\n\t\t\t\tprint(\"\\n\")\r\n\t\t\telse:\r\n\t\t\t\toutput_file.write(s_abbreviation + \"\\t\" + str(start_i + counter) + \"\\t\" + local_aligned_S[:60] + \"\\n\")\r\n\t\t\t\toutput_file.write(\"\\t\\t\" + middle_line[:60] + \"\\n\")\r\n\t\t\t\toutput_file.write(t_abbreviation + \"\\t\" + str(start_j + counter) + \"\\t\" + local_aligned_T[:60] + \"\\n\")\r\n\t\t\t\toutput_file.write(\"\\n\")\r\n\r\n\t\t\tlocal_aligned_S = local_aligned_S[60:]\r\n\t\t\tlocal_aligned_T = local_aligned_T[60:]\r\n\t\t\tmiddle_line = middle_line[60:]\r\n\t\t\tcounter += 60\r\n\r\n\t#if the score tag was set to True then print the score\r\n\tif print_score:\r\n\t\tif output_file == None:\r\n\t\t\tprint(\"Score = \" + str(max_value) + \"\\n\\n\\n\")\r\n\t\telse:\r\n\t\t\toutput_file.write(\"Score = \" + str(max_value) + \"\\n\\n\\n\")\r\n\treturn max_value\r\n\r\n\r\n#an example of how to run the code:\r\n#sigma = read_score_matrix(\"BLOSUM62.txt\")\r\n#local_alignment(P15172,P17542, sigma)","sub_path":"Smith_Waterman.py","file_name":"Smith_Waterman.py","file_ext":"py","file_size_in_byte":10702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"643095682","text":"#!/usr/bin/env python\n# coding=utf-8\n#visualize with TensorBoard\nimport tensorflow as tf\nwith tf.name_scope('graph') as scope:\n    matrix1 = tf.constant([[3.,3.]],name = 'matrix1')\n    matrix2 = tf.constant([[2.],[2.]],name = 'matrix2')\n    product = tf.matmul(matrix1,matrix2,name = 'product')\n\nsess = 
tf.Session()\nwriter = tf.summary.FileWriter(\"logs/\",sess.graph)\n\ninit = tf.global_variables_initializer()\n\nsess.run(init)\n","sub_path":"c2_Basis_Operations/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"153375732","text":"import tensorflow as tf\nimport numpy as np\nimport cf\nfrom tqdm import tqdm\nfrom gain.model_gain import *\n\ndef train(X,M,H,New_X,D_loss1,G_loss1,MSE_train_loss,MSE_test_loss,D_solver,G_solver,G_sample,Dim,Train_No,trainX,p_miss,sess,gain_iter,batch_size,p_hint,\n prob_masks,maskss=[0,1,2]):\n # import tensorboardX\n # import torch\n p_miss = 1/Dim \n print(\"Init p_miss = \", p_miss)\n # %% Start Iterations\n mb_size = batch_size\n p_hint = p_hint\n\n train_loss_curr = []\n test_loss_curr = []\n # writer = tensorboardX.SummaryWriter()\n prob_cols_tracks = [[] for _ in range(Dim)]\n total_iter = 0\n\n threshold_mean = 0.1 \n threshold_std = 0.02\n max_counter = 200\n\n pmisses = [p_miss for _ in range(Dim)]\n vp_cols = [0 for _ in range(Dim)] # -1 = vt, 0 = unknown, 1 = vp\n\n for it in tqdm(range(gain_iter)):\n # %% Inputs\n # print(it)\n np.random.shuffle(trainX)\n \n for ii in range(int(Train_No / mb_size)):\n mb_idx = [i for i in range(ii * mb_size, (ii + 1) * mb_size)]\n # mb_idx = sample_idx(Train_No, mb_size)\n X_mb = trainX[mb_idx, :]\n\n Z_mb = np.zeros([mb_size, Dim])\n # M_mb = trainM[mb_idx, :]\n #M_mb = np.zeros_like(X_mb)\n M_mb = np.ones_like(X_mb)\n\n ## FD only\n #for i in range(Dim): # all column\n for i in maskss: # some column\n ## FD mask random\n M_mb[:,i] = np.random.choice(2, size=(X_mb.shape[0],), p=[p_miss, 1-p_miss])\n ## FD mask total\n # M_mb[:,i] = np.zeros((X_mb.shape[0],))\n\n ## N value mask random\n #miss_ori = np.ones(Dim)\n #miss_pos = np.random.choice(Dim,2,replace=False)\n #miss_pos = [0,1]\n #for ii_pos in miss_pos:\n # miss_ori[ii_pos] = 0\n #print(miss_ori)\n #for i in range(mb_size):\n # #M_mb[i,:] = np.random.permutation(miss_ori)\n # M_mb[i,:] = miss_ori\n \n #print(M_mb)\n H_mb1 = sample_M(mb_size, Dim, 1 - p_hint)\n H_mb = M_mb * H_mb1\n\n New_X_mb = M_mb * X_mb + (1 - M_mb) * Z_mb # Missing Data Introduce\n\n _, D_loss_curr = sess.run([D_solver, D_loss1], feed_dict={M: M_mb, New_X: New_X_mb, H: H_mb})\n _, G_loss_curr, MSE_train_loss_curr, MSE_test_loss_curr, prob_masks_vals = sess.run(\n [G_solver, G_loss1, MSE_train_loss, MSE_test_loss, prob_masks],\n feed_dict={X: X_mb, M: M_mb, New_X: New_X_mb, H: H_mb})\n train_loss_curr.append(MSE_train_loss_curr)\n test_loss_curr.append(MSE_test_loss_curr)\n\n # for i in range(int(Dim)):\n # if prob_masks_vals[i] > prob_cols_tracks[i][-1]:\n # counter_cols[i] += 1\n # if counter_cols[i] >= max_counter:\n # # converge -> check mean and std\n # mean_prob = np.mean(prob_cols_tracks[i][-max_counter:])\n # std_prob = np.std(prob_cols_tracks[i][-max_counter:])\n # print(\"col {}: mean: {:.3f} - std: {:.3f}\".format(i, mean_prob, std_prob))\n # else:\n # counter_cols[i] = 0\n\n # prob_cols_tracks[i].append(prob_masks_vals[i])\n\n for i in range(int(Dim)):\n prob_cols_tracks[i].append(prob_masks_vals[i])\n \n if (it+1) % (gain_iter//2) == 0:\n for i in range(int(Dim)): \n mean_prob = np.mean(prob_cols_tracks[i][-max_counter:])\n std_prob = np.std(prob_cols_tracks[i][-max_counter:])\n print(\"col {}: mean: {:.3f} - std: {:.3f}\".format(i, mean_prob, std_prob))\n\n if mean_prob < threshold_mean and std_prob < threshold_std:\n # col i is VP\n vp_cols[i] = 1\n 
pmisses[i] = 1 - p_miss\n else:\n vp_cols[i] = -1\n pmisses[i] /= 4\n \n total_iter += 1\n\n # for i in range(int(Dim)):\n # writer.add_scalar(\"prob_col/{}\".format(i), torch.FloatTensor([prob_masks_vals[i]]), it)\n\n # total_iter += 1\n return train_loss_curr,test_loss_curr\n","sub_path":"gain/model_gain_random_prior.py","file_name":"model_gain_random_prior.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598171513","text":"\nimport pyvisa, math, sys\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nclass Powermeter(object):\n\n\tread_timeout = 0.\n\tunit = 'mW'\t\t\t# {'W', 'mW', 'dBm'}\n\tmodel = None\t\t# This is populated with the powermeter model, for handling different interfaces. Case sensitive\n\t\t\t\t\t\t# Supported models: N7747A; PM100D; PM100USB\n\n\tdef __init__(self, model, serial=None, unit=None, wavelength=None, averages=None):\n\n\t\t# Find instrument corresponding to model and serial numbers\n\n\t\trm = pyvisa.ResourceManager()\n\t\tserial_names=rm.list_resources()\n\t\t# print(serial_names)\n\n\t\tpm_serial = None\n\t\tinstrs = []\n\t\tfor serial_name in serial_names:\n\t\t\tinstr = rm.open_resource(serial_name)\n\t\t\tinstr.timeout=2\n\n\t\t\t# if 'model_name' not in dir(instr) or 'serial_number' not in dir(instr):\n\t\t\t\t# print ('not our instrument')\n\t\t\t\t# instrs.append({'port':serial_name, 'model':None, 'num':None})\n\t\t\t\t# continue\n\n\t\t\tmn = instr.model_name\n\t\t\tsn = instr.serial_number\t\n\t\t\t\n\t\t\tinstrs.append({'port':serial_name, 'model':mn, 'num':sn})\n\t\t\tinstr.close()\n\t\t\t\n\t\t\tif mn == model and (sn == serial or serial is None):\n\t\t\t\tpm_serial = serial_name\n\t\t\t\tbreak\n\n\t\t# If we didn't find any matchin instrument, raise an error\n\t\tif pm_serial is None:\n\t\t\traise StandardError('Unable to find device {0} {1}. Found devices {2}'.format(model, serial, instrs))\n\n\t\tself.instr = rm.open_resource(pm_serial)\n\t\tself.model = self.instr.model_name\n\n\t\t# Set up default and initialisation values\n\t\tif wavelength is not None:\n\t\t\tself.set_wavelength(wavelength)\n\t\tif averages is not None:\n\t\t\tself.set_averages(averages)\n\t\tif unit is not None:\n\t\t\tself.unit = unit\n\n\t\t\t\n\tdef close(self):\n\t\tself.instr.close()\n\n\tdef __del__(self):\n\t\tself.close()\n\n\tdef measure(self, channel = 1):\n\n\t\tif (self.model == 'PM100D' or 'PM100USB'):\n\t\t\t\n\t\t\tresult_W = -float('inf')\n\t\t\tn_tries = 5\n\t\t\tfor iter in range(n_tries):\n\t\t\t\ttry:\n\t\t\t\t\tresult_W = float(self.instr.query('read?', delay=self.read_timeout))\n\t\t\t\texcept pyvisa.errors.VisaIOError:\n\t\t\t\t\tprint (\"Caught error during powermeter read '{:}'. Tries remaining: {:}.\".format(sys.exc_info()[0], n_tries-iter+1))\n\t\t\t\t\tcontinue\n\t\t\t\tbreak\n\t\t\tif iter == n_tries-1:\n\t\t\t\ttry:\n\t\t\t\t\tprint ('Trying to reinitialise powermeter.')\n\t\t\t\t\tself.close()\n\t\t\t\t\tself.__init__(self.model)\n\t\t\t\t\tresult_W = float(self.instr.query('read?', delay=self.read_timeout))\n\t\t\t\texcept:\n\t\t\t\t\traise RuntimeError('Error:Powermeter:measure: Tried {:} times to read powermeter, failed. Tried to reinitialise powermeter, failed.'.format(n_tries))\n\t\t\t\t\n\t\t\t\t\n\t\telif (self.model == 'N7747A'):\n\t\t\tif (channel not in [1,2]):\n\t\t\t\traise AttributeError ('Channel number for model {0} must be in [1,2]. 
Specified channel {1} is invalid.'.format(self.model, channel))\n\t\t\tresult_W = float(self.instr.query('read{ch}:pow?'.format(ch = channel), delay=self.read_timeout))\n\t\telse:\n\t\t\traise AttributeError('Unknown model \"{0}\".'.format(self.model))\n\n\n\t\tif self.unit == 'W':\n\t\t\treturn result_W\n\t\telif self.unit == 'mW':\n\t\t\treturn result_W * 1000\n\t\telif self.unit == 'dBm':\n\t\t\treturn 10 * math.log(result_W * 1000, 10 ) if (result_W > 0) else (-float('Inf'))\n\t\telse:\n\t\t\traise AttributeError('Measurement unit, {0}, unrecognised.'.format(self.unit))\n\n\n\tdef query(self,command):\n\t\treturn float(self.instr.query(command))\n\t\n\tdef set_wavelength(self, wl_nm):\n\t\tself.instr.write('CORR:WAV {0}\\n'.format(wl_nm))\n\n\tdef set_averages(self, n_averages):\n\t\tself.instr.write('sens:aver {0}\\n'.format(n_averages))\n\n\tdef measure_average(self,n_averages):\n\t\tmemo = np.zeros(n_averages)\n\t\tfor j in range(n_averages):\n\t\t\tmemo[j] = self.measure()\n\t\treturn np.mean(memo)\n\n\tdef power_live(self,dt=0.1,averages=1,save=False,array_Live=100):\n\t\ttry:\n\t\t\tget_ipython().magic('matplotlib notebook')\n\t\t\tcounter=0\n\t\t\tn_sample=[]\n\t\t\tpower=[]\n\t\t\tfig,ax=plt.subplots(figsize=(9,6))\n\t\t\tplt.ylabel('Power (dBm)')\n\t\t\tplt.xlabel('Time flies')\n\t\t\tax.plot([],[])\n\t\t\t\n\t\t\twhile True:\n\t\t\t\tpower.append(self.measure_average(n_averages=averages))\n\t\t\t\tn_sample.append(counter)\n\t\t\t\tself.PlotWindowed(ax,n_sample,power,array_Live)\n\t\t\t\tplt.tight_layout()\n\t\t\t\tfig.canvas.draw()\n\t\t\t\tcounter+=1\n\t\t\t\t\n\t\t\t\ttime.sleep(dt)\n\t\t\t\t\n\t\texcept KeyboardInterrupt:\n\t\t\tif save is True:\n\t\t\t\treturn power,n_sample\n\t\t\telse:\n\t\t\t\tNone\n\n\tdef PlotWindowed(self,axes,x,y,array_Live):\n\t\t\n\t\taxes.lines[0].set_data(x[-min(array_Live,len(x)):],y[-min(array_Live,len(x)):])\n\t\taxes.relim()\n\t\taxes.autoscale_view()\n\t\treturn None\n\ndef power_live_multiple(*detectors,dt=0.001,array_Live=200):\n\n\tn=len(detectors)\n\tget_ipython().magic('matplotlib notebook')\n\tfig=plt.figure(figsize=(9,6))\n\tpwr=([],)\n\t\n\tfor i in range(n):\n\t\tfig.add_subplot(n,1,i+1)\n\t\tfig.axes[i].plot([],[])\n\t\tpwr=pwr+([],)\n\t\t\t\n\tfig.subplots_adjust(hspace=0.3)\n\twhile True:\n\t\tfor i,detector in enumerate(detectors):\n\t\t\t(pwr[i]).append(detector.measure())\n\t\t\taxTemp=fig.axes[i]\n\t\t\tpwrTemp=pwr[i]\n\t\t\tPlot(axTemp,pwrTemp,array_Live)\n\t\t\taxTemp.set_title(\"{}: {:>4} dBm\".format(detector.instr.serial_number,round(pwrTemp[-1],2)))\n\t\t\ttime.sleep(dt)\n\t\tfig.canvas.draw()\n\treturn Plot(axTemp,pwrTemp,array_Live)\n\ndef Plot(ax,y,array_Live):\n\n\tax.lines[0].set_data(np.arange(1,len(y[-min(array_Live,len(y)):])+1),y[-min(array_Live,len(y)):])\n\tax.relim()\n\tax.autoscale_view()\n\n# def power_live_multiple(*detectors,dt=0.1,averages=1,array_Live):\n\t\n\t# get_ipython().magic('matplotlib notebook')\n\t# fig = plt.figure()\n\t# n=len(detectors)\n\t# fig=plt.figure()\n\t# fig.add_subplot(111)\n\t# plt.plot([],[])\n\t# pwr=([],)\n\t\n\t# if n>1:\n\t\t# for i in range(n):\n\t\t\t# fig.axes[i].change_geometry(n, 1, i+1)\n\t\t\t# fig.add_subplot(n,1,i+1)\n\t\t\t# plt.plot([],[])\n\t\t\t# pwr+([],)\n\t# while True:\n\t\t# for detector in detectors:\n\t\t\t\n\t\t\t# Plot(detector.measure(),array_Live)\n\t\t\t\n\n# def Plot(y,array_Live):\n\t\t\n\t# # axes.lines[0].set_data(x[-min(array_Live,len(x)):],y[-min(array_Live,len(x)):])\n\t# axes.lines[0].set_ydata(y[-min(array_Live,len(y)):])\n\t# 
axes.relim()\n\t# axes.autoscale_view()\n\t# return None\n\t\t\ndef list_visa_instruments():\n\trm = pyvisa.ResourceManager()\n\tserial_names=rm.list_resources()\n\n\tpm_serial = None\n\tinstrs = []\n\tfor serial_name in serial_names:\n\t\tinstr = rm.open_resource(serial_name)\n\t\tinstr.timeout=2\n\n\t\tif 'model_name' not in dir(instr) or 'serial_number' not in dir(instr):\n\t\t\tinstrs.append({'port':serial_name, 'model':None, 'num':None})\n\t\t\tcontinue\n\n\t\tmn = instr.model_name\n\t\tsn = instr.serial_number\n\n\t\tinstrs.append({'port':serial_name, 'model':mn, 'num':sn})\n\t\tinstr.close()\n\n\treturn instrs\n\n#print(list_visa_instruments())\n","sub_path":".ipynb_checkpoints/powermeter-checkpoint.py","file_name":"powermeter-checkpoint.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366043910","text":"\"\"\"\nRefactoring the wine x and y value variables initializations in one file\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom iris_common_funcs import plot_decision_regions\n\n\ndef wine_initializer(arg = 'std'):\n df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)\n df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash',\n 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',\n 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']\n\n # print('Class labels', np.unique(df_wine['Class label']))\n # print(df_wine)\n x, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)\n\n # Normalised data: useful for data to be in a bound interval. More sensitive to outliers.\n mms = MinMaxScaler()\n x_train_norm = mms.fit_transform(x_train)\n x_test_norm = mms.transform(x_test)\n\n # Standardization: maintains useful information about outliers, uses standard deviation. 
Preferred.\n    sc = StandardScaler()\n    if arg == 'sc_mat':\n        x_train_std = sc.fit_transform(x_train)\n        x_test_std = sc.fit_transform(x_test)\n        return x_train_std, y_train, x_test_std, y_test, x, y\n    if arg == 'std':\n        x_train_std = sc.fit_transform(x_train)\n        x_test_std = sc.fit_transform(x_test)\n        return x_train_std, y_train, x_test_std, y_test, df_wine.columns\n    if arg == 'val':\n        return x_train, y_train, x_test, y_test, df_wine.columns\n    else:\n        print(\"Invalid argument\")\n        return 0\n\n\n\"\"\"\nCreating mean-vector matrices, a within class and a between class scatter matrix for LDA -\nLinear Discriminant Analysis\nThe functions initialize the matrices\n\"\"\"\n\n\ndef mean_vecs(x_train_std, y_train):\n    \"\"\"Calculating the mean vectors of the x_train_std array\"\"\"\n    mean_vectors =[]\n    for label in range(1, 4):\n        mean_vectors.append(np.mean(x_train_std[y_train == label], axis=0))\n        # print('MV {0}:\\n {1}'.format(label, mean_vectors[label - 1]))\n    return mean_vectors\n\n\ndef within_class_sc_mat(mean_vectors, x_train_std, y_train,x,y):\n    \"\"\"Calculating the within-class scatter matrix from mean vectors\"\"\"\n    dim = 13 # number of features\n    sc_w = np.zeros((dim, dim)) # sc_w - within-class scatter matrix\n    for label, m_vec in zip(range(1, 4), mean_vectors):\n        \"\"\"\n        # Not scaled\n        class_scatter = np.zeros((dim, dim))\n        for row in x[y == label]:\n            row, m_vec = row.reshape(dim, 1), m_vec.reshape(dim, 1)\n            class_scatter += (row-m_vec).dot((row-m_vec).T)\n        sc_w += class_scatter\n        # print(\"Within-class scatter matrix: {0}x{0}\".format(sc_w.shape[1]))\n        \"\"\"\n        \"\"\"Scaled\"\"\"\n        class_scatter = np.cov(x_train_std[y_train == label].T)\n        sc_w += class_scatter\n    # print(\"Scaled within-class scatter matrix: {0}x{1}\".format(sc_w.shape[0], sc_w.shape[1]))\n    return sc_w\n\n\ndef between_class_sc_mat(mean_vectors, x, y, x_train_std):\n    \"\"\"Calculating the between-class scatter matrix from the mean vectors and the overall mean\"\"\"\n    dim = 13\n    mean_overall = np.mean(x_train_std, axis=0)\n    sc_b = np.zeros((dim, dim)) # sc_b - between-class scatter matrix\n    for label, mean_vec in enumerate(mean_vectors):\n        n = x[y == label + 1, :].shape[0]\n        mean_vec = mean_vec.reshape(dim, 1)\n        mean_overall = mean_overall.reshape(dim, 1)\n        sc_b += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)\n    # print(\"Between-class scatter matrix: {0}x{1}\".format(sc_b.shape[0], sc_b.shape[1]))\n    return sc_b\n\n\ndef wine_matrix_init():\n    x_train_std, y_train, x_test_std, y_test, x, y = wine_initializer('sc_mat')\n    np.set_printoptions(precision=4)\n    mean_vectors = mean_vecs(x_train_std, y_train)\n    # unique, counts = np.unique(y_train, return_counts=True)\n    # print('Class label distribution: ', dict(zip(unique, counts)))\n    sc_w = within_class_sc_mat(mean_vectors, x_train_std, y_train, x,y)\n    sc_b = between_class_sc_mat(mean_vectors, x, y, x_train_std)\n    return mean_vectors, sc_w, sc_b\n\n\n# wine_matrix_init()\n","sub_path":"wine_comon_funcs.py","file_name":"wine_comon_funcs.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"611141315","text":"#==================================================================================================\r\n# triggerset.py\r\n# \r\n# This script tests the STL triggerset commands.\r\n#\r\n# Non-DBMS Specific\r\n#\r\n# Author: Ryan Donahue\r\n# $Header: /Vigilert/Tests/Non-DBMS Specific/triggerset.py 5 11/05/01 1:01p Ryan $\r\n#==================================================================================================\r\nfrom Tests.testapi import *\r\n\r\necho(\"\")\r\necho(\"============================\")\r\necho(\" Test triggerset commands.\")\r\necho(\"============================\")\r\necho(\"\")\r\n\r\nlong_name = 128*\"t\"\r\nsql(\"create table T (x int)\")\r\ntl(\"create data source T\")\r\ntl(\"create trigger trig2 from T when x = 2 begin echo 'trig2 fired' end\")\r\nfire_trig2 = \"insert into T values (2)\"\r\n\r\n#-------------------------------\r\n# Test create triggerset.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test create triggerset.\")\r\necho(\"----------------------------\")\r\ntl(\"create triggerset TestTrigSet\")\r\n\r\n# Make sure that a trigger can be created and fired.\r\ntl(\"create trigger trig1 in TestTrigSet from T when x = 1 begin echo 'trig1 fired' end\")\r\nfire_trig1 = \"insert into T values (1)\"\r\necho(\"trig1 should fire\")\r\nsql(fire_trig1)\r\nprocess_updates()\r\n\r\n# Test when it already exists and is enabled.\r\ntl(\"create triggerset TestTrigSet\")\r\n\r\n# Test when it already exists and is disabled.\r\ntl(\"disable triggerset TestTrigSet\")\r\ntl(\"create triggerset TestTrigSet\")\r\n\r\n# Test long triggerset name\r\ntl(\"create triggerset \"+long_name)\r\n\r\n\r\n#-------------------------------\r\n# Test enable triggerset.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test enable triggerset.\")\r\necho(\"----------------------------\")\r\n# Test when triggerset is disabled.\r\ntl(\"enable triggerset TestTrigSet\")\r\n# Make sure that its triggers fired.\r\necho(\"trig1 should fire\")\r\nsql(fire_trig1)\r\nprocess_updates()\r\n\r\n# Test when the triggerset is already enabled.\r\ntl(\"enable triggerset TestTrigSet\")\r\n\r\n# Test long triggerset name\r\ntl(\"disable triggerset \"+long_name)\r\ntl(\"enable triggerset \"+long_name)\r\n\r\n# Test when the triggerset does not exist.\r\ntl(\"enable triggerset DoesNotExist\")\r\n\r\n\r\n#-------------------------------\r\n# Test disable triggerset.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test disable triggerset.\")\r\necho(\"----------------------------\")\r\ntl(\"disable triggerset TestTrigSet\")\r\n# Make sure that its triggers cannot be fired.\r\nshould_not_fire(\"trig1\")\r\necho(\"trig1 should not fire\")\r\nsql(fire_trig1)\r\nprocess_updates()\r\n\r\n# Test when the triggerset is already disabled.\r\ntl(\"disable triggerset TestTrigSet\")\r\n\r\n# Test long triggerset name.\r\ntl(\"disable triggerset \"+long_name)\r\n\r\n# Test disabling the default triggerset\r\ntl(\"disable triggerset default_triggerset\")\r\n# Make sure that its triggers cannot be fired.\r\nshould_not_fire(\"trig2\")\r\nsql(fire_trig2)\r\nprocess_updates()\r\n\r\n# Test when triggerset does not exist.\r\ntl(\"disable triggerset DoesNotExist\")\r\n\r\n#-------------------------------\r\n# Test enable all triggersets.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test enable all triggersets.\")\r\necho(\"----------------------------\")\r\ntl(\"enable all triggersets\")\r\n# Make sure all the triggers can fire.\r\nshould_fire(\"trig1\")\r\nsql(fire_trig1)\r\nprocess_updates()\r\nshould_fire(\"trig2\")\r\nsql(fire_trig2)\r\nprocess_updates()\r\n\r\n#-------------------------------\r\n# Test disable all triggersets.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test disable all triggersets.\")\r\necho(\"----------------------------\")\r\ntl(\"disable all triggersets\")\r\n# Make sure all the triggers cannot fire.\r\nshould_not_fire(\"trig1\")\r\nsql(fire_trig1)\r\nprocess_updates()\r\nshould_not_fire(\"trig2\")\r\nsql(fire_trig2)\r\nprocess_updates()\r\n\r\n#-------------------------------\r\n# Test drop triggerset.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test drop triggerset.\")\r\necho(\"----------------------------\")\r\n# Test when triggerset exists and is disabled.\r\ntl(\"drop triggerset TestTrigSet\")\r\n\r\n# Test when triggerset does not exist.\r\ntl(\"drop triggerset TestTrigSet\")\r\n\r\n# Test when triggerset exists and is enabled.\r\ntl(\"create triggerset TestTrigSet\")\r\ntl(\"drop triggerset TestTrigSet\")\r\n\r\n# Test long name\r\ntl(\"drop triggerset \"+long_name)\r\n\r\n# Test dropping the default triggerset.\r\ntl(\"drop triggerset default_triggerset\")\r\n\r\n# Test when triggerset does not exist.\r\ntl(\"drop triggerset DoesNotExist\")\r\n\r\n\r\n#-------------------------------\r\n# Test drop all triggersets.\r\n#-------------------------------\r\necho(\"\")\r\necho(\"----------------------------\")\r\necho(\" Test drop all triggersets.\")\r\necho(\"----------------------------\")\r\ntl(\"create triggerset tset1\")\r\ntl(\"create triggerset tset2\")\r\ntl(\"drop all triggersets\")\r\n\r\n# Clean Up\r\ntl(\"drop data source T\")\r\nsql(\"drop table T\")\r\n","sub_path":"Vigilert/Tests/Non-DBMS Specific/triggerset.py","file_name":"triggerset.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"211872613","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Gabriele Iannetti \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\n\nimport argparse\nimport logging\nimport signal\nimport time\nimport os\n\nfrom ctrl.pid_control import PIDControl\nfrom comm.database_proxy_handler import DatabaseProxyCommHandler\nfrom conf.database_proxy_config_file_reader import DatabaseProxyConfigFileReader\nfrom db.ost_perf_history_table_handler import OSTPerfHistoryTableHandler\n\n\nRUN_FLAG = True\n\n\ndef init_arg_parser():\n\n    parser = argparse.ArgumentParser(description='LFSM Database Proxy')\n\n    default_config_file = \"/etc/lfsm/database-proxy.conf\"\n\n    parser.add_argument('-f',\n                        '--config-file',\n                        dest='config_file',\n                        type=str,\n                        required=False,\n                        help=str('Path to the config file (default: %s)'\n                                 % default_config_file),\n                        default=default_config_file)\n\n    parser.add_argument('--create-table',\n                        dest='create_table',\n                        required=False,\n                        action='store_true',\n                        help='Creates proper database table '\n                             'for storing OST performance measurements.')\n\n    parser.add_argument('-D',\n                        '--debug',\n                        dest='enable_debug',\n                        required=False,\n                        action='store_true',\n                        help='Enables debug log messages.')\n\n    return parser.parse_args()\n\n\ndef init_logging(log_filename, enable_debug):\n\n    if enable_debug:\n        log_level = logging.DEBUG\n    else:\n        log_level = logging.INFO\n\n    if log_filename:\n        logging.basicConfig(filename=log_filename,\n                            level=log_level,\n                            format=\"%(asctime)s - %(levelname)s: %(message)s\")\n    else:\n        logging.basicConfig(level=log_level,\n                            format=\"%(asctime)s - %(levelname)s: %(message)s\")\n\n\ndef set_run_flag_false():\n\n    global RUN_FLAG\n\n    if RUN_FLAG:\n        RUN_FLAG = False\n\n\ndef signal_handler(signum, frame):\n\n    if signum == signal.SIGHUP:\n\n        logging.info('Retrieved hang-up signal.')\n        set_run_flag_false()\n\n    if signum == signal.SIGINT:\n\n        logging.info('Retrieved interrupt program signal.')\n        set_run_flag_false()\n\n    if signum == signal.SIGTERM:\n\n        logging.info('Retrieved signal to terminate.')\n        set_run_flag_false()\n\n\ndef main():\n\n    error = False\n\n    try:\n\n        args = init_arg_parser()\n\n        config_file_reader = DatabaseProxyConfigFileReader(args.config_file)\n\n        init_logging(config_file_reader.log_filename, args.enable_debug)\n\n        # TODO: Check Exception with *with* statement.\n        with PIDControl(config_file_reader.pid_file) as pid_control, \\\n                DatabaseProxyCommHandler(\n                    config_file_reader.comm_target,\n                    config_file_reader.comm_port,\n                    config_file_reader.poll_timeout) as comm_handler, \\\n                OSTPerfHistoryTableHandler(\n                    config_file_reader.host,\n                    config_file_reader.user,\n                    config_file_reader.password,\n                    config_file_reader.database,\n                    config_file_reader.table) as table_handler:\n\n            try:\n\n                if pid_control.lock():\n\n                    logging.info(\"Started\")\n                    logging.info(\"Database Proxy PID: %s\", pid_control.pid())\n                    logging.debug(\"Version: %s\", config_file_reader.version)\n\n                    signal.signal(signal.SIGHUP, signal_handler)\n                    signal.signal(signal.SIGINT, signal_handler)\n                    signal.signal(signal.SIGTERM, signal_handler)\n\n                    signal.siginterrupt(signal.SIGHUP, True)\n                    signal.siginterrupt(signal.SIGINT, True)\n                    signal.siginterrupt(signal.SIGTERM, True)\n\n                    last_store_timestamp = int(time.time())\n                    store_timeout = config_file_reader.store_timeout\n                    store_max_count = config_file_reader.store_max_count\n\n                    if args.create_table:\n\n                        table_handler.create_table()\n                        logging.info('Created database table.')\n                        logging.info(\"Finished\")\n                        os._exit(0)\n\n                    comm_handler.connect()\n\n                    while RUN_FLAG:\n\n                        last_exec_timestamp = int(time.time())\n\n                        # TODO: Building an object and validate data...\n                        recv_data = comm_handler.recv_string()\n\n                        if recv_data:\n\n                            logging.debug(\"Retrieved data: %s\", recv_data)\n\n                            table_handler.insert(recv_data)\n\n                        else:\n                            logging.debug('Timeout...')\n\n                        if (last_exec_timestamp >=\n                                (last_store_timestamp + store_timeout)) or \\\n                                table_handler.count() >= store_max_count:\n\n                            if table_handler.count():\n\n                                logging.debug(\"Storing results...\")\n\n                                table_handler.store()\n                                table_handler.clear()\n\n                                last_store_timestamp = int(time.time())\n\n                else:\n\n                    logging.error(\"Another instance might be already running (PID file: %s)!\" \n                                  % config_file_reader.pid_file)\n                    os._exit(1)\n\n            except Exception as e:\n\n                logging.error(\"Caught exception in inner block: %s\", e)\n                set_run_flag_false()\n                error = True\n\n    except Exception as e:\n\n        logging.error(\"Caught exception in outer block: %s\", e)\n        os._exit(1)\n\n    if table_handler and table_handler.count():\n\n        logging.debug(\"Storing results into database...\")\n\n        table_handler.store()\n        table_handler.clear()\n\n    logging.info(\"Finished\")\n\n    if error:\n        os._exit(1)\n    else:\n        os._exit(0)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"lfsm_database_proxy.py","file_name":"lfsm_database_proxy.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"138445040","text":"from sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport sys\nsys.path.append('/Users/yangzhaobin/projects/ml/ml-demo/package/')\nfrom logistic import logistic\n\n# In this dataset, m = 150 and n = 4\niris = load_iris()\ndata = iris.data\ntarget = iris.target\nX = data[0 : 100, [0, 1]]\ny = target[0 : 100]\nm, n = X.shape\n\nlabel = np.array(y)\nindex_0 = np.where(label.ravel() == 0)\nplt.scatter(X[index_0, 0], X[index_0, 1], marker = 'x', color = 'b', label = '0', s = 15)\nindex_1 = np.where(label.ravel() == 1)\nplt.scatter(X[index_1, 0], X[index_1, 1], marker = 'o', color = 'r', label = '1', s = 15)\n\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.legend(loc = 'upper left')\nplt.show()\n\n\ny = y.reshape((-1, 1)) # reshape is often needed; use it liberally to avoid unnecessary bugs\n#add the x0=1\none = np.ones((m, 1))\nxTrain = np.hstack((one, X))\nclassify = logistic()\ncosts = classify.train(xTrain, y)\nprint('train result : \\n', classify.W)\n\nplt.plot(costs)\nplt.xlabel('Iteration number')\nplt.ylabel('Loss value')\nplt.show()\n\n# Visualize the decision boundary\nlabel = np.array(y)\nindex_0 = np.where(label.ravel() == 0)\nplt.scatter(X[index_0, 0], X[index_0, 1], marker = 'x', color = 'b', label = '0', s = 15)\nindex_1 = np.where(label.ravel() == 1)\nplt.scatter(X[index_1, 0], X[index_1, 1], marker = 'o', color = 'r', label = '1', s = 15)\n\n#show the decision boundary\nx1 = np.arange(4,7.5,0.5)\nx2 = (-classify.W[0] - classify.W[1] * x1) / classify.W[2]\nplt.plot(x1, x2, color = 'black')\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.legend(loc = 'upper left')\nplt.show()","sub_path":"ml-demo/package_demo/logic_regression.py","file_name":"logic_regression.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"317629352","text":"import unittest\nimport requests\n\naddDataURL = 'http://127.0.0.1:5000/post_location'\nbaseURL ='http://127.0.0.1:5000/'\n\nclass testFlask(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tpass\n\n\tdef test_existing_pincode(self):\n\t\tdata = {\n\t\t\t'pincode':'IN/110001',\n\t\t\t'place_name':'Connaught Place',\n\t\t\t'admin_name1':'New 
Delhi',\n\t\t\t'longitude':77.2167,\n\t\t\t'latitude':28.6333,\n\t\t\t'accuracy':4\n\t\t}\n\t\tresult = requests.post(addDataURL, data=data)\n\t\tstring = result.text\n\t\tself.assertIn('This Pincode Already Exists!',string)\n\n\tdef test_proximity_points(self):\n\t\tdata={\n\t\t\t'pincode':'IN/99999',\n\t\t\t'place_name':'test_place',\n\t\t\t'admin_name1':'test_location',\n\t\t\t'longitude':77.2160,\n\t\t\t'latitude':28.6330,\n\t\t\t'accuracy':99\n\t\t}\n\t\tresult = requests.post(addDataURL, data=data)\n\t\tstring = result.text\n\t\tself.assertIn('A location already exists within',string)\n\n\tdef test_key_null(self):\n\t\tdata={\n\t\t\t'pincode':'IN/99999',\n\t\t\t'place_name':'test_place',\n\t\t\t'admin_name1':'test_location',\n\t\t\t'longitude':77.2160,\n\t\t\t'latitude':28.6330,\n\t\t\t'accuracy':99\n\t\t}\n\t\tdata['pincode']='NULL'\n\t\tresult = requests.post(addDataURL,data=data)\n\t\tstring = result.text\n\t\tself.assertIn('You must supply a pincode',string)\n\n\tdef test_place_name_null(self):\n\t\tdata={\n\t\t\t'pincode':'IN/99999',\n\t\t\t'place_name':'test_place',\n\t\t\t'admin_name1':'test_location',\n\t\t\t'longitude':77.2160,\n\t\t\t'latitude':28.6330,\n\t\t\t'accuracy':99\n\t\t}\n\t\tdata['place_name']='NULL'\n\t\tresult = requests.post(addDataURL,data=data)\n\t\tstring = result.text\n\t\tself.assertIn('You must supply a place name',string)\n\n\tdef test_get_using_self(self):\n\t\tresult = requests.get(baseURL+'get_using_self?longitude=77.216&latitude=28.633')\n\t\tstring = result.text\n\t\tself.assertIn('Pincodes within 5Kms of given cordinates:',string)\n\n\tdef test_get_using_postgres(self):\n\t\tresult = requests.get(baseURL+'get_using_postgres?longitude=77.216&latitude=28.633')\n\t\tstring = result.text\n\t\tself.assertIn('Pincodes within 5Kms of given cordinates:',string)\n\n\tdef test_compare_get(self):\n\t\tresult = requests.get(baseURL+'get_using_self?longitude=77.216&latitude=28.633')\n\t\tstring = result.text\n\t\tget_self = string.split(':')\n\t\tresult = requests.get(baseURL+'get_using_postgres?longitude=77.216&latitude=28.633')\n\t\tstring = result.text\n\t\tget_postgres = string.split(':')\n\t\tself.assertTrue(set(get_self[1])==set(get_postgres[1]))\n\n\tdef test_get_incomplete(self):\n\t\tresult = requests.get(baseURL+'get_using_self')\n\t\tstring = result.text\n\t\tself.assertIn('You must supply valid arguments!',string)\n\n\tdef test_get_place(self):\n\t\tresult = requests.get(baseURL+'get_place?longitude=77.216&latitude=28.633')\n\t\tstring = result.text\n\t\tself.assertIn('The Point falls under:',string)\n\n\tdef test_get_place_incomplete(self):\n\t\tresult = requests.get(baseURL+'get_place')\n\t\tstring = result.text\n\t\tself.assertIn('You must supply valid arguments!',string)\n\n\tdef test_get_place_out_of_bound(self):\n\t\tresult = requests.get(baseURL+'get_place?longitude=0&latitude=0')\n\t\tstring = result.text\n\t\tself.assertIn(\"Given point doesn't fall under any known location!\",string)\n\nif __name__=='__main__':\n\tunittest.main()","sub_path":"test_API.py","file_name":"test_API.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223015523","text":"import os\nimport pandas as pd\nimport random\nimport copy\nfrom keras.preprocessing import text, sequence\nimport torch\nfrom torch import nn\nfrom torch.utils import data\nfrom torch.nn import functional as F\nimport numpy as np\nimport time\nimport math\nimport gc\nfrom sklearn.metrics import 
roc_auc_score\n\n\nclass JigsawEvaluator:\n def __init__(self, y_true, y_identity, power=-5, overall_model_weight=0.25):\n self.y = (y_true >= 0.5).astype(int)\n self.y_i = (y_identity >= 0.5).astype(int)\n self.n_subgroups = self.y_i.shape[1]\n self.power = power\n self.overall_model_weight = overall_model_weight\n\n @staticmethod\n def _compute_auc(y_true, y_pred):\n try:\n return roc_auc_score(y_true, y_pred)\n except ValueError:\n return np.nan\n\n def _compute_subgroup_auc(self, i, y_pred):\n mask = self.y_i[:, i] == 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def _compute_bpsn_auc(self, i, y_pred):\n mask = self.y_i[:, i] + self.y == 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def _compute_bnsp_auc(self, i, y_pred):\n mask = self.y_i[:, i] + self.y != 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def compute_bias_metrics_for_model(self, y_pred):\n records = np.zeros((3, self.n_subgroups))\n for i in range(self.n_subgroups):\n records[0, i] = self._compute_subgroup_auc(i, y_pred)\n records[1, i] = self._compute_bpsn_auc(i, y_pred)\n records[2, i] = self._compute_bnsp_auc(i, y_pred)\n return records\n\n def _calculate_overall_auc(self, y_pred):\n return roc_auc_score(self.y, y_pred)\n\n def _power_mean(self, array):\n total = sum(np.power(array, self.power))\n return np.power(total / len(array), 1 / self.power)\n\n def get_final_metric(self, y_pred):\n bias_metrics = self.compute_bias_metrics_for_model(y_pred)\n bias_score = np.average([\n self._power_mean(bias_metrics[0]),\n self._power_mean(bias_metrics[1]),\n self._power_mean(bias_metrics[2])\n ])\n overall_score = self.overall_model_weight * self._calculate_overall_auc(y_pred)\n bias_score = (1 - self.overall_model_weight) * bias_score\n return overall_score + bias_score\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, alpha=1, gamma=2, logits=True, reduce=False):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.logits = logits\n self.reduce = reduce\n\n def forward(self, inputs, targets):\n if self.logits:\n bce_loss = nn.BCEWithLogitsLoss(reduction=\"none\")(inputs, targets)\n else:\n bce_loss = nn.BCELoss(reduction=\"none\")(inputs, targets)\n pt = torch.exp(-bce_loss)\n focal_loss = self.alpha * (1-pt)**self.gamma * bce_loss\n #focal_loss = (1 - pt) ** self.gamma * bce_loss\n if self.reduce:\n return torch.mean(focal_loss)\n else:\n return focal_loss\n\n\nclass SpatialDropout(nn.Dropout2d):\n def forward(self, x):\n x = x.unsqueeze(2) # (N, T, 1, K)\n x = x.permute(0, 3, 2, 1) # (N, K, 1, T)\n x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked\n x = x.permute(0, 3, 2, 1) # (N, T, 1, K)\n x = x.squeeze(2) # (N, T, K)\n return x\n\n\nclass NeuralNet(nn.Module):\n def __init__(self, embedding_matrix):\n super(NeuralNet, self).__init__()\n unique_word_num = embedding_matrix.shape[0]\n embed_size = embedding_matrix.shape[1]\n lstm_size = 128\n dense_size = 512\n # 嵌入层\n self.embedding = nn.Embedding(unique_word_num, embed_size)\n self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))\n self.embedding.weight.requires_grad = False\n self.embedding_dropout = SpatialDropout(0.3)\n # LSTM\n self.lstm1 = nn.LSTM(embed_size, lstm_size, bidirectional=True, batch_first=True)\n self.lstm2 = nn.LSTM(lstm_size * 2, lstm_size, bidirectional=True, batch_first=True)\n # 全连接层\n self.linear1 = nn.Linear(dense_size, dense_size)\n self.linear2 = nn.Linear(dense_size, dense_size)\n self.linear3 = 
nn.Linear(dense_size * 2, dense_size)\n # 输出层\n self.linear_out = nn.Linear(dense_size, 1)\n self.linear_aux_out = nn.Linear(dense_size, 5)\n self.linear_identity_out = nn.Linear(dense_size, 9)\n self.linear_identity_out2 = nn.Linear(dense_size, dense_size)\n self.bn1 = nn.BatchNorm1d(dense_size)\n self.bn2 = nn.BatchNorm1d(dense_size)\n\n def forward(self, x):\n # 嵌入层\n h_embedding = self.embedding(x)\n h_embedding = self.embedding_dropout(h_embedding)\n # LSTM\n h_lstm1, _ = self.lstm1(h_embedding)\n h_lstm2, _ = self.lstm2(h_lstm1)\n # pooling\n avg_pool = torch.mean(h_lstm2, 1)\n max_pool, _ = torch.max(h_lstm2, 1)\n # 全连接层\n h_conc = torch.cat((max_pool, avg_pool), 1)\n\n identity_hidden = self.linear_identity_out2(h_conc)\n identity_hidden = F.relu(identity_hidden)\n #identity_hidden = self.bn1(identity_hidden)\n identity_hidden = F.dropout(identity_hidden, p=0.3)\n identity_result = self.linear_identity_out(identity_hidden)\n h_conc2 = torch.cat((h_conc, identity_hidden), 1)\n gate_hidden = self.linear3(h_conc2)\n #gate_hidden = self.bn2(gate_hidden)\n gate = torch.sigmoid(gate_hidden)\n #gate = F.dropout(gate, p=0.3)\n h_conc = h_conc * gate\n\n h_conc_linear1 = F.relu(self.linear1(h_conc))\n h_conc_linear2 = F.relu(self.linear2(h_conc))\n # 拼接\n hidden = h_conc + h_conc_linear1 + h_conc_linear2\n # 输出层,用 sigmoid 就用 BCELoss,不用 sigmoid 就用 BCEWithLogitsLoss\n result = self.linear_out(hidden)\n aux_result = self.linear_aux_out(hidden)\n out = torch.cat([result, aux_result, identity_result], 1)\n return out\n\n\nclass Trainer:\n def __init__(self, model_name, epochs=5, batch_size=512, part=1., seed=1234, debug_mode=False):\n self.debug_mode = debug_mode\n self.model_name = model_name\n self.seed = seed\n self.identity_list = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n self.toxicity_type_list = ['severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']\n if part == 1.:\n self.weight_dict = {\"severe_toxicity\": 1000, \"obscene\": 235, \"identity_attack\": 236, \"insult\": 22,\n \"threat\": 646, \"male\": 45, \"female\": 35, \"homosexual_gay_or_lesbian\": 176, \"christian\": 50,\n \"jewish\": 249, \"muslim\": 91, \"black\": 130, \"white\": 75, \"psychiatric_or_mental_illness\": 442,\n \"pp\": 101, \"np\": 13, \"pn\": 20, \"nn\": 1,\n \"pp_male\": 431, \"np_male\": 50, \"pn_male\": 17, \"nn_male\": 1,\n \"pp_female\": 384, \"np_female\": 39, \"pn_female\": 17, \"nn_female\": 1,\n \"pp_homosexual_gay_or_lesbian\": 900, \"np_homosexual_gay_or_lesbian\": 219, \"pn_homosexual_gay_or_lesbian\": 17, \"nn_homosexual_gay_or_lesbian\": 1,\n \"pp_christian\": 859, \"np_christian\": 54, \"pn_christian\": 17, \"nn_christian\": 1,\n \"pp_jewish\": 2365, \"np_jewish\": 278, \"pn_jewish\": 17, \"nn_jewish\": 1,\n \"pp_muslim\": 606, \"np_muslim\": 108, \"pn_muslim\": 17, \"nn_muslim\": 1,\n \"pp_black\": 586, \"np_black\": 167, \"pn_black\": 17, \"nn_black\": 1,\n \"pp_white\": 387, \"np_white\": 94, \"pn_white\": 17, \"nn_white\": 1,\n \"pp_psychiatric_or_mental_illness\": 2874, \"np_psychiatric_or_mental_illness\": 523, \"pn_psychiatric_or_mental_illness\": 17, \"nn_psychiatric_or_mental_illness\": 1}\n else:\n self.weight_dict = {\"severe_toxicity\": 1000, \"obscene\": 196, \"identity_attack\": 278, \"insult\": 22,\n \"threat\": 609, \"male\": 45, \"female\": 33, \"homosexual_gay_or_lesbian\": 198, \"christian\": 48,\n \"jewish\": 243, \"muslim\": 133, \"black\": 131, \"white\": 90, 
\"psychiatric_or_mental_illness\": 369,\n \"pp\": 107, \"np\": 13, \"pn\": 19, \"nn\": 1,\n \"pp_male\": 434, \"np_male\": 51, \"pn_male\": 17, \"nn_male\": 1,\n \"pp_female\": 324, \"np_female\": 37, \"pn_female\": 17, \"nn_female\": 1,\n \"pp_homosexual_gay_or_lesbian\": 1055, \"np_homosexual_gay_or_lesbian\": 244, \"pn_homosexual_gay_or_lesbian\": 17, \"nn_homosexual_gay_or_lesbian\": 1,\n \"pp_christian\": 986, \"np_christian\": 50, \"pn_christian\": 17, \"nn_christian\": 1,\n \"pp_jewish\": 2680, \"np_jewish\": 268, \"pn_jewish\": 16, \"nn_jewish\": 1,\n \"pp_muslim\": 772, \"np_muslim\": 161, \"pn_muslim\": 17, \"nn_muslim\": 1,\n \"pp_black\": 633, \"np_black\": 165, \"pn_black\": 17, \"nn_black\": 1,\n \"pp_white\": 465, \"np_white\": 111, \"pn_white\": 17, \"nn_white\": 1,\n \"pp_psychiatric_or_mental_illness\": 2748, \"np_psychiatric_or_mental_illness\": 427, \"pn_psychiatric_or_mental_illness\": 16, \"nn_psychiatric_or_mental_illness\": 1}\n self.stopwords = '!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n“”’\\'∞θ÷α•à−β∅³π‘₹´°£€\\×™√²—'\n self.seed_everything()\n self.max_len = 220\n self.epochs = epochs\n self.batch_size = batch_size\n self.split_ratio = 0.95\n self.sample_num = 1804874\n if not self.debug_mode:\n self.train_df = pd.read_csv(\"../input/jigsaw-unintended-bias-in-toxicity-classification/predict.csv\").sample(int(self.sample_num * part), random_state=1234).fillna(0.)\n self.test_df = pd.read_csv(\"../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv\")\n else:\n self.train_df = pd.read_csv(\"../input/jigsaw-unintended-bias-in-toxicity-classification/predict.csv\").head(1000).fillna(0.)\n self.test_df = pd.read_csv(\"../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv\").head(1000)\n self.train_len = int(len(self.train_df) * self.split_ratio)\n self.evaluator = self.init_evaluator()\n\n def seed_everything(self):\n random.seed(self.seed)\n os.environ['PYTHONHASHSEED'] = str(self.seed)\n np.random.seed(self.seed)\n torch.manual_seed(self.seed)\n torch.cuda.manual_seed(self.seed)\n torch.backends.cudnn.deterministic = True\n\n def init_evaluator(self):\n # 初始化评分函数类\n y_true = self.train_df['target'].values\n y_identity = self.train_df[self.identity_list].values\n valid_y_true = y_true[self.train_len:]\n valid_y_identity = y_identity[self.train_len:]\n evaluator = JigsawEvaluator(valid_y_true, valid_y_identity) # y_true 必须是0或1,不能是离散值\n return evaluator\n\n def create_dataloader(self):\n # 读取输入输出\n train_comments = self.train_df[\"comment_text\"].astype(str)\n train_label = self.train_df[\"target\"].values\n train_type_labels = self.train_df[self.toxicity_type_list].values\n\n # 身份原始值\n train_identity_values = self.train_df[self.identity_list].fillna(0.).values\n # 所有身份原始值之和\n train_identity_sum = train_identity_values.sum(axis=1)\n # 将身份之和限制在1以下(sigmoid)\n train_identity_sum_label = np.where(train_identity_sum > 1, 1, train_identity_sum)\n # 身份01值\n train_identity_binary = copy.deepcopy(self.train_df[self.identity_list])\n for column in self.identity_list:\n train_identity_binary[column] = np.where(train_identity_binary[column] > 0.5, 1, 0)\n # 身份01值有一个就算1\n train_identity_binary_sum = train_identity_binary.sum(axis=1)\n train_identity_or_binary = np.where(train_identity_binary_sum >= 1, 1, 0)\n # 所有身份标签\n train_identity_type_labels = train_identity_values\n train_identity_type_binary_lables = train_identity_binary\n train_identity_sum_label = train_identity_sum_label\n train_identity_binary_label = train_identity_or_binary\n\n # tokenizer 
训练\n test_comments = self.test_df[\"comment_text\"].astype(str)\n tokenizer = text.Tokenizer(filters=self.stopwords)\n tokenizer.fit_on_texts(list(train_comments) + list(test_comments)) # train_comments 是 dataframe 的一列,是 Series 类, list(train_comments) 直接变成 list\n # tokenization\n train_tokens = tokenizer.texts_to_sequences(train_comments) # 可以给 Series 也可以给 list?\n test_tokens = tokenizer.texts_to_sequences(test_comments)\n # 用 sequence 类补到定长\n train_tokens = sequence.pad_sequences(train_tokens, maxlen=self.max_len)\n test_tokens = sequence.pad_sequences(test_tokens, maxlen=self.max_len)\n # 划分训练集和验证集\n valid_tokens = train_tokens[self.train_len:]\n valid_label = train_label[self.train_len:]\n valid_type_labels = train_type_labels[self.train_len:]\n train_tokens = train_tokens[:self.train_len]\n train_label = train_label[:self.train_len]\n train_type_labels = train_type_labels[:self.train_len]\n valid_identity_type_labels = train_identity_type_labels[self.train_len:]\n train_identity_type_labels = train_identity_type_labels[:self.train_len]\n valid_identity_type_binary_lables = train_identity_type_binary_lables[self.train_len:]\n train_identity_type_binary_lables = train_identity_type_binary_lables[:self.train_len]\n valid_identity_sum_label = train_identity_sum_label[self.train_len:]\n train_identity_sum_label = train_identity_sum_label[:self.train_len]\n valid_identity_binary_label = train_identity_binary_label[self.train_len:]\n train_identity_binary_label = train_identity_binary_label[:self.train_len]\n\n # 计算样本权重\n target_weight, aux_weight, identity_weight = self.cal_sample_weights()\n\n # 将符号化数据转成 tensor\n train_x_tensor = torch.tensor(train_tokens, dtype=torch.long)\n valid_x_tensor = torch.tensor(valid_tokens, dtype=torch.long)\n train_y_tensor = torch.tensor(np.hstack([train_label[:, np.newaxis], train_type_labels, train_identity_type_labels]), dtype=torch.float32)\n valid_y_tensor = torch.tensor(np.hstack([valid_label[:, np.newaxis], valid_type_labels, valid_identity_type_labels]), dtype=torch.float32)\n target_weight_tensor = torch.tensor(target_weight, dtype=torch.float32)\n aux_weight_tensor = torch.tensor(aux_weight, dtype=torch.float32)\n identity_weight_tensor = torch.tensor(identity_weight, dtype=torch.float32)\n if torch.cuda.is_available():\n train_x_tensor = train_x_tensor.cuda()\n valid_x_tensor = valid_x_tensor.cuda()\n train_y_tensor = train_y_tensor.cuda()\n valid_y_tensor = valid_y_tensor.cuda()\n target_weight_tensor = target_weight_tensor.cuda()\n aux_weight_tensor = aux_weight_tensor.cuda()\n identity_weight_tensor = identity_weight_tensor.cuda()\n # 将 tensor 转成 dataset,训练数据和标签一一对应,用 dataloader 加载的时候 dataset[:-1] 是 x,dataset[-1] 是 y\n train_dataset = data.TensorDataset(train_x_tensor, train_y_tensor, target_weight_tensor, aux_weight_tensor, identity_weight_tensor)\n valid_dataset = data.TensorDataset(valid_x_tensor, valid_y_tensor)\n # 将 dataset 转成 dataloader\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=self.batch_size, shuffle=False)\n # 返回训练数据\n return train_loader, valid_loader, tokenizer\n\n def cal_sample_weights(self):\n # aux weight\n aux_weight = np.zeros((len(self.train_df), len(self.toxicity_type_list)))\n for i, column in enumerate(self.toxicity_type_list):\n weight = math.pow(self.weight_dict[column], 0.5)\n aux_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)\n # identity weight\n identity_weight = 
np.zeros((len(self.train_df), len(self.identity_list)))\n for i, column in enumerate(self.identity_list):\n weight = math.pow(self.weight_dict[column], 0.5)\n identity_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)\n # target weight\n for column in self.identity_list + [\"target\"]:\n self.train_df[column] = np.where(self.train_df[column] > 0.5, True, False)\n target_weight = np.ones(len(self.train_df))\n target_weight += self.train_df[\"target\"]\n if False:\n target_weight += (~self.train_df[\"target\"]) * self.train_df[self.identity_list].sum(axis=1)\n target_weight += self.train_df[\"target\"] * (~self.train_df[self.identity_list]).sum(axis=1) * 5\n else:\n target_weight += (~self.train_df[\"target\"]) * np.where(self.train_df[self.identity_list].sum(axis=1) > 0, 1, 0) * self.weight_dict[\"np\"] / 4\n target_weight += self.train_df[\"target\"] * np.where(self.train_df[self.identity_list].sum(axis=1) == 0, 1, 0) * self.weight_dict[\"pn\"] / 4\n target_weight /= target_weight.mean()\n # keep only the training split\n target_weight = np.array(target_weight)\n target_weight = target_weight[:self.train_len]\n aux_weight = aux_weight[:self.train_len, :]\n identity_weight = identity_weight[:self.train_len, :]\n return target_weight, aux_weight, identity_weight\n\n def create_emb_weights(self, word_index):\n # build the word-embedding dictionaries\n with open(\"../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec\", \"r\") as f:\n fasttext_emb_dict = {}\n for i, line in enumerate(f):\n if i == 1000 and self.debug_mode: break\n split = line.strip().split(\" \")\n word = split[0]\n if word not in word_index: continue\n emb = np.array([float(num) for num in split[1:]])\n fasttext_emb_dict[word] = emb\n with open(\"../input/glove840b300dtxt/glove.840B.300d.txt\", \"r\") as f:\n glove_emb_dict = {}\n for i, line in enumerate(f):\n if i == 1000 and self.debug_mode: break\n split = line.strip().split(\" \")\n word = split[0]\n if word not in word_index: continue\n emb = np.array([float(num) for num in split[1:]])\n glove_emb_dict[word] = emb\n # build the embedding matrix for every word seen in train or test\n word_embedding = np.zeros((len(word_index) + 1, 600)) # the tokenizer reserves index 0 for padding\n np.random.seed(1234)\n fasttext_random_emb = np.random.uniform(-0.25, 0.25, 300) # fallback when fasttext does not know the word\n np.random.seed(1235)\n glove_random_emb = np.random.uniform(-0.25, 0.25, 300) # fallback when glove does not know the word\n for word, index in word_index.items():\n # if no embedding is found, try lowercase, then title case, then uppercase\n if word not in fasttext_emb_dict and word not in glove_emb_dict:\n word = word.lower()\n if word not in fasttext_emb_dict and word not in glove_emb_dict:\n word = word.title()\n if word not in fasttext_emb_dict and word not in glove_emb_dict:\n word = word.upper()\n fasttext_emb = fasttext_emb_dict[word] if word in fasttext_emb_dict else fasttext_random_emb\n glove_emb = glove_emb_dict[word] if word in glove_emb_dict else glove_random_emb\n word_embedding[index] = np.concatenate((fasttext_emb, glove_emb), axis=-1)\n return np.array(word_embedding)\n\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n def custom_loss(self, y_pred, y_batch, epoch, target_weight=1., aux_weight=1., identity_weight=1.):\n target_pred = y_pred[:, 0]\n target_true = y_batch[:, 0]\n aux_pred = y_pred[:, 1: 6]\n aux_true = y_batch[:, 1: 6]\n identity_pred = y_pred[:, 6:]\n identity_true = y_batch[:, 6:]\n if epoch > 9:\n target_loss = FocalLoss()(target_pred, target_true)\n else:\n target_loss = nn.BCEWithLogitsLoss(reduction=\"none\")(target_pred, target_true)\n target_loss = torch.mean(target_loss * target_weight)\n if epoch > 9:\n aux_loss = 
FocalLoss()(aux_pred, aux_true)\n else:\n aux_loss = nn.BCEWithLogitsLoss(reduction=\"none\")(aux_pred, aux_true)\n aux_loss = torch.mean(aux_loss * aux_weight)\n if epoch > 9:\n identity_loss = FocalLoss()(identity_pred, identity_true)\n else:\n identity_loss = nn.BCEWithLogitsLoss(reduction=\"none\")(identity_pred, identity_true)\n identity_loss = torch.mean(identity_loss * identity_weight)\n return target_loss, aux_loss, identity_loss\n\n def train(self):\n if self.debug_mode: self.epochs = 1\n # build the dataloaders\n train_loader, valid_loader, tokenizer = self.create_dataloader()\n # build the embedding matrix\n word_embedding = self.create_emb_weights(tokenizer.word_index)\n # training\n self.seed_everything()\n model = NeuralNet(word_embedding)\n if torch.cuda.is_available():\n model.cuda()\n lr = 1e-3\n # param_lrs = [{'params': param, 'lr': lr} for param in model.parameters()] # different layers can be given different learning rates\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n # learning-rate decay schedule\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.6 ** epoch)\n # loss function\n loss_fn = nn.BCEWithLogitsLoss(reduction='mean')\n # training loop\n previous_auc_score = 0\n stop_flag = 0\n for epoch in range(self.epochs):\n start_time = time.time()\n # step the learning-rate schedule once\n if epoch <= 10:\n scheduler.step()\n # switch to training mode\n model.train()\n # reset the running loss for this epoch\n avg_loss = 0.\n # iterate over the batches and train\n for batch_data in train_loader:\n x_batch = batch_data[0]\n y_batch = batch_data[1]\n target_weight_batch = batch_data[2]\n aux_weight_batch = batch_data[3]\n identity_weight_batch = batch_data[4]\n #y_pred = model(*x_batch)\n y_pred = model(x_batch)\n target_loss, aux_loss, identity_loss = self.custom_loss(y_pred, y_batch, epoch, target_weight_batch, aux_weight_batch, identity_weight_batch)\n loss = target_loss + aux_loss + identity_loss\n #loss = loss_fn(y_pred, y_batch)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n avg_loss += loss.item() / len(train_loader)\n # evaluate on the validation set\n model.eval()\n y_pred = np.zeros((len(self.train_df) - self.train_len))\n for i, batch_data in enumerate(valid_loader):\n x_batch = batch_data[:-1]\n y_batch = batch_data[-1]\n batch_y_pred = self.sigmoid(model(*x_batch).detach().cpu().numpy())[:, 0]\n y_pred[i * self.batch_size: (i + 1) * self.batch_size] = batch_y_pred\n # compute the score\n auc_score = self.evaluator.get_final_metric(y_pred)\n print(\"epoch: %d duration: %d min auc_score: %.4f\" % (epoch, int((time.time() - start_time) / 60), auc_score))\n if not self.debug_mode and epoch > 0:\n temp_dict = model.state_dict()\n del temp_dict['embedding.weight']\n torch.save(temp_dict, \"model[pytorch][%d][%s][%d][%.4f].bin\" % (self.seed, self.model_name, epoch, auc_score))\n # delete the training inputs and the model\n training_history = [train_loader, valid_loader, tokenizer, word_embedding, model, optimizer, scheduler]\n for variable in training_history:\n del variable\n gc.collect()\n\n\nprint(\"train18_correct_weight.py\")\ntrainer = Trainer(model_name=\"train10_focal_loss_seed_kernel\", epochs=25, batch_size=512, part=1., seed=1234, debug_mode=False)\ntrainer.train()\n\n\"\"\"\nfasttext-crawl-300d-2m\nglove840b300dtxt\n\"\"\"","sub_path":"train_pytorch/train18_correct_weight.py","file_name":"train18_correct_weight.py","file_ext":"py","file_size_in_byte":25034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555717122","text":"import argparse, math, random, gzip, pickle, types\nimport numpy as np\nfrom functools import partial\nimport multiprocessing\n\nfrom collections import defaultdict\n\ndef 
rate_symm(fi, fj):\n return math.exp(-(fj - fi) / 2)\n\ndef rate_metropolis(fi, fj):\n return math.exp(-min(0, fj - fi))\n\ndef rate_matrix_1d(F, rate_fcn, x_F_min, x_F_max):\n states = sorted(x for x in F if x >= x_F_min and x <= x_F_max)\n N = len(states)\n rates = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n if abs(j - i) == 1:\n rates[i,j] = rate_fcn(F[states[i]], F[states[j]])\n for i in range(N):\n rates[i,i] = -sum(rates[i,:])\n return rates, states\n\ndef committors(rates, source=[0], sink=[-1], reverse=False):\n if 0 not in source or len(source) != max(source) + 1 or -1 not in sink or len(sink) != -min(sink):#Format\n raise Exception(\"invalid order of source and sink states\")\n N = rates.shape[0]\n q = np.zeros(N)#States number\n if not reverse:\n b = np.array([-sum(rates[i,j] for j in sink) for i in range(len(source),N-len(sink))])#Check\n for j in sink:\n q[j] = 1.\n else:\n # Check the following line:\n b = np.array([-sum(rates[i,j] for j in source) for i in range(len(source),N-len(sink))])\n for j in source:\n q[j] = 1.\n q[max(source)+1:min(sink)] \\\n = np.linalg.solve(rates[max(source)+1:min(sink),max(source)+1:min(sink)], b)\n return q\n\ndef stationary(rates):\n eig = np.linalg.eig(rates)\n pi = np.fabs(eig[1][:,np.argmax(eig[0])])\n return pi / np.sum(pi)\n\ndef reaction_rate(pi, T, source, sink):\n q = committors(T, source=source, sink=sink)\n k = sum(sum(pi[i] * T[i,j] * (q[j] - q[i]) \\\n for j in range(len(pi)) if j not in source and q[j] > q[i]) \\\n for i in range(len(pi)) if i in source)\n return k\n\ndef sample_transition_paths_1d(rates, id, save_time_only=False):\n N = rates.shape[0]\n i = 0\n tp = [(i, 0.)]\n ntps = 0\n while True:\n if i == 0 or i == N - 1:\n if tp[0][0] != i:# This path is a transition path\n if id % 1 == 0: print('step: %d \\n' % (id))\n if not save_time_only:\n #pickle.dump(tp, stream)\n return tp\n else:\n #pickle.dump((len(tp), sum(t[1] for t in tp)), stream)\n return (len(tp), sum(t[1] for t in tp))\n ntps += 1\n\n if ntps >= npaths:\n break\n tp = [(i, 0.)]\n if i == 0:\n j = i + 1\n cumulative_rate = rates[i,j]\n elif i == N - 1:\n j = i - 1\n cumulative_rate = rates[i,j]\n else:\n cumulative_rate = rates[i,i-1] + rates[i,i+1]\n r = cumulative_rate * np.random.random()\n if r < rates[i,i-1]:\n j = i - 1\n else:\n j = i + 1\n i = j\n dt = np.log(1 / random.random()) / cumulative_rate\n tp.append((i, dt))\n# def transition_path_length_distribution(stream, nbins=10):\n# minL = min(len(tp) for tp in tps)\n# maxL = max(len(tp) for tp in tps) + 1\n# dL = (maxL - minL) / nbins\n# hist = np.zeros(nbins)\n# for tp in tps:\n# hist[int((len(tp) - minL) / dL)] += 1\n# return {minL + dL * (i + 0.5) : hist[i] / np.sum(hist) for i in range(nbins)}\n\ndef transition_path_time_distribution(stream, nbins=10):\n times = []\n while True:\n try:\n tp = pickle.load(stream)\n if isinstance(tp,list):\n times.append(sum(t[1] for t in tp))\n #print(times[-1])\n else:\n times.append(tp[1]) # Time information only\n except (IOError, EOFError):\n break\n print(\"Loaded %d transition paths\" % len(times))\n def bootstrap_fcn_err(fcn, x, nsamples=100):\n return np.std([fcn(np.random.choice(x, len(x))) for i in range(nsamples)])\n meantime = np.mean(times)\n print(\"average time:\", meantime, bootstrap_fcn_err(np.mean, times))\n times = [t / meantime for t in times]\n # stddevtime = np.std(times)\n # times = [t / stddevtime for t in times]\n mint = 0. # min(times)\n maxt = 32. 
# max(times) + 1 change!\n print(\"time stddev / mean:\", np.std(times), bootstrap_fcn_err(np.std, times))\n dt = (maxt - mint) / nbins\n hist = np.zeros(nbins)\n for i in range(len(times)):\n hist[int((times[i] - mint) / dt)] += 1\n return {mint + dt * (i + 0.5) : (hist[i] / (dt * np.sum(hist)), np.sqrt(hist[i]) / (dt * np.sum(hist))) for i in range(nbins)}\n\ndef transition_path_time_cdf(streamin, streamout):\n times = []\n while True:\n try:\n tp = pickle.load(streamin)\n if isinstance(tp,list):\n times.append(sum(t[1] for t in tp))\n else:\n times.append(tp[1]) # Time information only\n except (IOError, EOFError):\n break\n print(\"Loaded %d transition paths\" % len(times))\n times.sort()\n streamout.write(\"%g %g\\n\" % (0, 0))\n for i in range(len(times)):\n streamout.write(\"%g %g\\n\" % (times[i], (i + 1) / len(times)))\n\ndef jump_size_distribution(stream, dts, ddt=1, step_size=1):\n steps = {dt : defaultdict(int) for dt in dts}\n msd = {dt : 0. for dt in dts}\n ntps = 0\n while True:\n try:\n tp = pickle.load(stream)\n ntps += 1\n except (IOError, EOFError):\n break\n cumulative_time = sum(t[1] for t in tp)\n x_t = np.zeros(int(cumulative_time / ddt) + 2)\n i, t = 0, 0.\n for k in range(int(cumulative_time / ddt) + 2):\n while True:\n if i < len(tp) - 1 and \\\n math.fabs(t + tp[i + 1][1] - k * ddt) < math.fabs(t - k * ddt):\n i += 1\n t += tp[i][1]\n else:\n x_t[k] = tp[i][0]\n break\n for dt in dts:\n drift = (tp[-1][0] - tp[0][0]) / (cumulative_time / dt)\n for t in range(int(dt / ddt), len(x_t)):\n dx = (x_t[t] - x_t[t - int(dt / ddt)]) - drift #Should be - drift?\n msd[dt] += dx**2\n if dx > 0:\n steps[dt][int(dx / step_size + 0.5)] += 1\n else:\n steps[dt][int(dx / step_size - 0.5)] += 1\n del tp, x_t\n print(\"Loaded %d transition paths\" % ntps)\n norm = {dt : sum(steps[dt].values()) for dt in dts}\n return {dt : msd[dt] / norm[dt] for dt in dts}, \\\n {dt : {i * step_size : steps[dt][i] / norm[dt] \\\n for i in range(min(steps[dt]), max(steps[dt]) + 1)} for dt in dts}\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('landscape', type=str, help=\"path to input landscape file\")\n parser.add_argument('--stored-paths', type=str, default='tps.p.gz', \\\n help=\"path to stored paths [tps.p.gz]\")\n parser.add_argument('--time-only', action='store_true', help=\"save length/time only [False]\")\n clargs = parser.parse_args()\n\n with open(clargs.landscape, 'r') as f:\n # the builtin float replaces the np.float alias, which newer NumPy releases removed\n F = {float(line.split()[0]) : float(line.split()[1]) for line in f \\\n if len(line) > 1 and line[0] != '#'}\n x_F_mid = (min(F) + max(F)) / 2\n x_F_min = min((z for z in F.items() if z[0] < x_F_mid), key=lambda z: (z[1], z[0]))[0]\n x_F_max = min((z for z in F.items() if z[0] > x_F_mid), key=lambda z: (z[1], -z[0]))[0]\n x_F_barrier = max((z for z in F.items() if z[0] > x_F_min and z[0] < x_F_max), \\\n key=lambda z: z[1])[0]\n\n print(min(F), x_F_min, x_F_barrier, x_F_max, max(F))\n print(\"barrier:\", (2 * F[x_F_barrier] - F[x_F_min] - F[x_F_max]) / 2)\n\n T, states = rate_matrix_1d(F, rate_symm, x_F_min, x_F_max)\n print(\"nstates =\", len(states))\n pi = stationary(np.transpose(T))\n q = committors(T)\n m = pi * q * (1. - q)\n kreaction = reaction_rate(pi, T, [0], [-1])\n print(\"log(reaction rate) =\", math.log(kreaction))\n print(\"Writing tpt_1d.dat\")\n with open('tpt_1d.dat', 'w') as f:\n f.write(\"# x q^+(x) p(x|TP) p(TP|x)\\n\")\n for i in range(len(q)):\n f.write(\"%g %g %g %g\\n\" % (states[i], q[i], m[i] / sum(m), 2. 
* m[i] / pi[i]))\n\n npaths = 50000\n\n print(\"Sampling %d transition paths and writing to %s...\" % (npaths, clargs.stored_paths))\n with gzip.open(clargs.stored_paths, 'wb') as f:\n print('cores='+str(multiprocessing.cpu_count()))\n task=partial(sample_transition_paths_1d, T, save_time_only=clargs.time_only)\n pool = multiprocessing.Pool() # creates a pool of processes and manages the workers\n # pool.map only accepts one iterable, so use the partial function\n # so that we only need to deal with one variable.\n #task(npaths)\n A=list(pool.map(task, np.arange(npaths))) # make our results with a map call\n\n for obj in A:\n pickle.dump(obj, f)\n\n pool.close() # we are not adding any more processes\n\n # print(\"Writing simulated_tp_length_distribution.dat\")\n # with gzip.open(clargs.stored_paths, 'rb') as f_tps, \\\n # open('simulated_tp_length_distribution.dat', 'w') as f:\n # for L,p in transition_path_length_distribution(f_tps).items():\n # f.write(\"%g %g\\n\" % (L, p))\n print(\"Writing simulated_tp_time_distribution.dat\")\n with gzip.open(clargs.stored_paths, 'rb') as f_tps, \\\n open('simulated_tp_time_distribution.dat', 'w') as f:\n for t,p in transition_path_time_distribution(f_tps, nbins=400).items():\n f.write(\"%g %g %g\\n\" % (t, p[0], p[1]))\n print(\"Writing simulated_tp_time_cdf.dat\")\n with gzip.open(clargs.stored_paths, 'rb') as f_tps, \\\n open('simulated_tp_time_cdf.dat', 'w') as f:\n transition_path_time_cdf(f_tps, f)\n\n step_size = 1\n with gzip.open(clargs.stored_paths, 'rb') as f_tps:\n msd, steps = jump_size_distribution(f_tps, [2.**p for p in range(8)], step_size=step_size)\n print(\"Writing simulated_tp_msd.dat\")\n with open('simulated_tp_msd.dat', 'w') as f:\n for dt in msd:\n f.write(\"%g %g\\n\" % (dt, msd[dt]))\n print(\"Writing simulated_tp_step_size_distribution.dat\")\n with open('simulated_tp_step_size_distribution.dat', 'w') as f:\n for dt in steps:\n for i in range(min(steps[dt]), max(steps[dt]) + 1):\n f.write(\"%g %g %g\\n\" % (math.log(dt) / math.log(2), i * step_size, steps[dt][i]))\n f.write(\"\\n\")\n","sub_path":"4Barriers/simulate_tp_1d.py","file_name":"simulate_tp_1d.py","file_ext":"py","file_size_in_byte":10595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458146316","text":"from subprocessing import bash_command\n# run on linux (because of subprocess)\ninfile = open('embargolist.txt', 'r')\nlines = infile.readlines()\n\nfor line in lines:\n clean_line = line.strip()\n X, Y = clean_line.split('.xml')\n F = X.replace(\"_DATA-out\", \"\")\n date = str(Y).split('/')\n s = '{}-{}'\n target = s.format(date[2], date[0])\n source = 'mv /mnt/c/Users/rwolfsla/Desktop/ETD_TEST/Embargo/*/{}* /mnt/c/Users/rwolfsla/Desktop/ETD_TEST/embargo_store/{}'.format(F,target)\n bash_command(source)\n","sub_path":"Sup/embargo_bash/embargo.py","file_name":"embargo.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11375635","text":"\"\"\"\n@author: Viet Nguyen \n\"\"\"\nimport os\nimport pickle\nimport random\nimport string\nfrom flask import Flask, request, render_template\nimport torch\nimport torch.nn.functional as F\nimport csv\nimport pandas as pd\nfrom keras_preprocessing.sequence import pad_sequences\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport numpy as np\n\nfrom keras_han.model import HAN\n\napp = Flask(__name__)\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nIMAGES_FOLDER = 
\"flask_images\"\nrand_str = lambda n: \"\".join([random.choice(string.ascii_letters + string.digits) for _ in range(n)])\n\nmodel = None\nword2vec = None\nmax_length_sentences = 0\nmax_length_word = 0\nnum_classes = 0\ncategories = None\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"main.html\")\n\n@app.route(\"/input\")\ndef new_input():\n return render_template(\"input.html\")\n\n@app.route(\"/show\", methods=[\"POST\"])\ndef show():\n global han_model, embedding_matrix, word_tokenizer, MAX_WORDS_PER_SENT, MAX_SENT, MAX_VOC_SIZE,\\\n GLOVE_DIM, categories\n MAX_WORDS_PER_SENT = 120\n MAX_SENT = 25\n MAX_VOC_SIZE = 300600\n GLOVE_DIM = 400\n embedding_matrix = pickle.load(open(\"./model/embedding_matrix.pkl\", \"rb\"))\n word_tokenizer = pickle.load(open(\"./model/word_tokenizer.pkl\", \"rb\"))\n han_model = HAN(\n MAX_WORDS_PER_SENT, MAX_SENT, 10, embedding_matrix,\n word_encoding_dim=400, sentence_encoding_dim=200\n )\n han_model.load_weights(\"./model/model.hdf5\")\n categories = pickle.load(open(\"./model/classes.pkl\", \"rb\"))\n return render_template(\"input.html\")\n\n\n@app.route(\"/result\", methods=[\"POST\"])\ndef result():\n global han_model, embedding_matrix, word_tokenizer, MAX_WORDS_PER_SENT, MAX_SENT, MAX_VOC_SIZE, \\\n GLOVE_DIM, categories\n text = request.form[\"message\"]\n\n x_test = [text]\n X_test_num = np.zeros((len(x_test), MAX_SENT, MAX_WORDS_PER_SENT), dtype='int32')\n\n for i, review in enumerate(x_test):\n sentences = sent_tokenize(review)\n tokenized_sentences = word_tokenizer.texts_to_sequences(\n sentences\n )\n tokenized_sentences = pad_sequences(\n tokenized_sentences, maxlen=MAX_WORDS_PER_SENT\n )\n\n pad_size = MAX_SENT - tokenized_sentences.shape[0]\n\n if pad_size < 0:\n tokenized_sentences = tokenized_sentences[0:MAX_SENT]\n else:\n tokenized_sentences = np.pad(\n tokenized_sentences, ((0, pad_size), (0, 0)),\n mode='constant', constant_values=0\n )\n\n # Store this observation as the i-th observation in\n # the data matrix\n X_test_num[i] = tokenized_sentences[None, ...]\n\n result = han_model.predict(X_test_num)\n id = np.argmax(result[0])\n prob = \"{:.2f} %\".format(float(result[0][id])*100)\n print(prob)\n category = categories[id]\n\n return render_template(\"result.html\", text=text, value=prob, index=category)\n\n\nif __name__ == \"__main__\":\n # app.secret_key = os.urandom(12)\n app.run(host=\"0.0.0.0\", port=4555, threaded=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443684405","text":"# testing all constants used\n\nimport unittest\nfrom takai.constants import *\n\nclass ConstantsTest(unittest.TestCase):\n '''Tests all constants defined in takai/takai/constants.py'''\n\n def test_http_request_parameters(self):\n self.assertEqual(PROTOCOL, 'https')\n self.assertEqual(DOMAIN, 'mpowerpayments.com')\n self.assertEqual(VERSION, 'v1')\n self.assertEqual(USER_AGENT, 'Pretty Egbert')\n self.assertEqual(REST_SUBDOMAIN, 'app')\n self.assertEqual(REST_SOCKET_TIMEOUT, 5)\n\n\n def test_http_rest_endpoints(self):\n endpoints = {\n 'checkout-invoice/create',\n 'opr/create',\n 'opr/charge',\n 'direct-pay/credit-account',\n 'direct-card/processcard',\n 'checkout-invoice/confirm/'\n }\n\n self.assertEqual(endpoints, set(REST_ENDPOINTS.keys()))\n\n def test_http_rest_endpoints_methods(self):\n for endpoint in REST_ENDPOINTS.keys():\n if endpoint.endswith('confirm/'):\n 
self.assertEqual(REST_ENDPOINTS[endpoint], HttpMethods.GET)\n\n else:\n self.assertEqual(REST_ENDPOINTS[endpoint], HttpMethods.POST)\n","sub_path":"takai/test/test_constants.py","file_name":"test_constants.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305988932","text":"# coding=utf-8\n\n# A queue implementation in Python.\n# The queue abstract data type is implemented as a new class,\n# with the queue operations implemented as methods:\n# emptiness test, enqueue, dequeue and size.\n\n# ADT myQueue\n# myQueue(self) create an empty queue\n# isEmpty(self) return whether the queue is empty\n# enqueue(self, item) push item onto the queue\n# dequeue(self) remove the element that entered the queue first\n# size(self) return the number of elements in the queue\n\n\nclass Empty(Exception):\n \"\"\"Error attempting to access an element from an empty container.\"\"\"\n pass\n\n\nclass ArrayQuene:\n \"\"\"FIFO queue implementation using a Python list as underlying storage.\"\"\"\n DEFAULT_CAPACITY = 10\n \n def __init__(self):\n \"\"\"Create an empty queue\"\"\"\n self._data = [None] * ArrayQuene.DEFAULT_CAPACITY\n self._size = 0\n self._front = 0\n \n def __len__(self):\n \"\"\"Return the number of elements in the queue\"\"\"\n return self._size\n \n def isEmpty(self):\n \"\"\"Return True if the queue is empty.\"\"\"\n return self._size == 0\n \n def first(self):\n \"\"\"Return (but do not remove) the element at the front of the queue.\n Raise Empty exception if the queue is empty\"\"\"\n if self.isEmpty():\n raise Empty('myQueue is empty')\n return self._data[self._front]\n \n def dequeue(self):\n \"\"\"Remove and return the first element of the queue\n Raise Empty exception if the queue is empty.\"\"\"\n if self.isEmpty():\n raise Empty('myQueue is empty')\n answer = self._data[self._front]\n self._data[self._front] = None\n self._front = (self._front + 1) % len(self._data)\n self._size -= 1\n return answer\n \n def enqueue(self, e):\n \"\"\"Add an element to the back of queue\"\"\"\n if self._size == len(self._data):\n self._resize(2 * len(self._data))\n avail = (self._front + self._size) % len(self._data)\n self._data[avail] = e\n self._size += 1\n \n def _resize(self, cap):\n \"\"\"Resize to a new list of capacity >= len(self).\"\"\"\n old = self._data\n self._data = [None] * cap\n walk = self._front\n for k in range(self._size):\n self._data[k] = old[walk]\n walk = (1 + walk) % len(old)\n self._front = 0\n\n# The lines below exercise the queue implemented above\n\n# s = ArrayQuene()\n# s.enqueue(1)\n# s.enqueue(2)\n# s.enqueue(3)\n\n\n# print(len(s))\n# print(s.dequeue())\n","sub_path":"myQueue/MyQuene.py","file_name":"MyQuene.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351345325","text":"import boto3\n\nclientsns = boto3.client('sns')\n\nclient = boto3.client('ses')\nresponse = client.get_identity_verification_attributes(\n Identities=[\n 'example.com',\n ],\n)\ndata = response['VerificationAttributes']['example.com']['VerificationStatus']\n\nif data != 'Success':\n clientsns.publish(\n TopicArn='YOUR SNS TOPIC ARN',\n Subject='DOMAIN CHECK',\n Message='Your domain is unverified'\n\n )\nelse:\n print(\"Everything is okay\")\n\n","sub_path":"sescheck.py","file_name":"sescheck.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480258504","text":"import sys\nfrom os import system\n\nfrom iu7_basemodule import start_check\n\n\ndef easy_check(x):\n skip = x\n temp = start_check(x)\n if temp[1] is False and temp[0].isdigit() is True:\n x = [skip, temp[1]]\n return x\n else:\n x = [skip, temp[1]]\n 
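# non-numeric or flagged token: fall through with the same [original value, error flag] pair\n        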
return x\n\n\ndef process():\n for row in range(main_length):\n text[row] = text[row].replace('+', ' + ')\n text[row] = text[row].replace('-', ' - ')\n temp_list = text[row].split()\n for i in range(len(temp_list)):\n if temp_list[i] == '':\n del temp_list[0]\n text[row] = ' '.join(temp_list)\n\n global max_l\n max_l = 0\n for j in range(len(text)):\n if len(text[j]) > max_l:\n max_l = len(text[j])\n\n\ndef menu():\n print('1 - Выравнивание по ширине',\n '\\n2 - Выравнивание по левому краю',\n '\\n3 - Выравнивание по правому краю',\n '\\n4 - Замена во всем тексте одного слова другим',\n '\\n5 - Удаление заданного слова из текста',\n '\\n6 - Замена арифметических выражений, состоящих из сложения и '\n 'вычитания, на результат их вычисления',\n '\\n7 - Найти предложение с максимальным количеством слов, в которых '\n 'гласные чередуются с согласными',\n '\\n0 - Выход')\n ans = input()\n if ans.isdigit():\n if 0 <= int(ans) <= 7:\n ans = int(ans)\n if ans == 1:\n center()\n if ans == 2:\n left_side()\n if ans == 3:\n right_side()\n if ans == 4:\n replace()\n if ans == 5:\n delete()\n if ans == 6:\n arithmetic()\n if ans == 7:\n special()\n if ans == 0:\n sys.exit()\n else:\n input('Выбран отсутствующий пункт!\\nНажмите Enter и попробуйте снова.')\n system('cls')\n menu()\n else:\n input('Введены неверные данные!\\nНажмите Enter и попробуйте снова.')\n system('cls')\n menu()\n\n\ndef form():\n ans = int(mode)\n if ans == 1:\n center()\n if ans == 2:\n left_side()\n if ans == 3:\n right_side()\n\n\ndef center():\n process()\n for row in range(main_length):\n local_len = len(text[row])\n if local_len == max_l:\n print(text[row])\n else:\n words = text[row].split()\n words_num = len(words)\n if words_num == 1:\n text[row] = ' ' * ((max_l - local_len) // 2) + ''.join(words) + ' ' * ((max_l - local_len) // 2)\n print(text[row])\n else:\n dif = ((max_l - local_len) % (words_num - 1) + 1)\n if dif != 0:\n st = words[0] + ' ' * ((max_l - local_len) // (words_num - 1) + 1) + words[1]\n for m in range(2, len(words)):\n if dif > 1:\n st += (' ' * ((max_l - local_len) // (words_num - 1) + 2)) + words[m]\n dif -= 1\n else:\n st += (' ' * ((max_l - local_len) // (words_num - 1) + 1)) + words[m]\n\n print(st)\n text[row] = st\n else:\n spaces = ' ' * ((max_l - local_len) // (words_num - 1) + 1)\n text[row] = spaces.join(text[row].split(' '))\n print(text[row])\n global mode, raw\n mode = 1\n raw = False\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\ndef left_side():\n process()\n for i in range(main_length):\n print(text[i])\n\n global mode, raw\n mode = 2\n raw = False\n\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\ndef right_side():\n process()\n for i in range(main_length):\n local_length = len(text[i])\n if local_length == max_l:\n print(text[i])\n else:\n spaces = ' ' * (max_l - local_length)\n st = spaces + text[i]\n print(st)\n text[i] = st\n global mode, raw\n mode = 3\n raw = False\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\ndef replace():\n temp = text\n old_word = input('Введите слово, которое хотите заменить: ')\n if old_word == '':\n input('Введены неверные данные!\\nНажмите Enter и попробуйте снова.')\n system('cls')\n replace()\n new_word = input('Введите слово, которым хотите заменить: ')\n if new_word == '':\n input('Введены неверные данные!\\nНажмите Enter и попробуйте снова.')\n system('cls')\n replace()\n for i in range(main_length):\n row = text[i].split()\n for j in 
range(len(row)):\n if row[j].upper() == old_word.upper():\n text[i] = new_word.join(text[i].split(row[j]))\n if row[j].replace('.', '').upper() == old_word.upper():\n text[i] = (new_word + '.').join(text[i].split(row[j]))\n if row[j].replace(',', '').upper() == old_word.upper():\n text[i] = (new_word + ',').join(text[i].split(row[j]))\n if raw is False:\n process()\n form()\n for i in range(len(text)):\n print(text[i])\n if temp == text:\n print('(Замен не произведено)')\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\ndef delete():\n temp = text\n check = False\n word = input('Введите слово, которое хотите удалить: ')\n if word == '':\n input('Введены неверные данные!\\nНажмите Enter и попробуйте снова.')\n delete()\n for i in range(main_length):\n row = text[i].split()\n for j in range(len(row)):\n if row[j].replace('.', '').replace(',', '').upper() == word.upper():\n text[i] = ''.join(text[i].split(' ' + row[j]))\n text[i] = ''.join(text[i].split(row[j] + ' '))\n text[i] = ''.join(text[i].split(row[j]))\n check = True\n for i in range(len(text)):\n print(text[i])\n if raw is False:\n process()\n form()\n if len(' '.join(temp)) == len(' '.join(text)):\n print('\\n(Замен не произведено)')\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n\n\ndef arithmetic():\n process()\n ar = False\n last_line = None\n for row in range(main_length):\n line = text[row].split()\n for i in range(len(line) - 1):\n if line[0] == '+' or line[0] == '-':\n if last_line is None:\n pass\n else:\n for d in range(len(last_line)-1, 0 - 1, -1):\n if last_line[d] == '':\n d -= 1\n else:\n break\n a = easy_check(last_line[d].replace(',', '', 1))\n\n for d in range(i + 1, len(line) + 1):\n if line[d] == '':\n d += 1\n else:\n break\n b = easy_check(line[d].replace(',', '', 1))\n if a[1] is False and a[0].isdigit() and b[1] is False and b[0].isdigit():\n ar = True\n if line[i] == '+':\n line[i] = str(round(float(a[0]) + float(b[0])))\n elif line[i] == '-':\n line[i] = str(round(float(a[0]) - float(b[0])))\n if ar:\n last_line[- 1] = ''\n line[i + 1] = ''\n text[row-1] = ' '.join(last_line)\n\n elif line[i] == '+' or line[i] == '-':\n for d in range(i-1, 0-1, -1):\n if line[d] == '':\n d += 1\n else:\n break\n a = easy_check(line[d].replace(',', '', 1))\n for d in range(i+1, len(line)+1):\n if line[d] == '':\n d += 1\n else:\n break\n b = easy_check(line[d].replace(',', '', 1))\n if a[1] is False and a[0].isdigit() and b[1] is False and b[0].isdigit():\n ar = True\n if line[i] == '+':\n line[i] = str(round((float(a[0]) + float(b[0]))))\n elif line[i] == '-':\n line[i] = str(round((float(a[0]) - float(b[0]))))\n if ar:\n process()\n if line[i - 1] == '':\n line[i - 2] = ''\n else:\n line[i - 1] = ''\n line[i + 1] = ''\n text[row] = ' '.join(line)\n last_line = text[row].split()\n if raw is False:\n process()\n form()\n for i in range(main_length):\n print(text[i])\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\ndef special():\n process()\n spm = (' '.join(text)).replace('!', '.')\n spm = spm.replace('?', '.')\n spm = spm.split('. 
')\n wis = 0\n max_wis = [0, 0]\n i = 0\n for sntc in range(len(spm)):\n wis = 0\n i = 0\n i += 1\n temp = spm[sntc].split()\n for word in temp:\n next_cons = False\n next_vow = False\n abort = False\n if len(word) == 1:\n continue\n else:\n if word[0].upper() in vow:\n next_cons = True\n next_vow = False\n elif word[0].upper() in cons:\n next_cons = False\n next_vow = True\n else:\n abort = True\n for i in range(len(word)-1):\n lit = word[i+1].upper()\n if next_cons and lit in cons:\n next_cons = False\n next_vow = True\n elif next_vow and lit in vow:\n next_cons = True\n next_vow = False\n elif next_cons and lit in vow or next_vow and lit in cons:\n abort = True\n break\n if abort is False:\n wis += 1\n if wis > float(max_wis[0]):\n max_wis = wis, sntc\n\n print('Искомое предложение:', spm[max_wis[1]])\n input('\\nГотово!\\nНажмите Enter, чтобы продолжить')\n system('cls')\n menu()\n\n\n# Sets of vowel and consonant letters (Russian alphabet)\nvow = ['А', 'О', 'У', 'Ы', 'Э', 'Я', 'Е', 'Ё', 'Ю', 'И']\ncons = ['Б', 'В', 'Г', 'Д', 'Й', 'Ж', 'З', 'К', 'Л', 'М', 'Н', 'П',\n 'Р', 'С', 'Т', 'Ф', 'Х', 'Ц', 'Ч', 'Ш', 'Щ']\nnon = {'Ь', 'Ъ'}\nnum = {'1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '+', '.'}\nar = False\nraw = True\n\n# Preprocessing\n\"\"\"\ntext = ['Скрипка издергалась, упрашивая, ',\n 'и вдруг разревелась',\n 'так по-детски,',\n 'что барабан не выдержал: ',\n 'Хорошо, хорошо, хорошо!',\n 'А сам устал, 2+3',\n 'не дослушал скрипкиной речи, 1',\n '+3 шмыгнул на горящий Кузнецкий',\n 'и ушел.']\n\"\"\"\n\n# text = [' Текст для тестирования 4 + 5', 'лабораторной работы по', 'программированию. Понедельник 1 - 0 день недели',]\n\ntext = [' Текст для тестирования 4 + 5 + 6', 'лабораторной работы по',\n 'программированию. Понедельник 5 - 1 - 0 день недели',\n 'а декабрь 14',\n '- 2 месяц. Эту работу пишу уже',\n '1 + 1 недели. 
И все это продолжительное время',\n 'она не работает.']\nmain_length = len(text)\nmax_l = 0\nmode = 2\nfor i in range(len(text)):\n print(text[i])\n\n# Start the program\nmenu()","sub_path":"1st_semester/Lab 9/Bogatyrev/Lab 9.py","file_name":"Lab 9.py","file_ext":"py","file_size_in_byte":13296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152560307","text":"from django.conf import settings\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom functools import wraps\n\n\nclass Profile(models.Model):\n \"\"\"\n This model extends the User model, storing more information.\n \"\"\"\n\n #Connection to User model\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\n #Avatar image\n image = models.ImageField(default='default.png', upload_to='profile_pics')\n\n #Custom fields for application\n\n #Basic Details\n name = models.CharField(max_length=50, blank=True, null=True)\n surname = models.CharField(max_length=100, blank=True, null=True)\n birthdate = models.DateField(auto_now_add=False, auto_now=False, blank=True, null=True)\n\n #Details from ForeignKey\n personal_id = models.ForeignKey('Identification', on_delete=models.CASCADE, blank=True, null=True)\n address = models.ForeignKey('Address', on_delete=models.CASCADE, blank=True,null=True)\n family = models.ForeignKey('Family', on_delete=models.CASCADE, blank=True, null=True)\n # curso = models.ForeignKey\n # departamento = models.ForeignKey\n\n #Other Details\n profession = models.CharField(max_length=50, blank=True, null=True)\n personal_email = models.EmailField(verbose_name='personal email address', max_length=255, unique=True, blank=True, null=True)\n personal_website = models.CharField(max_length=30, blank=True, null=True)\n\n #REQUIRED_FIELDS = ['name','surname','birthdate','country']\n\n def __str__(self):\n return f'{self.user.email} Profile'\n\n @property\n def get_name(self):\n return self.name\n\n @property\n def get_surname(self):\n return self.surname\n\n @property\n def get_coutry(self):\n return self.country\n\n @property\n def get_id(self):\n return self.personal_id\n\n @property\n def get_email(self):\n return self.personal_email\n\nclass Address(models.Model):\n #user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n address = models.CharField(max_length=150)\n country = models.CharField(max_length=30)\n city = models.CharField(max_length=30)\n district = models.CharField(max_length=30)\n freguesia = models.CharField(max_length=30)\n\nclass Identification(models.Model):\n ID_TYPE_CHOICES = (('cc','cartao cidadao'),('pp','passport'),)\n #Ids & Fiscal Number\n #user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n id_number = models.IntegerField()\n id_type = models.CharField(max_length=2, choices=ID_TYPE_CHOICES)\n fiscal_id = models.IntegerField()\n\nclass Family(models.Model):\n #user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n mothers_name = models.CharField(max_length=150)\n fathers_name = models.CharField(max_length=150)\n\n\n@receiver(post_save, sender=Profile)\ndef create_or_update_user_profile(sender, instance, **kwargs):\n if not instance:\n return\n\n if hasattr(instance, '_dirty'):\n return\n # \n # if created:\n # Profile.objects.create(user=instance)\n\n else:\n try:\n instance._dirty = True\n instance.save()\n\n finally:\n del 
instance._dirty\n","sub_path":"userprofiles/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13740652","text":"from skrf import Network\nimport numpy\nfrom cmath import exp\n\nclass Calibrator(object):\n def __init__(self, T_ant=298, T_L=298, T_NS=350, T_cab=298, T_Ht=373, \n calCabLen = 45, internalGammaL=.1, L=0, inputType=None, \n inputData=None, loadData=None, NSData=None, inputS11Data=None, \n recData=None, cabS11=None, gammaHt=None, mask=None):\n\n self.T_ant = T_ant\n self.T_L = T_L\n self.T_NS = T_NS\n self.T_cab = T_cab\n self.T_Ht = T_Ht\n self.calCabLen = calCabLen\n self.internalGammaL = internalGammaL\n self.L = L\n self.inputType = inputType\n self.inputData = inputData\n self.loadData = loadData\n self.NSData = NSData\n self.inputS11Data = inputS11Data\n self.recData = recData \n self.gammaHt = gammaHt\n self.mask = mask\n\t\n if inputType == 'cold':\n print()\n print('fgsdfg')\n elif inputType == 'hot':\n cabS11 = Network(cabS11)\n self.cabS11 = cabS11.s[:,0,0]\n self.cabS21 = cabS11.s[:,1,0]\n print()\n elif inputType == 'open':\n print()\n elif inputType == 'short':\n print()\n elif inputType == 'custom':\n print()\n elif inputType == None:\n print()\n\n def psd(self, filename, mask=None):\n with open(filename) as file:\n rawdata = numpy.loadtxt(file)\n if self.mask is None:\n self.mask = numpy.ones(rawdata.shape[0], dtype=bool)\n PSD = rawdata[self.mask]\n return PSD\n\n def recReflec(self, filename, internalGammaL, L):\n matrixDat = Network(filename)\n self.freq = matrixDat.f\n recS11 = matrixDat.s[self.mask,0,0]\n recS12 = matrixDat.s[self.mask,0,1]\n recS21 = matrixDat.s[self.mask,1,0]\n recS22 = matrixDat.s[self.mask,1,1]\n \n gammaRec = recS11 + (recS12*recS21*internalGammaL) / (1 - recS22*internalGammaL)\n # LNA reference plane adjustment Sept. 2019\n for i in range(len(self.freq)):\n gammaRec = [gammaRec[i]*exp(-1j*4*numpy.pi*i*L/.69*3E+8) for i in self.freq]\n return gammaRec\n\n\n","sub_path":"misc/ultraClass.py","file_name":"ultraClass.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211362524","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'movies'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^results$', views.results, name='results'),\n url(r'^ifeellike$', views.ifeellike, name='ifeellike'),\n url(r'^getcastcrewrating$', views.getcastcrewrating, name='getcastcrewrating'),\n url(r'^getrelated$', views.getrelated, name='getrelated'),\n]","sub_path":"moviepiqer/apps/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548188838","text":"import os, glob\nimport scipy.misc\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlist_input = glob.glob(os.path.join(\"./Urban100_SR\", \"image_SRF_2\", \"*_bicubic.png\"))\nlist_input.sort()\nlist_output = glob.glob(os.path.join(\"./Urban100_SR\", \"image_SRF_2\", \"*_HR.png\"))\nlist_output.sort()\n\ntry: os.mkdir(\"./dataset\")\nexcept: print(\"\\'./dataset\\'is Already Exist.\")\ntry: os.mkdir(\"./dataset/bicubic\")\nexcept: print(\"\\'./dataset/bicubic\\'is Already Exist.\")\ntry: os.mkdir(\"./dataset/ground_truth\")\nexcept: print(\"\\'./dataset/ground_truth\\'is Already Exist.\")\n\nfor idx, _ in enumerate(list_output):\n # bisample = scipy.misc.imread(list_input[idx])\n # gtsample = scipy.misc.imread(list_output[idx])\n bisample = scipy.misc.imread(list_input[idx]).astype(np.float32) / 255\n gtsample = scipy.misc.imread(list_output[idx]).astype(np.float32) / 255\n biname = list_input[idx].split('/')[-1].split('.')[0]\n gtname = list_output[idx].split('/')[-1].split('.')[0]\n print(biname, gtname)\n np.save(\"./dataset/bicubic/%s\" %(biname), bisample)\n np.save(\"./dataset/ground_truth/%s\" %(gtname), gtsample)\n","sub_path":"img2npy.py","file_name":"img2npy.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494094907","text":"import pymysql\nimport re \n\ncon = pymysql.connect('localhost', 'root', \n '(hidden)', 'store_improved')\ncur = con.cursor()\ncur.execute(\"select albums.albumid, albums.artistname, albums.albumname, albums.quantity, albums.price from albums;\")\nrows = cur.fetchall()\n\ndata = {}\nusers = []\norder = {}\nusers_db = {}\n\n'''getting all users from sql server. 
if a customer 'logs' in with their email address we fetch their data into a dictionary for printing '''\ndef collect_users():\n\tcur_customer_table = con.cursor()\n\tcur_customer_table.execute(\"select customerid, email, fname, lname, telephone, streetadd, city, country, zip from store_improved.customers;\")\n\trows = cur_customer_table.fetchall()\n\tfor customerid, email, fname, lname, telephone, streetadd, city, country, zip_code in rows:\n\t\tusers_db[customerid] = {}\n\t\tusers_db[customerid]['email'] = email\n\t\tusers_db[customerid]['first name'] = fname\n\t\tusers_db[customerid]['last name'] = lname\n\t\tusers_db[customerid]['telephone'] = telephone\n\t\tusers_db[customerid]['streetadd'] = streetadd\n\t\tusers_db[customerid]['city'] = city\n\t\tusers_db[customerid]['country'] = country\n\t\tusers_db[customerid]['zip_code'] = zip_code\n\n'''getting customer details based on email address'''\ndef print_db_customer_from_email(self):\n\tcur_email_table = con.cursor()\n\tcur_email_table.execute(\"select customerid from store_improved.customers where email = '\"+question+\"';\")\n\tfetched = cur_email_table.fetchall()\n\tcustomer_id = str(fetched).strip('(,)')\n\tresult = \"\\ncustomer id : \" + str(customer_id) + \"\\nemail address: \" + users_db[int(customer_id)]['email'] +'\\nfirst name: ' + users_db[int(customer_id)]['first name'] +'\\nlast name: ' + users_db[int(customer_id)]['last name'] +'\\ntelephone: ' + users_db[int(customer_id)]['telephone'] +'\\nstreet address: ' + users_db[int(customer_id)]['streetadd'] +'\\ncity : ' + users_db[int(customer_id)]['city']\n\treturn result\n\n'''getting customer details by customer id'''\ndef print_db_customer_from_id(self):\n\tresult = \"\\ncustomer id : \" + str(question) + \"\\nemail address: \" + users_db[int(question)]['email'] +'\\nfirst name: ' + users_db[int(question)]['first name'] +'\\nlast name: ' + users_db[int(question)]['last name'] +'\\ntelephone: ' + users_db[int(question)]['telephone'] +'\\nstreet address: ' + users_db[int(question)]['streetadd'] +'\\ncity : ' + users_db[int(question)]['city']\n\treturn result\n\n\n'''check users in order processing'''\ndef check_customer():\n\tcur_user = con.cursor()\n\tcur_user.execute(\"select customers.email from customers;\")\n\trow_customer = cur_user.fetchall()\n\tfor line in row_customer:\n\t\tusers.append(str(line).strip(\"'(,)'\"))\n\n'''load albums from sql server'''\nfor albumid, artist, album, quantity, price in rows:\n\tdata[albumid] = {}\n\tdata[albumid]['artist'] = artist\n\tdata[albumid]['album'] = album\n\tdata[albumid]['quantity'] = quantity\n\tdata[albumid]['price'] = price\n\n'''print albums from sql data dictionary'''\ndef print_stock():\n\tcur = con.cursor()\n\tcur.execute(\"select albums.albumid, albums.artistname, albums.albumname, albums.quantity, albums.price from albums;\")\n\trows = cur.fetchall()\n\tfor albumid, artist, album, quantity, price in rows:\n\t\tprint(\"\\nalbumid: \" + str(albumid) + \"\\nartist name: \" + artist + \"\\nalbum name: \" + album + \"\\nleft in stock: \" + str(quantity) + \"\\nprice: \" + str(price))\n\n'''parse string values to order dictionary from first while loop'''\ndef string_to_order():\n\torder[album_id] = {}\n\torder[album_id]['artist'] = data[album_id]['artist']\n\torder[album_id]['album'] = data[album_id]['album']\n\torder[album_id]['quantity'] = int(prompt_quantity)\n\torder[album_id]['total balance'] = round(data[album_id]['price'],2) * int(prompt_quantity)\n\t\n'''parse string values to customer 
dictionary in second while loop'''\t\ndef string_to_customer():\n\ttelephone = input(\"telephone number: \")\n\tstreetadd = input(\"street address: \")\n\tcountry = input(\"country: \")\n\tcity = input(\"city: \")\n\tzipcode = input(\"zipcode: \")\n\tcustomer['fname'] = fname\n\tcustomer['lname'] = lname\n\tcustomer['email'] = email\n\tcustomer['telephone'] = telephone\n\tcustomer['streetadd'] = streetadd\n\tcustomer['city'] = city\n\tcustomer['country'] = country\n\tcustomer['zipcode'] = zipcode\n\n'''insert customer and order dictionary to server'''\ndef sql_procedure_customer_order():\n\n\tins_customer = \"insert into store_improved.customers (fname, lname, email, telephone, streetadd,city,country,zip) values (%s,%s,%s,%s,%s,%s,%s,%s)\"\n\tcur.execute(ins_customer, (customer['fname'],customer['lname'],customer['email'],customer['telephone'],customer['streetadd'],customer['city'],customer['country'],customer['zipcode']))\n\t\n\tcur.execute(\"select customerid from store_improved.customers where email = '\"+customer['email']+\"';\")\n\tcustomerid = cur.fetchall()\n\tcustomer_id = str(customerid).strip('(,)')\n\tcustomer['customer id'] = customer_id\n\tins_order = \"insert into store_improved.orders(total, customerid) values (%s,%s)\"\n\tcur.execute(ins_order, (show_balance(order),customer['customer id'] ))\n\t\n\tcur.execute(\"select orderid from store_improved.orders where customerid = '\"+customer['customer id']+\"';\")\n\torderid = cur.fetchall()\n\torder_id = str(orderid).strip('(,)')\n\t\n\tordered[order_id] = order\n\tfor line1,line2 in order.items():\n\t\tcur.execute(\"insert into store_improved.order_bridge (orderid, albumid, quantity) values ('\"+order_id+\"' ,'\"+str(line1)+\"','\"+str(order[line1]['quantity'])+\"')\")\n\t\n\tcon.commit()\n\n'''insert order from dictionary when user is existing'''\ndef sq_procedure_customer_order_existing_customer():\n\n\tcur.execute(\"select customerid, fname, lname, telephone, streetadd, city, country, zip from store_improved.customers where email = '\"+email+\"';\")\n\trows = cur.fetchall()\n\tfor line1, line2, line3, line4, line5, line6, line7, line8 in rows:\n\t\tcustomer['customer id'] = str(line1)\n\t\tcustomer['fname'] = line2\n\t\tcustomer['lname'] = line3\n\t\tcustomer['telephone'] = line4\n\t\tcustomer['streetadd'] = line5\n\t\tcustomer['city'] = line6\n\t\tcustomer['country'] = line7\n\t\tcustomer['zipcode'] = line8\n\tcustomer['email'] = email\n\tins_order = \"insert into store_improved.orders(total, customerid) values (%s,%s)\"\n\tcur.execute(ins_order, (show_balance(order),customer['customer id'] ))\n\tcur.execute(\"select orderid from store_improved.orders where customerid = '\"+customer['customer id']+\"' and entrydate > TIME(NOW()-INTERVAL 2 second);\")\n\torderid = cur.fetchall()\n\torder_id = str(orderid).strip('(,)')\n\tordered[order_id] = order\n\tfor line1,line2 in order.items():\n\t\tcur.execute(\"insert into store_improved.order_bridge (orderid, albumid, quantity) values ('\"+order_id+\"' ,'\"+str(line1)+\"','\"+str(order[line1]['quantity'])+\"')\")\n\n\n'''function for submitting full payment'''\ndef pay_balance():\n\tfor line1, line2 in ordered.items():\n\t\torderid = line1\n\tins_payment = \"insert into store_improved.payments(orderid, amount) values (%s,%s)\"\t\n\tcur.execute(ins_payment,(orderid,show_balance(order)))\n\tcon.commit()\n\tcur_update = con.cursor()\n\tfor line1, line2 in order.items():\n\t\tcur_update.execute(\"update albums set quantity = quantity - '\"+str(line2['quantity'])+\"' where albumid = 
'\"+str(line1)+\"';\")\n\t\tcon.commit()\n'''\nfor line1, line2 in order.items():\n\tprint(\"\\nalbum id: \" + str(line1) + \"\\nartist name: \" + line2['artist']+ \"\\nalbum name: \" + line2['album'] + \"\\nquantity: \" + str(line2['quantity']) + \"\\ntotal price: \" + str(line2['total balance']))\t\n'''\t\n\t\n'''show payments after full payment'''\t\ndef show_payment():\n\tfor line1, line2 in ordered.items():\n\t\torderid = line1\n\tcur_entry = con.cursor()\n\tcur_entry.execute(\"select entry, amount, entrydate from payments where orderid = '\" +orderid + \"';\")\n\tpayment_details = cur_entry.fetchall()\n\tprint(\"\\npayment number: \" + str(payment_details[0][0]) + \"\\namount paid: \" + str(payment_details[0][1])+ \"\\npayment date: \" + str(payment_details[0][2]))\n\n'''store total price from all album quantities'''\t\ndef show_balance(self):\n\tresult=0\t\t\t\n\tfor line1,line2 in order.items():\n\t\tresult = result + order[line1]['total balance']\n\treturn result\n\t\n'''print customer details after the insert'''\ndef print_customer_details():\n\tprint(\"\\ncustomer id: \" + customer['customer id'] + \"\\nfirst name: \" + customer['fname'] + \"\\nlast name: \" + customer['lname'] + \"\\ntelephone: \" + customer['telephone'] + \"\\nstreet address: \" + customer['streetadd'] + \"\\ncity: \" + customer['city'] + \"\\ncountry: \" + customer['country'] + \"\\nzip code: \" + customer['zipcode'])\n\n\n'''print order details after insert'''\ndef print_order_details():\n\tfor line1, line2 in ordered.items():\n\t\tprint('\\norder number: ' + line1)\n\t\tfor l,l2 in line2.items():\n\t\t\tprint('\\nalbum id: ' + str(l))\n\t\t\tprint('artist: ' + l2['artist'] +'\\nalbum: ' + l2['album'] + '\\nquantity: ' +str(l2['quantity']) + '\\nbalance: '+ str(l2['total balance']))\n\n'''print the values of the current order'''\ndef print_current_order():\n\tprint(\"\\ncart summary: \")\n\tfor line1, line2 in order.items():\n\t\tprint(\"\\nalbum id: \" + str(line1) + \"\\nartist name: \" + line2['artist']+ \"\\nalbum name: \" + line2['album'] + \"\\nquantity: \" + str(line2['quantity']) + \"\\ntotal price: \" + str(line2['total balance']))\n\n'''we fetch all the orders of the customer if they input their email address'''\ndef get_all_orders_email(self):\n\tcur_view_orders = con.cursor()\n\tcur_view_orders.execute(\"select orders.orderid, order_bridge.albumid, albums.artistname, albums.albumname, orders.shipped as 'paid', albums.price * order_bridge.quantity, order_bridge.quantity, albums.price, orders.entrydate from orders inner join order_bridge on orders.orderid = order_bridge.orderid inner join albums on order_bridge.albumid = albums.albumid inner join customers on orders.customerid = customers.customerid where customers.email = '\"+question+\"';\") \n\tordered_orders=cur_view_orders.fetchall()\n\tcur_balance = con.cursor()\n\tcur_balance.execute(\"select orders.orderid, sum(albums.price * order_bridge.quantity) as 'total balance' from orders inner join order_bridge on orders.orderid = order_bridge.orderid inner join albums on order_bridge.albumid = albums.albumid inner join customers on orders.customerid = customers.customerid where customers.email = '\"+question+\"' group by orders.orderid\")\n\tcustomer_totals = cur_balance.fetchall()\n\t\n\tdct2 = {}\n\tfor a,b,c,d,e,f,g, h, i in ordered_orders:\n\t\tif a not in dct2:\n\t\t\tdct2[a] = {}\n\t\tdct2[a]['status'] = e\n\t\tdct2[a]['date'] = i\n\t\tdct2[a][b] = {'artist': c, 'album': d, 'balance': f, 'quantity': g, 'price': h}\t\n\n\tfor line1, line2 in 
customer_totals:\n\t\tdct2[line1]['total'] = line2 \n\n\tignore = ['date','status','total']\n\n\tfor line, line2 in dct2.items():\n\t\tprint('\\norder id: ' + str(line))\n\t\tif line2['status'] == 'y':\n\t\t\tprint('status: fully paid')\n\t\telse:\n\t\t\tprint('status: unpaid')\t\n\t\tprint('order date: ' + str(line2['date']) + '\\ntotal: ' + str(line2['total']))\n\t\tfor line3, line4 in line2.items():\n\t\t\tif line3 in ignore:\n\t\t\t\tcontinue\n\t\t\tprint('\\nalbum id: ' + str(line3) + '\\nartist name: '+ line4['artist'] + '\\nalbum name: '+ line4['album'] + '\\nquantity: ' + str(line4['quantity']) + '\\nprice: ' + str(line4['price']) + '\\nbalance: ' + str(line4['balance']))\n\n\n'''we fetch all the orders of the customer if they enter their customer id number'''\ndef get_all_orders_customer(self):\n\tcur_view_orders = con.cursor()\n\tcur_view_orders.execute(\"select orders.orderid, order_bridge.albumid, albums.artistname, albums.albumname, orders.shipped as 'paid', albums.price * order_bridge.quantity, order_bridge.quantity, albums.price, orders.entrydate from orders inner join order_bridge on orders.orderid = order_bridge.orderid inner join albums on order_bridge.albumid = albums.albumid inner join customers on orders.customerid = customers.customerid where customers.customerid = '\"+question+\"';\") \n\tordered_orders=cur_view_orders.fetchall()\n\tcur_balance = con.cursor()\n\tcur_balance.execute(\"select orders.orderid, sum(albums.price * order_bridge.quantity) as 'total balance' from orders inner join order_bridge on orders.orderid = order_bridge.orderid inner join albums on order_bridge.albumid = albums.albumid where orders.customerid = '\"+question+\"' group by orders.orderid;\")\n\tcustomer_totals = cur_balance.fetchall()\n\n\tdct2 = {}\n\tfor a,b,c,d,e,f,g, h, i in ordered_orders:\n\t\tif a not in dct2:\n\t\t\tdct2[a] = {}\n\t\tdct2[a]['status'] = e\n\t\tdct2[a]['date'] = i\n\t\tdct2[a][b] = {'artist': c, 'album': d, 'balance': f, 'quantity': g, 'price': h}\t\n\n\tfor line1, line2 in customer_totals:\n\t\tdct2[line1]['total'] = line2 \n\n\tignore = ['date','status','total']\n\n\tfor line, line2 in dct2.items():\n\t\tprint('\\norder id: ' + str(line))\n\t\tif line2['status'] == 'y':\n\t\t\tprint('status: fully paid')\n\t\telse:\n\t\t\tprint('status: unpaid')\t\n\t\tprint('order date: ' + str(line2['date']) + '\\ntotal: ' + str(line2['total']))\n\t\tfor line3, line4 in line2.items():\n\t\t\tif line3 in ignore:\n\t\t\t\tcontinue\n\t\t\tprint('\\nalbum id: ' + str(line3) + '\\nartist name: '+ line4['artist'] + '\\nalbum name: '+ line4['album'] + '\\nquantity: ' + str(line4['quantity']) + '\\nprice: ' + str(line4['price']) + '\\nbalance: ' + str(line4['balance']))\n\n'''while loop to revert back to album cart if second (customer loop) is broken'''\n\nwhile True:\n\t'''another loop added for viewing their previous orders'''\n\twhile True:\n\t\tmenu = input(\"\\nWelcome to the death metal store. Please choose an option below:\\n1. browser albums for sale\\n2. login to your account\")\n\t\tif menu == '2':\n\t\t\tcheck_customer()\n\t\t\tcollect_users()\n\t\t\tquestion = input(\"Please enter your email address or customer id number: \")\n\t\t\ttry:\n\t\t\t\t#users is just a list of emails from db\n\t\t\t\tif question in users:\n\t\t\t\t\tprint(\"\\nuser found: \")\n\t\t\t\t\t#printing customer details from email\n\t\t\t\t\tprint(print_db_customer_from_email(question))\n\t\t\t\t\tquestion2 = input(\"\\nplease select an option:\\n1. view orders\\n2. pay for orders\\n3. 
edit your details\")\n\t\t\t\t\tif question2 == '1':\n\t\t\t\t\t\tget_all_orders_email(question)\n\t\t\t\t\t\t#customer can now view all their orders, paid or not. will be adding more \n\t\t\t\t#users_db is a dictionary with the customerid as main key value\n\t\t\t\telif int(question) in users_db:\n\t\t\t\t\tprint(\"\\nuser found: \")\n\t\t\t\t\tprint(print_db_customer_from_id(question))\n\t\t\t\t\tquestion2 = input(\"\\nplease select an option:\\n1. view orders\\n2. pay for orders\\n3. edit your details\")\n\t\t\t\t\tif question2 == '1':\n\t\t\t\t\t\tget_all_orders_customer(question)\n\t\t\t\t\t\t#customer can now view all their orders, paid or not. will be adding more \n\t\t\t\telse:\n\t\t\t\t\tprint(\"user name not found\")\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"user not found\")\n\t\telse:\n\t\t\tprint(\"\\nalbums for sale: \")\n\t\t\tbreak\n\t\n\t'''loop for adding or editing albums'''\n\twhile True:\n\t\t#print stock from database, is refreshed if customer completes order in second loop\n\t\tprint_stock()\n\t\tprompt_order = input(\"\\nwhich album would you like to order? \\nplease enter the album id number: \")\n\t\t\n\t\t#checking if entry is alphabetic, if so rejected\n\t\tif (re.search('[a-zA-Z]', prompt_order)) or prompt_order =='':\n\t\t\tprint(\"invalid entry.\")\n\t\t\tcontinue\n\t\t\t\n\t\talbum_id = int(prompt_order)\n\t\t#if order dictionary already filled with same values, we can update quantity or remove it from basket\n\t\t#if value set to zero\n\t\tif album_id in order:\n\t\t\talbum_id = int(prompt_order)\n\t\t\tprint(\"this album is in your cart. you may update the quantity here.\")\n\t\t\tprompt_quantity = input(\"how many would you like? \")\n\t\t\tif int(data[album_id]['quantity']) - int(prompt_quantity) < 0:\n\t\t\t\tprint(\"can't purchase more than what is available\")\n\t\t\telif prompt_quantity == '0':\n\t\t\t\tprint(\"album id: \"+str(album_id)+\", artist: \"+order[album_id]['artist']+\", album name: \" + order[album_id]['album'] + \" was removed from the cart. \")\n\t\t\t\tdel order[album_id]\n\t\t\t\tprompt_next = input(\"\\norder more? \")\n\t\t\t\tif prompt_next == 'no':\t\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tstring_to_order()\n\t\t\t\tprompt_next = input(\"\\nquantity updated. order more? \")\n\t\t\t\tif prompt_next == 'no':\t\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\t\n\t\t\n\t\t#if user submits entry that matches album in stock, they can update cart\n\t\telif album_id in data:\n\t\t\talbum_id = int(prompt_order)\n\t\t\tprompt_quantity = input(\"how many would you like? \")\n\t\t\tif int(data[album_id]['quantity']) - int(prompt_quantity) < 0:\n\t\t\t\tprint(\"can't purchase more than what is available\")\t\n\t\t\telse:\n\t\t\t\tstring_to_order()\n\t\t\t\tprompt_next = input(\"\\norder more? \")\n\t\t\t\tif prompt_next == 'no':\t\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\t\n\t\telse:\n\t\t\tprint(\"invalid entry.\")\n\t\t\t\n\t#cart details printed, with calculated balance of all items\t\n\tprint_current_order()\n\tprint(\"\\ntotal balance: \"+str(show_balance(order)))\n\t\n\t'''loop for order summary and collecting customer details'''\n\twhile True:\n\t\t#users email address loaded from db, customer dictionary defined so that it can be reset if while loop is broken\n\t\t#ordered dictionary set to empty so we can merge it with orderid from db after insert procedures\n\t\tcheck_customer()\n\t\tedit_cart_option = input(\"\\nselect an option:\\n1. confirm and enter shipping address\\n2. 
back to main menu\\n3. delete order and quit program\\n\")\n\t\tcustomer = {}\n\t\tordered = {}\n\t\t#customer can fill details\n\t\tif edit_cart_option == '1':\n\t\t\tfname = input(\"\\nfirst name: \")\n\t\t\tlname = input(\"last name: \")\n\t\t\temail = input(\"email address: \")\n\t\t\tif email in users:\n\t\t\t\texisting_user = input(email + \" is an existing customer. copy customer details to this order? \") \n\t\t\t\tif existing_user == 'yes':\n\t\t\t\t\t#order inserts, order summary feedback provided\n\t\t\t\t\tsq_procedure_customer_order_existing_customer()\n\t\t\t\t\tprint(\"order summary\")\n\t\t\t\t\tprint_customer_details()\n\t\t\t\t\tprint_order_details()\n\t\t\t\t\tprint(\"\\ntotal balance: \"+str(show_balance(order)))\n\t\t\t\t\tprint(\"\\nthank you for your order. we recommend full payment in order to confirm availability of your requested albums\")\n\t\t\t\t\t'''payment procedure'''\n\t\t\t\t\tpay_question = input(\"would you like to pay for your order now?\")\n\t\t\t\t\tif pay_question == 'yes':\n\t\t\t\t\t\tpay_card = input(\"please provide a credit card number for your order: \")\n\t\t\t\t\t\tif len(pay_card) != 16:\n\t\t\t\t\t\t\tprint(\"please provide a valid credit card number\") \n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t#balance paid, payment details showed, customer dict deleted and order emptied\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpay_balance()\n\t\t\t\t\t\t\tprint(\"\\nthank you for your payment. here is your payment number: \")\n\t\t\t\t\t\t\tshow_payment()\n\t\t\t\t\t\t\tdel(customer)\n\t\t\t\t\t\t\torder = {}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif pay_question == 'no':\n\t\t\t\t\t\tprint(\"you may login later to pay for your album. it will be shipped once fully paid. \")\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"please enter yes or no.\")\t\n\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint(\"there cannot be two users with the same email address in our system\")\n\t\t\t\t\tcontinue\n\t\t\t#if does not exist in db, then call function to fill details\n\t\t\telse:\t\n\t\t\t\tstring_to_customer()\n\t\t\t\t\n\t\t\t#order inserts, order summary feedback provided\n\t\t\tedit_customer_details = input(\"enter confirm to save order with your shipping address, or edit to change order or contact details.\")\n\t\t\tif edit_customer_details =='confirm':\n\t\t\t\t#order inserts, order summary feedback provided\n\t\t\t\tsql_procedure_customer_order()\n\t\t\t\tprint(\"order summary\")\n\t\t\t\tprint_customer_details()\n\t\t\t\tprint_order_details()\n\t\t\t\tprint(\"\\ntotal balance: \"+str(show_balance(order)))\n\t\t\t\tprint(\"\\nthank you for your order. we recommend full payment in order to confirm availability of your requested albums\")\n\t\t\t\t'''payment procedure'''\n\t\t\t\tpay_question = input(\"would you like to pay for your order now?\")\n\t\t\t\tif pay_question == 'yes':\n\t\t\t\t\tpay_card = input(\"please provide a credit card number for your order: \")\n\t\t\t\t\tif len(pay_card) != 16:\n\t\t\t\t\t\tprint(\"please provide a valid credit card number\") \n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t#balance paid, payment details showed, customer dict deleted and order emptied\n\t\t\t\t\t\tpay_balance()\n\t\t\t\t\t\tprint(\"\\nthank you for your payment. here is your payment number: \")\n\t\t\t\t\t\tshow_payment()\n\t\t\t\t\t\tdel(customer)\n\t\t\t\t\t\torder = {}\n\t\t\t\t\t\tbreak\n\t\t\t\t#order placed, but not deducted from balance since not paid\n\t\t\t\telif pay_question == 'no':\n\t\t\t\t\tprint(\"you may login later to pay for your album. 
it will be shipped once fully paid. \")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"please enter yes or no.\")\t\n\t\t\t\t\tcontinue\n\t\t\t#if selects edit, can re-fill user details during ordering process\n\t\t\telif edit_customer_details == 'edit':\n\t\t\t\tcontinue\n\t\telif edit_cart_option == '2':\n\t\t\tbreak\n\t\t\t\n\t\telif edit_cart_option == '3':\n\t\t\tprint(\"order deleted. thank you for visiting our store\")\n\t\t\tdel(order)\n\t\t\traise SystemExit\n\t\telse:\n\t\t\tprint(\"please enter a valid option\")\n\n\n\n","sub_path":"store_project/db_connect_store_1.6.py","file_name":"db_connect_store_1.6.py","file_ext":"py","file_size_in_byte":20403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"200317942","text":"import re\nimport json\nimport psycopg2\nfilepath = 'facts.txt'\nout = []\nwith open(filepath) as fp:\n    line = fp.readline()\n    while line:\n        if not re.match(r'^\\s*$', line):\n            out.append(line.rstrip().replace(\"'\", \"''\"))\n        line = fp.readline()\n\nconnection = None\ncursor = None\ntry:\n    connection = psycopg2.connect(\n        user=\"postgres\",\n        password=\"cool\",\n        host=\"localhost\",\n        port=5432,\n        database=\"api\"\n    )\n\n    values = \"\"\n    for fact in out:\n        if len(fact) > 100:\n            continue\n        values += \"('{}'),\".format(fact)\n    values = values[:-1] + \";\"\n\n    cursor = connection.cursor()\n    query = \"insert into facts (fact) values \" + values\n\n    print(query)\n    cursor.execute(query)\n    connection.commit()\n\nexcept (Exception, psycopg2.Error) as error:\n    print(\"error connecting to psql:\", error)\nfinally:\n    if cursor:\n        cursor.close()\n    if connection:\n        connection.close()\n        print(\"Connection closed\")\n\n\n# for fact in out:\n#     print(fact)\n\n\n# with open('facts.json', 'w') as outfile:\n#     json.dump(out, outfile)\n","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"283253149","text":"\"\"\"Common cgroups management routines.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport errno\nimport os\n\nimport logging\n\nfrom . 
import subproc\n\n\n_LOGGER = logging.getLogger(__name__)\n\nCGROOT = '/cgroup'\nPROCCGROUPS = '/proc/cgroups'\nPROCMOUNTS = '/proc/mounts'\n\n\ndef _mkdir_p(path):\n \"\"\"proper mkdir -p implementation\"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef create(subsystem, group):\n \"\"\"mkdir cgroup\"\"\"\n fullpath = makepath(subsystem, group)\n return _mkdir_p(fullpath)\n\n\ndef delete(subsystem, group):\n \"\"\"Delete cgroup (and all sub-cgroups)\"\"\"\n fullpath = makepath(subsystem, group)\n for (dirname, _subdirs, _files) in os.walk(fullpath, topdown=False):\n os.rmdir(dirname)\n\n\ndef exists(subsystem, group):\n \"\"\"os.path.exists the cgroup\"\"\"\n fullpath = makepath(subsystem, group)\n return os.path.exists(fullpath)\n\n\ndef makepath(subsystem, group, pseudofile=None):\n \"\"\"Pieces together a full path of the cgroup\"\"\"\n mountpoint = get_mountpoint(subsystem)\n group = group.strip('/')\n if pseudofile:\n return os.path.join(mountpoint, group, pseudofile)\n return os.path.join(mountpoint, group)\n\n\ndef set_value(subsystem, group, pseudofile, value):\n \"\"\"Set value in cgroup pseudofile\"\"\"\n fullpath = makepath(subsystem, group, pseudofile)\n _LOGGER.debug('%s : %s', fullpath, value)\n with open(fullpath, 'w+') as f:\n f.write(str(value))\n\n\ndef get_value(subsystem, group, pseudofile):\n \"\"\"Reads the value of cgroup parameter.\"\"\"\n fullpath = makepath(subsystem, group, pseudofile)\n with open(fullpath) as f:\n return f.read().strip()\n\n_BLKIO_THROTTLE_TYPES = {\n 'bps': 'blkio.throttle.io_service_bytes',\n 'iops': 'blkio.throttle.io_serviced',\n}\n\n\ndef get_blkio_info(cgrp, kind):\n \"\"\"Get blkio throttle info.\"\"\"\n assert kind in _BLKIO_THROTTLE_TYPES\n\n blkio_data = get_value('blkio', cgrp, _BLKIO_THROTTLE_TYPES[kind])\n blkio_info = {}\n for entry in blkio_data.split('\\n'):\n if not entry or entry.startswith('Total'):\n continue\n\n major_minor, metric_type, value = entry.split(' ')\n blkio_info.setdefault(major_minor, {})[metric_type] = int(value)\n\n return blkio_info\n\n\ndef get_cpu_shares(cgrp):\n \"\"\"Get cpu shares\"\"\"\n shares = get_value('cpu', cgrp, 'cpu.shares')\n return int(shares)\n\n\ndef set_cpu_shares(cgrp, shares):\n \"\"\"set cpu shares\"\"\"\n return set_value('cpu', cgrp, 'cpu.shares', shares)\n\n\ndef join(subsystem, group, pid=None):\n \"\"\"Move process into a specific cgroup\"\"\"\n if pid is None:\n pid = os.getpid()\n return set_value(subsystem, group, 'tasks', pid)\n\n\ndef mount(subsystem):\n \"\"\"Mounts cgroup subsystem.\"\"\"\n _LOGGER.info('Mounting cgroup: %s', subsystem)\n path = os.path.join(CGROOT, subsystem)\n if not os.path.exists(path):\n os.mkdir(path)\n\n subproc.check_call(['mount', '-t', 'cgroup', '-o',\n subsystem, subsystem, path])\n\n\ndef ensure_mounted(subsystems):\n \"\"\"Ensure that given subsystems are properly mounted.\"\"\"\n mounted = mounted_subsystems()\n for subsystem in subsystems:\n if subsystem not in mounted:\n mount(subsystem)\n\n\ndef available_subsystems():\n \"\"\"Get set of available cgroup subsystems\"\"\"\n subsystems = list()\n\n with open(PROCCGROUPS) as cgroups:\n for cgroup in cgroups:\n try:\n (subsys_name, _hierarchy,\n _num_cgroups, enabled) = cgroup.split()\n if subsys_name[0] != '#' and enabled == '1':\n subsystems.append(subsys_name)\n except: # pylint: disable=W0702\n pass\n\n return subsystems\n\n\ndef mounted_subsystems():\n \"\"\"Return a dict with cgroup subsystems 
and their mountpoints\"\"\"\n subsystems2mounts = dict()\n\n with open(PROCMOUNTS) as mounts:\n subsystems = available_subsystems()\n for mountline in mounts:\n try:\n (_fs_spec, fs_file, fs_vfstype,\n fs_mntops, _fs_freq, _fs_passno) = mountline.split()\n if fs_vfstype == 'cgroup':\n for op in fs_mntops.split(','):\n if op in subsystems:\n subsystems2mounts[op] = fs_file\n except: # pylint: disable=W0702\n pass\n\n return subsystems2mounts\n\n\ndef get_mountpoint(subsystem):\n \"\"\"Returns mountpoint of a particular subsystem\"\"\"\n mounts = mounted_subsystems()\n return mounts[subsystem]\n","sub_path":"lib/python/treadmill/cgroups.py","file_name":"cgroups.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248381022","text":"# Josh Shifman\n# shifmanj\n\nimport tweepy\nimport pandas\nimport json\nimport time\nimport csv\n\n# get these at apps.twitter.com\n# use individual tokens/keys\nACCESS_TOKEN = \"\"\nACCESS_TOKEN_SECRET = \"\"\nCONSUMER_KEY = \"\"\nCONSUMER_KEY_SECRET = \"\"\n\n# set up auth\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_KEY_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n# create api obejct\napi = tweepy.API(auth)\n\ndemScreenNames = [\"TheDemocrats\", \"HouseDemocrats\", \"SenateDems\"]\nrepScreenNames = [\"GOP\", \"HouseGOP\", \"Senate_GOPs\"]\ndemIds = set()\nrepIds = set()\n\nrequestNum = 0\n\n# for each relevant account, get all followers\nfor sn in demScreenNames + repScreenNames:\n for page in tweepy.Cursor(api.followers_ids, screen_name=sn).pages():\n\n requestNum += 1\n # Add new follower ids to set\n if sn in demScreenNames:\n demIds |= set(page)\n else:\n repIds |= set(page)\n print (\"Getting more IDS, request count: \", requestNum)\n print(\"# of dem ids: \", len(demIds))\n print(\"# of rep ids: \", len(repIds))\n\n # The twitter api rate limit allows for 15 requests per 15 min window, so we 'sleep' for 60 seconds\n # after every request in order to not exceed to rate limit.\n\n print (\"Sleeping for 60s ...\")\n time.sleep(60)\n\n if sn in demScreenNames:\n print(\"Writing dem csv for \" + sn + \"...\")\n cw = csv.writer(open(\"dem_\" + sn + \"_ids.csv\",'w'))\n cw.writerow(demIds)\n demIds.clear()\n else:\n print(\"Writing rep csv for \" + sn + \"...\")\n cw = csv.writer(open(\"rep_\" + sn + \"_ids.csv\",'w'))\n cw.writerow(repIds)\n repIds.clear()\n\n# remove ids which occur in both rep and dem sets\nprint (\"Removing duplicates ...\")\nintersection = demIds & repIds\ndemIds -= intersection\nrepIds -= intersection\n\nprint(\"Writing csv's ...\")\n# write the lists of ids to csv files\ncw = csv.writer(open(\"demIds.csv\",'w'))\ncw.writerow(demIds)\ncw = csv.writer(open(\"repIds.csv\",'w'))\ncw.writerow(repIds)\n","sub_path":"Scripts/getIds.py","file_name":"getIds.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137522496","text":"import django.utils.timezone\nfrom django.db import migrations, models\n\nimport openslides.utils.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', 
models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),\n ('is_superuser', models.BooleanField(\n default=False,\n verbose_name='superuser status',\n help_text='Designates that this user has all permissions without explicitly assigning them.')),\n ('username', models.CharField(max_length=255, blank=True, verbose_name='Username', unique=True)),\n ('first_name', models.CharField(max_length=255, blank=True, verbose_name='First name')),\n ('last_name', models.CharField(max_length=255, blank=True, verbose_name='Last name')),\n ('structure_level', models.CharField(\n default='',\n max_length=255,\n blank=True,\n verbose_name='Structure level',\n help_text='Will be shown after the name.')),\n ('title', models.CharField(\n default='',\n max_length=50,\n blank=True,\n verbose_name='Title',\n help_text='Will be shown before the name.')),\n ('about_me', models.TextField(default='', blank=True, verbose_name='About me', help_text='Your profile text')),\n ('comment', models.TextField(default='', blank=True, verbose_name='Comment', help_text='Only for notes.')),\n ('default_password', models.CharField(default='', max_length=100, blank=True, verbose_name='Default password')),\n ('is_active', models.BooleanField(\n default=True,\n verbose_name='active',\n help_text='Designates whether this user should be treated as '\n 'active. Unselect this instead of deleting accounts.')),\n ('is_present', models.BooleanField(\n default=False,\n verbose_name='present',\n help_text='Designates whether this user is in the room or not.')),\n ('groups', models.ManyToManyField(\n blank=True,\n verbose_name='groups',\n related_query_name='user',\n related_name='user_set',\n to='auth.Group',\n help_text='The groups this user belongs to. A user will get '\n 'all permissions granted to each of his/her group.')),\n ('user_permissions', models.ManyToManyField(\n blank=True,\n verbose_name='user permissions',\n related_query_name='user',\n related_name='user_set',\n to='auth.Permission',\n help_text='Specific permissions for this user.')),\n ],\n options={\n 'permissions': (\n ('can_see_name', 'Can see names of users'),\n ('can_see_extra_data', 'Can see extra data of users'),\n ('can_manage', 'Can manage users')),\n 'ordering': ('last_name',),\n },\n bases=(openslides.utils.models.RESTModelMixin, models.Model),\n ),\n ]\n","sub_path":"openslides/users/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"399078589","text":"import math\n\nif __name__ == \"__main__\":\n m = int(input())\n n = int(input())\n x = int(input())\n\n tablica = {}\n bool = True\n if m > n:\n print(\"nema podatoci\")\n bool = False\n\n else:\n for m in range(m,n+1):\n tablica[m] = (m*m, m*m*m, round(math.sqrt(m), 5))\n\n if x > n and bool:\n print(\"nema podatoci\")\n\n if x <= n: print(tablica[x])\n print(sorted(tablica.items()))","sub_path":"code.finki tasks/TableSquareCubeRoot/tableSquareCubeRoot.py","file_name":"tableSquareCubeRoot.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124704832","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 28 10:03:13 2020\n\n@author: dariocorral\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom utils import Utils\n\n\n\n\nclass Preprocessing(object):\n \n \"\"\"\n Class to preprocessing data\n \n \"\"\"\n 
\n def __init__(self):\n \n self.__utils = Utils()\n \n \n def execute(self, saveCSV = True):\n \n \"\"\"\n ETL Process Execution\n \"\"\"\n \n #Datasets Varese houses Test\n df_varese = pd.read_csv(\"data/varese_houses.csv\")\n \n \n df_varese['district'] = 'WG - ' + df_varese['district']\n \n df_monza = pd.read_csv(\"data/monza_houses.csv\")\n \n df_monza ['district'] = 'SU - ' + df_monza['district']\n \n #Df total\n df = df_varese.append(df_monza)\n \n #Drop first column\n df.drop('Unnamed: 0',axis = 1,inplace=True)\n \n #Apply unnest json columns\n column_features_exp = self.__utils.unnest_column_json(df, 'features')\n column_parking_exp = self.__utils.unnest_column_json(df, 'parkingSpace')\n \n #Add columns to Df\n df[[*column_features_exp.columns]]=column_features_exp\n df[[*column_parking_exp.columns]]=column_parking_exp\n \n #Select Column important\n df = df[['price','propertyType','size','floor','rooms','bathrooms',\n 'district','latitude','longitude','status','hasLift','hasAirConditioning', \n 'hasBoxRoom', 'hasTerrace','hasGarden', 'hasSwimmingPool', \n 'hasParkingSpace','isParkingSpaceIncludedInPrice', \n 'parkingSpacePrice']]\n \n #Price + parking Price\n df['price'] = np.where(\n (df['isParkingSpaceIncludedInPrice']==False)&\n (df['hasParkingSpace']==True),\n df['price']+df['parkingSpacePrice'],\n df['price']\n )\n \n #Avg Price District\n df['priceByArea'] = df['price']/df['size']\n \n #Floor to Int\n df.floor.replace(['nan','en','bj','st','ss'],0,inplace = True)\n df.floor.fillna(0,inplace = True)\n df.floor = df.floor.astype(int)\n \n #District /Status as Category\n df.district = df.district.astype('category')\n \n #Fill NaN hasLift\n df.hasLift.fillna(False,inplace = True)\n \n avg_price_district_df = (\n pd.DataFrame(df.groupby(['district']).mean()\n ['priceByArea'].astype(int))).reset_index()\n \n df = df.merge(avg_price_district_df, left_on='district', \n right_on='district')\n \n df.rename(columns={'priceByArea_x':'priceByArea',\n 'priceByArea_y':'avgPriceZone'},inplace = True)\n \n #Status NaN to renew or Good\n df['status'] = np.where( \n (df['priceByArea'] < df['avgPriceZone'])\n &(df['status'].isnull())\n ,'renew',\n np.where(\n (df['priceByArea'] >= df['avgPriceZone'])\n &(df['status'].isnull()),\n 'good',\n df['status']\n ))\n \n #Add renew Ligth\n df['status'] = np.where( \n (df['priceByArea'] < df['avgPriceZone'])&\n (df['status']=='good'),\n 'renew', df['status']\n )\n \n \n #Status as category\n df.status = df.status.astype('category')\n\n \n df = df[['price', 'propertyType', 'size','priceByArea', 'floor', 'rooms',\n 'bathrooms','district', 'latitude','longitude','avgPriceZone',\n 'status', 'hasLift','hasAirConditioning', 'hasBoxRoom',\n 'hasTerrace', 'hasGarden',\n 'hasSwimmingPool','hasParkingSpace','parkingSpacePrice'\n ]]\n \n df['priceByArea'] = df['priceByArea'].astype(int)\n \n #Change boolean columns to 1/0\n df[['hasLift','hasAirConditioning', 'hasBoxRoom',\n 'hasTerrace', 'hasGarden', 'hasSwimmingPool', \n 'hasParkingSpace']]=(\n df[['hasLift','hasAirConditioning', \n 'hasBoxRoom','hasTerrace', 'hasGarden', \n 'hasSwimmingPool', 'hasParkingSpace']].astype(int))\n \n #status Int\n statusInt=(df.groupby('status').mean()['priceByArea']).rank().astype(int)\n \n df['statusInt'] = pd.merge(df['status'],statusInt.reset_index(),\n on='status',how='left').iloc[:,-1:]\n \n #Box Posto Auto Union\n df['box_posto_auto'] = np.where(\n ((df['hasBoxRoom'] ==1)|(df['hasParkingSpace'] == 1)),\n 1,0)\n \n #Property Type\n df['propertyType'] = 
df['propertyType'].astype('category')\n \n propertyTypeInt=(\n df.groupby('propertyType').mean()['priceByArea']).rank().astype(int)\n \n df['propertyTypeInt'] = pd.merge(df['propertyType'],\n propertyTypeInt.reset_index(),on='propertyType',\n how='left').iloc[:,-1:]\n \n #Garden / Terrace Union\n df['garden_terrace'] = np.where(\n (df['hasTerrace'] == 1)|\n (df['hasGarden'] ==1),\n 1,0)\n \n #Floor \n df['floorCat']=np.where(\n (df['floor']<=1)\n ,0,1)\n #Lift Cat\n df['liftCat'] = np.where(\n (df['hasLift'] == 1) & (df['floor'] >= 2)\n ,1,0)\n \n #Rooms Cat\n df['roomsCat'] = np.where(\n (df['rooms'] >=4 ) \n ,4,df['rooms'])\n \n #Rooms Cat\n df['bathroomsCat'] = np.where(\n (df['bathrooms'] >= 2) \n ,2,df['bathrooms'])\n \n if saveCSV:\n \n df.to_csv(\"data/houses_clean.csv\")\n \n return df\n\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303935174","text":"import sys\r\n\r\ndisqualified = []\r\nname_to_score = {}\r\n\r\ndef sorter(t):\r\n return t[-1]\r\n\r\ndef sorter_1(l):\r\n return int(l[1])\r\n\r\n# \"a\" Being The Total Shots\r\ndef build_mappings(par, index, a):\r\n\r\n player_scores = []\r\n\r\n # Zero = Score To Par\r\n for i in range(0, 18):\r\n player_scores.append([par[i], index[i], a[i], 0]) \r\n\r\n return player_scores\r\n\r\n# \"a\" Being Mappings from above function\r\ndef calculate_score(a, handicap, d, name):\r\n\r\n # Sort The List By the Index Of the Hole\r\n a = sorted(a, key=sorter_1)\r\n\r\n extra_strokes = handicap // 18\r\n leftover_strokes = handicap % 18\r\n\r\n total = 0\r\n\r\n # Adds 1 To And Hole Within The Index\r\n for item in range(0, leftover_strokes):\r\n a[item][3] += 1\r\n\r\n # If Handicap Is > 18, 36, or == 54, Adds 1,2 Or 3 To All Items\r\n # Respectively\r\n for item in a:\r\n item[3] += extra_strokes\r\n\r\n try:\r\n score = int(item[2]) - int(item[0]) - int(item[3])\r\n if score in d:\r\n total += d[score]\r\n\r\n except ValueError:\r\n if item[2] == \"X\":\r\n continue\r\n else:\r\n disqualified.append(name)\r\n name_to_score[name] = -1\r\n break\r\n\r\n return total\r\n\r\ndef main():\r\n\r\n score_to_par = {\r\n -7: 9,\r\n -6: 8,\r\n -5: 7,\r\n -4: 6,\r\n -3: 5,\r\n -2: 4,\r\n -1: 3,\r\n 0: 2,\r\n 1: 1,\r\n 2: 0,\r\n }\r\n\r\n # Read In File Info\r\n lines = sys.stdin.readlines()\r\n hole_pars = lines[0].strip().split()\r\n hole_indexs = lines[1].strip().split()\r\n\r\n # Scan Through Every Line Starting At Line 3\r\n for line in lines[2:]:\r\n line = line.split()\r\n\r\n # Get Info For Printing And Calculating Score\r\n name = \" \".join(line[:-19])\r\n handicap = int(line[-19])\r\n list_of_strokes = line[-18:]\r\n\r\n player_stats = build_mappings(hole_pars, hole_indexs, list_of_strokes)\r\n player_score = calculate_score(player_stats, handicap, score_to_par, name)\r\n\r\n if name not in disqualified:\r\n name_to_score[name] = player_score\r\n\r\n max_name = len(max(name_to_score.keys(), key=len))\r\n\r\n for (k, v) in sorted(name_to_score.items(), key=sorter, reverse=True):\r\n if v != -1:\r\n print(\"{:>{}} : {:>2}\".format(k, max_name, v))\r\n\r\n for s in disqualified:\r\n print(\"{:>{}} : Disqualified\".format(s, max_name))\r\n \r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","sub_path":"bucket-list-main/stableford_122.py","file_name":"stableford_122.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417623475","text":"import timeit\nimport sys\nfrom hellaPy import *\nimport pylab as plt\nimport scipy.integrate as si\nfrom numpy import *\nfrom numpy.linalg import *\nimport nucheb as cheb\nimport numpy \n\nprint('Running heat.py with numpy (CPU-MKL)')\nSKIP = 200\n\ndef get_corner_elements(A):\n corner_ind = ix_((0,-1),(0,-1))\n return A[corner_ind]\n\ndef mybacksolve(A,b):\n # Solves A.T x.T = b.T and returns x = b A^{-1}\n x = solve(A.T,b.T)\n return x.T\n\ndef rk4(f,time,u):\n def method(h,f,t,u):\n k1 = h*f(t ,u )\n k2 = h*f(t+0.5*h,u+0.5*k1)\n k3 = h*f(t+0.5*h,u+0.5*k2)\n k4 = h*f(t+ h,u+ k3)\n return u + (k1+2*(k2+k3)+k4)/6\n U = zeros((len(time),len(u)))\n U[0] = u\n dt= diff(time)\n for k,t in enumerate(time):\n if k == 0:\n continue\n U[k] = method(dt[k-1],f,t,U[k-1])\n v = norm(U[k])\n return U\n\ndef main(N,tspan,h,gBC=r_[1,1]):\n # u_t = u_xx -1 1:\n mul = float(sys.argv[1])\n gBC = mul * r_[1.,1]\n N = 32\n tspan = [0,3]\n h = 1e-4\n print('Running heat equation solver with')\n print(f' N = {N}')\n print(f' tspan = [{tspan[0]},{tspan[1]}]')\n print(f'timestep = {h}')\n print(f' flux = {gBC[0]}')\n T,X,U,ctime,err = main(N,tspan,h,gBC)\n print(f'Finished (wall time): {ctime}')\n plt.figure(1,figsize=(10,7))\n mycontourf(T,X,U,numpy.linspace(-1,1)*numpy.abs(U).max(),cmap=mycm15,edgecolor='#999999')\n plt.xticks(fontsize=25)\n plt.yticks(fontsize=25)\n plt.xlabel('time',fontsize=30)\n plt.ylabel('space',fontsize=30)\n plt.title(f'Solution case: {int(mul):3d}',fontsize=36)\n plt.savefig(f'figs/out_{int(mul):03d}.png')\n","sub_path":"intro-python/heat/heatnb.py","file_name":"heatnb.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221083600","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport csv\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.utils\nimport torchvision\nimport torch.backends.cudnn as cudnn\n\nfrom common import utils\nfrom common.utils import RandomPixelMasking, RandomHalfMasking, CenterMasking\nfrom common.eval_test import evaluate\nfrom inpainting_cat.cae_model import ProbablisticCAE\nfrom inpainting_cat.train import arch_search_valid, train\n\n\ndef load_data(path='../data/', data_name='celebA', img_size=64):\n print('Loading ' + data_name + 'data...')\n train_transform, test_transform = utils.data_transforms(img_size=img_size)\n\n if data_name != 'svhn':\n # The image data should be contained in sub folders (e.g., ../data/celebA/train/image/aaa.png)\n train_data = torchvision.datasets.ImageFolder('{}{}/train'.format(path, data_name), transform=train_transform)\n test_data = torchvision.datasets.ImageFolder('{}{}/test'.format(path, data_name), transform=test_transform)\n else:\n train_data = torchvision.datasets.SVHN(path, split='train', transform=train_transform, download=True)\n test_data = torchvision.datasets.SVHN(path, split='test', transform=test_transform, download=True)\n # extra_data = torchvision.datasets.SVHN(path, split='extra', transform=train_transform, download=True)\n # train_data = torch.utils.data.ConcatDataset([train_data, extra_data])\n\n print('train_data_size: %d, test_data_size: %d' % (len(train_data), len(test_data)))\n return train_data, test_data\n\n\n# Save 
result data\nclass SaveResult(object):\n def __init__(self, res_file_name='result.csv'):\n self.res_file_name = res_file_name\n # header\n with open(self.res_file_name, 'w') as fp:\n writer = csv.writer(fp, lineterminator='\\n')\n writer.writerow(['exp_index', 'train_time', 'MLE_MSE', 'MLE_PSNR', 'MLE_SSIM', 'det_param', 'max_param',\n 'node_num', 'cat_d', 'cat_valid_d', 'cat_param_num', 'active_num', 'net_str'])\n\n def save(self, exp_index, model, train_time, res):\n dist = model.asng\n params = np.sum(np.prod(param.size()) for param in model.parameters())\n net_str = model.mle_network_string(sep=' ')\n with open(self.res_file_name, 'a') as fp:\n writer = csv.writer(fp, lineterminator='\\n')\n writer.writerow([exp_index, train_time, res['MLE_MSE'], res['MLE_PSNR'], res['MLE_SSIM'],\n model.get_params_mle(), params, len(model.module_info), dist.d, dist.valid_d, dist.N,\n int(model.is_active.sum()), net_str])\n\n\ndef experiment(exp_num=1, start_id=0, data_name='celebA', dataset_path='../data/', corrupt_type='RandomPixel', gpu_id=0,\n init_delta_factor=0.0, batchsize=16, train_ite=200000, retrain_ite=500000, out_dir='./result/'):\n\n if gpu_id >= 0:\n torch.cuda.set_device(gpu_id)\n cudnn.benchmark = True\n cudnn.enabled = True\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # Corrupt function\n if corrupt_type == 'RandomPixel':\n corrupt_func = RandomPixelMasking()\n elif corrupt_type == 'RandomHalf':\n corrupt_func = RandomHalfMasking()\n elif corrupt_type == 'Center':\n corrupt_func = CenterMasking()\n else:\n print('Invalid corrupt function type!')\n return\n\n train_res = SaveResult(res_file_name=out_dir + 'train_result.csv')\n retrain_res = SaveResult(res_file_name=out_dir + 'retrain_result.csv')\n with open(out_dir + 'description.txt', 'w') as o:\n o.write('data_name: ' + data_name + '\\n')\n o.write('corrupt_func: ' + corrupt_type + '\\n')\n o.write('batchsize: %d\\n' % batchsize)\n o.write('train_ite: %d\\n' % train_ite)\n o.write('retrain_ite: %d\\n' % retrain_ite)\n\n train_data, test_data = load_data(path=dataset_path, data_name=data_name, img_size=64)\n ch_size = train_data[0][0].shape[0]\n\n for n in np.arange(start_id, start_id + exp_num):\n prefix = out_dir + '{:02d}_'.format(n)\n\n print('Architecture Search...')\n nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n downsample=True, k_sizes=(1, 3, 5), ch_nums=(64, 128, 256), skip=(True, False),\n M=None, delta_init_factor=init_delta_factor)\n optimizer = torch.optim.SGD(nn_model.parameters(), lr=0.025, momentum=0.9, weight_decay=0., nesterov=False)\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, train_ite)\n\n # Training\n period = {'max_ite': train_ite, 'save': train_ite/50, 'verbose_ite': 100}\n n_model, train_time = \\\n arch_search_valid(nn_model, train_data, test_data, corrupt_func, optimizer, lr_scheduler, clip_value=5.,\n batchsize=batchsize, lam=2, valid_rate=0.5, gpu_id=gpu_id, period=period,\n out_model=prefix + 'trained_model.pt', log_file=prefix + 'train_log.csv')\n\n # Testing\n res = evaluate(nn_model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize,\n img_out_dir=prefix+'trained_model_out_img/')\n\n train_res.save(n, nn_model, train_time, res) # Save result\n\n # Load theta from log file\n #import pandas as pd\n #df = pd.read_csv(prefix + 'train_log.csv')\n #theta = np.array(df.iloc[-1, 14:])\n #nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n 
# downsample=True, k_sizes=(1, 3, 5), ch_nums=(64, 128, 256), skip=(True, False),\n # M=None)\n #nn_model.asng.load_theta_from_log(theta)\n\n print('Retraining...')\n nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n downsample=True, k_sizes=(1, 3, 5), ch_nums=(64, 128, 256), skip=(True, False),\n M=nn_model.asng.mle())\n optimizer = torch.optim.Adam(nn_model.parameters(), lr=0.001, betas=(0.9, 0.999))\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(retrain_ite*2/5),\n int(retrain_ite*4/5)], gamma=0.1)\n\n # Re-training\n period = {'max_ite': retrain_ite, 'save': retrain_ite/50, 'verbose_ite': 100}\n nn_model, train_time = train(nn_model, train_data, test_data, corrupt_func, optimizer, lr_scheduler,\n clip_value=5., batchsize=batchsize, gpu_id=gpu_id, period=period,\n out_model=prefix + 'retrained_model.pt', log_file=prefix + 'retrain_log.csv')\n\n # Testing\n res = evaluate(nn_model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize,\n img_out_dir=prefix + 'retrained_model_out_img/')\n\n retrain_res.save(n, nn_model, train_time, res) # Save result\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='ASNG-NAS (Cat) for Inpainting')\n parser.add_argument('--exp_id_start', '-s', type=int, default=0, help='Starting index number of experiment')\n parser.add_argument('--exp_num', '-e', type=int, default=1, help='Number of experiments')\n parser.add_argument('--data_path', '-p', default='../data/', help='Data path')\n parser.add_argument('--data_name', '-d', default='celebA', help='Data name (celebA / cars / svhn)')\n parser.add_argument('--corrupt_type', '-c', default='RandomPixel',\n help='Corrupt function (RandomPixel / RandomHalf / Center)')\n parser.add_argument('--gpu_id', '-g', type=int, default=0, help='GPU ID')\n\n parser.add_argument('--init_delta_factor', '-f', type=float, default=0.0, help='Init delta factor')\n parser.add_argument('--batch_size', '-b', type=int, default=16, help='Mini-batch size')\n parser.add_argument('--train_ite', '-t', type=int, default=50000,\n help='Maximum number of training iterations (W updates)')\n parser.add_argument('--retrain_ite', '-r', type=int, default=500000,\n help='Maximum number of re-training iterations (W updates)')\n parser.add_argument('--out_dir', '-o', default='./result/', help='Output directory')\n args = parser.parse_args()\n\n start_id = args.exp_id_start\n exp_num = args.exp_num\n data_path = args.data_path\n data_name = args.data_name\n corrupt_type = args.corrupt_type\n gpu_id = args.gpu_id\n init_delta_factor = args.init_delta_factor\n batch_size = args.batch_size\n train_ite = args.train_ite\n retrain_ite = args.retrain_ite\n out_dir = args.out_dir + data_name + '_' + corrupt_type + '/'\n\n experiment(exp_num=exp_num, start_id=start_id, data_name=data_name, dataset_path=data_path,\n corrupt_type=corrupt_type, gpu_id=gpu_id, init_delta_factor=init_delta_factor, batchsize=batch_size,\n train_ite=train_ite, retrain_ite=retrain_ite, out_dir=out_dir)\n","sub_path":"inpainting/main_inpainting_cat.py","file_name":"main_inpainting_cat.py","file_ext":"py","file_size_in_byte":9212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184887433","text":"import os\r\nimport shutil\r\nfrom zipfile import ZipFile\r\nfrom colorama import Fore\r\nimport re\r\n\r\ndef check_re(pathName, fileName, searchRE):\r\n search_file = root+'/'+fileName\r\n fl = open 
(search_file,'r')\r\n    tx = fl.read()\r\n    fl.close()\r\n    if len(re.findall(searchRE, tx)) > 0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n\r\n\r\ndir = os.getcwd()\r\ndir = '/home/urik/Python38/Complete-Python-3-Bootcamp-master/12-Advanced Python Modules/08-Advanced-Python-Module-Exercise/'\r\nfile_name = dir+'unzip_me_for_instructions.zip'\r\nxdir = os.getcwd()+'/temp' \r\n\r\nshutil.unpack_archive(file_name,xdir)\r\nprint ('________________________________________________________________________________')\r\nprint(\"{}Archive file unpacked successfully. Extract directory {}\".format(Fore.RED,xdir)) \r\nprint ('________________________________________________________________________________')\r\n\r\ni = 0\r\n'''\r\nfor root,dirs,files in os.walk(xdir): \r\n    print ('{}\\t ROOT:{}'.format(Fore.WHITE,root) )\r\n    for d in dirs:\r\n        print ('\\t\\t DIR:{}'.format(d))\r\n    for f in files:\r\n        print ('\\t\\t\\t FILE:{}'.format(f))\r\nprint ('________________________________________________________________________________')\r\n'''\r\nfor root,dirs,files in os.walk(xdir): \r\n    for f in files:\r\n        i += 1\r\n#        print ('\\t\\t\\t {} ) PATH: {} FILE:{}'.format(i,root,f))\r\n        if (check_re(root, f, r'\\d\\d\\d-\\d\\d\\d-\\d{4}')):\r\n            print ('Found in {} - {}'.format(root,f))\r\n        else:\r\n            pass\r\n\r\n","sub_path":"Puzzle.py","file_name":"Puzzle.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"445272258","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\nfrom io import StringIO\nimport json\nwith open('/Users/Administrator/python-master/test.txt', 'w') as f:\n    f.write('Hello world!')\n\ndef stringIO_test():\n\n    f = StringIO()\n    f.write('hello')\n    f.write(' ')\n    f.write('world!')\n    print(f.getvalue())\n\ndef json_test():\n    d= dict(name='Bob', age=20, score=68)\n    json_str = json.dumps(d)\n    print(json_str)\n    print(json.loads(json_str))\n\nif __name__ == '__main__':\n    stringIO_test()\n    json_test()\n","sub_path":"mydict/IO_test.py","file_name":"IO_test.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"121818561","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport os\ndef changeconf(tls_index:list,cipher_index:list,confpath='conf/nginx.conf',conf_template='./origin.conf'):\n    tlslist = ['TLSv1','TLSv1.1','TLSv1.2','SSLv3']\n    cipherlist = ['3DES','AES','AESGCM','CAMELLIA','IDEA','RC4','SEED']\n    cipher_out = []\n    tls_out = []\n    for i in tls_index:\n        if i >= len(tlslist):\n            print('index out of range! in function changeconf')\n            return\n        tls_out.append(tlslist[i])\n    tls_out_str = ' '.join(tls_out)\n    for i in cipher_index:\n        if i >= len(cipherlist):\n            print('index out of range! in function changeconf')\n            return\n        cipher_out.append(cipherlist[i])\n    cipher_out_str = \":\".join(cipher_out)\n    if not os.system(\"cp \"+confpath+\" \"+confpath+'.back'):\n        with open(conf_template,'r') as f:\n            conf = f.read()\n            print('[+] read template ok')\n        with open(confpath, 'w') as f:\n            f.write(conf%(tls_out_str, cipher_out_str))\n            print('[+] write conf ok')\n        os.system('sudo make reload')\n    else:\n        print('cant cp conf file, check permissions!')\nif __name__ == '__main__':\n    
changeconf([0,1,2,3],[0,1,2,5],'conf/nginx.conf','./origin.conf')\n","sub_path":"script/changeconf.py","file_name":"changeconf.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362613791","text":"import os\nimport torch\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport argparse\nimport torch.utils.data as data\nfrom data import AnnotationTransform, BaseTransform, VOCDetection, detection_collate, coco_detection_collate, seq_detection_collate, mb_cfg, dataset_training_cfg, COCOroot, COCODetection\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss, RefineMultiBoxLoss\nfrom layers.functions import PriorBox\nimport numpy as np\nimport time\nimport logging\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\ndef print_log(args):\n logging.info('model_name: '+ args.model_name)\n logging.info('ssd_dim: '+ str(args.ssd_dim))\n logging.info('Backbone: '+ args.backbone)\n logging.info('BN: '+ str(args.bn))\n logging.info('Conv7 Channel: '+ str(args.c7_channel))\n if 'RefineDet' in args.backbone:\n logging.info('Refine: ' + str(args.refine))\n logging.info('Deform: ' + str(args.deform))\n logging.info('Multi-head: ' + str(args.multihead))\n if args.resume:\n logging.info('resume: '+ args.resume )\n logging.info('start_iter: '+ str(args.start_iter))\n elif args.resume_from_ssd:\n logging.info('resume_from_ssd: '+ args.resume_from_ssd )\n else:\n logging.info('load pre-trained backbone: '+ args.basenet )\n logging.info('lr: '+ str(args.lr))\n logging.info('warm_epoch: '+ str(args.warm_epoch))\n logging.info('gamam: '+ str(args.gamma))\n logging.info('step_list: '+ str(args.step_list))\n logging.info('save_interval: '+ str(args.save_interval))\n logging.info('dataset_name: '+ args.dataset_name )\n logging.info('set_file_name: '+ args.set_file_name )\n logging.info('gpu_ids: '+ args.gpu_ids)\n logging.info('augm_type: '+ args.augm_type)\n logging.info('batch_size: '+ str(args.batch_size))\n logging.info('loss weights: '+ str(args.loss_coe))\nparser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')\nparser.add_argument('--basenet', default='vgg16bn_reducedfc.pth', help='pretrained base model')\nparser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')\nparser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str, help='Resume from checkpoint') #'./weights/tssd300_VID2017_b8s8_RSkipTBLstm_baseAugmDrop2Clip5_FixVggExtraPreLocConf/ssd300_seqVID2017_20000.pth'\nparser.add_argument('--resume_from_ssd', default=None, type=str, help='Resume vgg and extras from ssd checkpoint')\nparser.add_argument('--num_workers', default=8, type=int, help='Number of workers used in dataloading')\nparser.add_argument('--start_iter', default=0, type=int, help='Begin counting iterations starting from this value (should be used with resume)')\nparser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for 
SGD')\nparser.add_argument('--log_iters', default=True, type=str2bool, help='Print the loss at each iteration')\nparser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='./weights040/test', help='Location to save checkpoint models')\nparser.add_argument('--dataset_name', default='VOC0712', help='VOC0712/VIDDET/seqVID2017/MOT17Det/seqMOT17Det')\nparser.add_argument('--step_list', nargs='+', type=int, default=[30,50], help='step_list for learning rate')\nparser.add_argument('--backbone', default='RefineDet_ResNet101', type=str, help='Backbone')\nparser.add_argument('--c7_channel', default=1024, type=int, help='out_channel of Conv7 in VGG')\nparser.add_argument('--refine', default=True, type=str2bool, help='Only work when backbone==RefineDet')\nparser.add_argument('--deform', default=1, type=int, help='number of deform group. 0: Do not use deformable conv. Only work when backbone==RefineDet')\nparser.add_argument('--multihead', default=True, type=str2bool, help='Multihead detection')\nparser.add_argument('--drop', default=1.0, type=float, help='DropOut, Only work when backbone==RefineDet')\nparser.add_argument('--model_name', default='ssd', type=str, help='which model is selected')\nparser.add_argument('--ssd_dim', default=320, type=int, help='ssd_dim 300, 320 or 512')\nparser.add_argument('--gpu_ids', default='4,5', type=str, help='gpu number')\nparser.add_argument('--augm_type', default='base', type=str, help='how to transform data')\nparser.add_argument('--set_file_name', default='train', type=str, help='train_VID_DET/train_video_remove_no_object/train, MOT dataset does not use it')\nparser.add_argument('--loss_coe', nargs='+', type=float, default=[1.0,1.0, 0.5], help='coefficients for loc, conf, att, asso')\nparser.add_argument('--bn', default=False, type=str2bool, help='use BatchNorm in the backbone')\nparser.add_argument('--save_interval', default=10, type=int, help='frequency of checkpoint saving')\nparser.add_argument('--warm_epoch', default=0, type=int, help='warm epoch')\nargs = parser.parse_args()\nif not os.path.exists(args.save_folder):\n    os.mkdir(args.save_folder)\ncurrent_time = time.strftime(\"%b_%d_%H:%M:%S_%Y\", time.localtime())\nlogging.basicConfig(level=logging.DEBUG,\n                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                    datefmt='%a, %d %b %Y %H:%M:%S',\n                    filename=os.path.join(args.save_folder, current_time+'.log'),\n                    filemode='w')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\nprint_log(args)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\ndevice = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')\nssd_dim = args.ssd_dim\nif args.dataset_name in ['MOT15', 'seqMOT15']:\n    prior = 'MOT_300'\n    cfg = mb_cfg[prior]\nelse:\n    prior = 'VOC_'+ str(args.ssd_dim)\n    if args.ssd_dim==300 and 'RFB' in args.backbone:\n        prior += '_RFB'\n    elif args.ssd_dim==512 and 'RefineDet' in args.backbone:\n        prior += '_RefineDet'\n    cfg = mb_cfg[prior]\ntrain_sets, num_classes, data_root = dataset_training_cfg[args.dataset_name]\nlogging.info('train sets: ' + str(train_sets))\nset_filename = args.set_file_name\nif args.dataset_name[:3] == 'seq':\n    collate_fn = seq_detection_collate\nelif args.dataset_name == 'COCO':\n    collate_fn = coco_detection_collate\nelse:\n    collate_fn = 
detection_collate\nif args.dataset_name == 'UW':\n means = (128, 128, 128)\nelse:\n means = (104, 117, 123)\nmean_np = np.array(means, dtype=np.int32)\nbatch_size = args.batch_size\nweight_decay = args.weight_decay\nmax_epoch = args.step_list[-1]\ngamma = 0.1\nmomentum = args.momentum\n\nif args.visdom:\n import visdom\n viz = visdom.Visdom()\n\nif 'RFB' in args.backbone:\n from model.rfbnet_vgg import build_net\n ssd_net = build_net('train', ssd_dim, num_classes, bn=args.bn)\nelif 'RefineDet' in args.backbone:\n if 'MobNet' in args.backbone:\n if args.deform:\n from model.dualrefinedet_mobilenet import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes,\n def_groups=args.deform, multihead=args.multihead)\n else:\n from model.refinedet_mobilenet import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, use_refine=args.refine)\n elif args.deform:\n from model.dualrefinedet_vggbn import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel, def_groups=args.deform, bn=args.bn, multihead=args.multihead)\n else:\n from model.refinedet_vgg import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, use_refine=args.refine, c7_channel=args.c7_channel, bn=args.bn, multihead=args.multihead)\nelif 'MobNet' in args.backbone:\n from model.ssd4scale_mobile import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel)\nelif '4s' in args.backbone:\n from model.ssd4scale_vgg import build_net\n ssd_net = build_net('train', size=ssd_dim, num_classes=num_classes, c7_channel=args.c7_channel, bn=args.bn)\nelse:\n ssd_net = None\nnet = ssd_net\nif device==torch.device('cuda'):\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\nprint(ssd_net)\nnet = net.to(device)\n\nif args.resume:\n logging.info('Resuming training, loading {}...'.format(args.resume))\n ssd_net.load_weights(args.resume)\nelse:\n backbone_weights = torch.load('../weights/'+ args.basenet)\n logging.info('Loading base network...')\n ssd_net.backbone.load_state_dict(backbone_weights)\n\nif not args.resume:\n from model.networks import net_init\n net_init(ssd_net, args.backbone, logging, refine=args.refine, deform=args.deform, multihead=args.multihead)\n\nif args.augm_type == 'ssd':\n data_transform = SSDAugmentation\nelse:\n data_transform = BaseTransform\n\noptimizer = optim.SGD(net.parameters(),\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n# criterion\nif 'RefineDet' in args.backbone and args.refine:\n use_refine = True\n arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False, device=device, only_loc=True)\n criterion = RefineMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False, device=device)\nelse:\n use_refine = False\n criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False, device=device)\n\npriorbox = PriorBox(cfg)\nwith torch.no_grad():\n priors = priorbox.forward().to(device)\n\ndef train():\n net.train()\n epoch = args.start_iter\n if args.dataset_name == 'COCO':\n dataset = COCODetection(COCOroot, year='trainval2014', image_sets=train_sets, transform=data_transform(ssd_dim, means), phase='train')\n else:\n dataset = VOCDetection(data_root, train_sets, data_transform(ssd_dim, means),\n AnnotationTransform(dataset_name=args.dataset_name),\n dataset_name=args.dataset_name, set_file_name=set_filename)\n epoch_size = len(dataset) // args.batch_size\n drop_step = [s * epoch_size 
for s in args.step_list]\n max_iter = max_epoch * epoch_size\n logging.info('Loading Dataset:' + args.dataset_name + ' dataset size: ' +str(len(dataset)))\n\n step_index = 0\n if args.visdom:\n # initialize visdom loss plot\n y_dim = 3\n legend = ['Loss', 'Loc Loss', 'Conf Loss',]\n if use_refine:\n y_dim += 1\n legend += ['Arm Loc Loss',]\n\n lot = viz.line(\n X=torch.zeros((1,)),\n Y=torch.zeros((1, y_dim)),\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title=args.save_folder.split('/')[-1],\n legend=legend,\n )\n )\n batch_iterator = None\n data_loader = data.DataLoader(dataset, batch_size, num_workers=args.num_workers, shuffle=True,\n collate_fn=collate_fn,\n pin_memory=True)\n\n for iteration in range(epoch*epoch_size, max_iter + 10):\n if (not batch_iterator) or (iteration % epoch_size == 0):\n # create batch iterator\n batch_iterator = iter(data_loader)\n if epoch % args.save_interval == 0:\n logging.info('Saving state, epoch: '+ str(epoch))\n torch.save(ssd_net.state_dict(), os.path.join(args.save_folder, args.model_name + str(\n ssd_dim) + '_' + args.dataset_name + '_' +repr(epoch) + '.pth'))\n epoch += 1\n\n t0 = time.time()\n if iteration in drop_step:\n step_index = drop_step.index(iteration) + 1\n adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)\n # adjust_learning_rate(optimizer, args.gamma)\n\n collected_data = next(batch_iterator)\n with torch.no_grad():\n images, targets = collected_data[:2]\n images = images.to(device)\n targets = [anno.to(device) for anno in targets]\n\n # forward\n loss = torch.tensor(0., requires_grad=True).to(device)\n out = net(images)\n # backward\n optimizer.zero_grad()\n if use_refine:\n loss_arm_l = arm_criterion(out[0], priors, targets)\n loss_l, loss_c = criterion(out[2:], priors, targets, arm_data=out[:2])\n loss += args.loss_coe[0] * loss_arm_l\n\n else:\n loss_l, loss_c = criterion(out, priors, targets)\n loss += args.loss_coe[0] * loss_l + args.loss_coe[1] * loss_c\n\n loss.backward()\n optimizer.step()\n t1 = time.time()\n if iteration % 10 == 0:\n if use_refine:\n logging.info('Epoch:' + repr(epoch) + ', epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) + ', total_iter ' + repr(\n iteration) + ' || loss: %.4f, Loss_l: %.4f, loss_c: %.4f, loss_arm_l: %.4f, lr: %.5f || Timer: %.4f sec.' % (\n loss, loss_l, loss_c,loss_arm_l, optimizer.param_groups[0]['lr'], t1 - t0))\n else:\n logging.info('Epoch:' + repr(epoch) + ', epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) + ', total_iter ' + repr(\n iteration) + ' || loss: %.4f, Loss_l: %.4f, loss_c: %.4f, lr: %.5f || Timer: %.4f sec.' 
% (loss, loss_l, loss_c, optimizer.param_groups[0]['lr'], t1 - t0))\n\n if args.visdom:\n y_dis = [loss.cpu(), args.loss_coe[0]*loss_l.cpu(), args.loss_coe[1]*loss_c.cpu()]\n if iteration == 1000:\n # initialize visdom loss plot\n lot = viz.line(\n X=torch.zeros((1,)),\n Y=torch.zeros((1, y_dim)),\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title=args.save_folder.split('/')[-1],\n legend=legend,\n )\n )\n if use_refine:\n y_dis += [args.loss_coe[0]*loss_arm_l.cpu(),]\n # update = 'append' if iteration\n viz.line(\n X=torch.ones((1, y_dim)) * iteration,\n Y=torch.FloatTensor(y_dis).unsqueeze(0),\n win=lot,\n update='append',\n opts=dict(\n xlabel='Iteration',\n ylabel='Loss',\n title=args.save_folder.split('/')[-1],\n legend=legend,)\n )\n\n\n torch.save(ssd_net.state_dict(),\n os.path.join(args.save_folder, args.model_name + str(ssd_dim) + '_' + args.dataset_name + '_' +\n repr(iteration) + '.pth'))\n print('Complet Training. Saving state, iter:', iteration)\n\n# def adjust_learning_rate(optimizer, gamma):\n\n # for param_group in optimizer.param_groups:\n # param_group['lr'] *= gamma\n\ndef adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):\n\n if epoch <= args.warm_epoch:\n lr = 1e-6 + (args.lr - 1e-6) * iteration / (epoch_size * args.warm_epoch)\n else:\n lr = args.lr * (gamma ** (step_index))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n # return lr\n\nif __name__ == '__main__':\n train()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525039197","text":"\"\"\"Schedules spreadsheet table\"\"\"\n\nfrom common.databases.base_spreadsheet import BaseSpreadsheet\nfrom common.api.spreadsheet import (\n Cell,\n find_corresponding_cell_best_effort,\n find_corresponding_cells_best_effort,\n)\n\n\nclass SchedulesSpreadsheet(BaseSpreadsheet):\n \"\"\"Schedules spreadsheet class\"\"\"\n\n def __init__(self, session=None, *args, **kwargs):\n super().__init__(session, *args, **kwargs)\n self._type = \"schedules\"\n\n __tablename__ = \"schedules_spreadsheet\"\n\n range_match_id = str(\"A2:A\")\n range_team1 = str(\"B2:B\")\n range_score_team1 = str(\"C2:C\")\n range_score_team2 = str(\"D2:D\")\n range_team2 = str(\"E2:E\")\n range_date = str(\"F2:F\")\n range_time = str(\"G2:G\")\n range_referee = str(\"H2:H\")\n range_streamer = str(\"I2:I\")\n range_commentator = str(\"J2:K\")\n range_mp_links = str(\"L2:L\")\n date_format = str()\n use_range = bool(False)\n max_referee = int(1)\n max_streamer = int(1)\n max_commentator = int(2)\n\n\nclass MatchIdNotFound(Exception):\n \"\"\"Thrown when a match id is not found.\"\"\"\n\n def __init__(self, match_id):\n self.match_id = match_id\n\n\nclass DuplicateMatchId(Exception):\n \"\"\"Thrown when a match id is found multiple times.\"\"\"\n\n def __init__(self, match_id):\n self.match_id = match_id\n\n\nclass MatchInfo:\n \"\"\"Contains all info about a match.\"\"\"\n\n def __init__(self, match_id_cell):\n self.match_id = match_id_cell\n self.team1 = Cell(-1, -1, \"\")\n self.team2 = Cell(-1, -1, \"\")\n self.score_team1 = Cell(-1, -1, \"\")\n self.score_team2 = Cell(-1, -1, \"\")\n self.date = Cell(-1, -1, \"\")\n self.time = Cell(-1, -1, \"\")\n self.referees = []\n self.streamers = []\n self.commentators = []\n self.mp_links = []\n\n def get_datetime(self):\n return \" \".join(filter(None, [self.date.value, self.time.value]))\n\n @staticmethod\n 
def from_id(schedules_spreadsheet, match_id, filled_only=True):\n match_id_cells = schedules_spreadsheet.worksheet.get_range(schedules_spreadsheet.range_match_id)\n corresponding_match_id_cells = schedules_spreadsheet.worksheet.find_cells(match_id_cells, match_id, False)\n if not corresponding_match_id_cells:\n raise MatchIdNotFound(match_id)\n if len(corresponding_match_id_cells) > 1:\n raise DuplicateMatchId(match_id)\n match_id_cell = corresponding_match_id_cells[0]\n return MatchInfo.from_match_id_cell(schedules_spreadsheet, match_id_cell, filled_only)\n\n @staticmethod\n def from_match_id_cell(schedules_spreadsheet, match_id_cell, filled_only=True):\n match_id_best_effort_ys = match_id_cell.y_merge_range\n match_info = MatchInfo(match_id_cell)\n worksheet = schedules_spreadsheet.worksheet\n match_info.team1 = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_team1), match_id_best_effort_ys, match_id_cell.y,\n )\n match_info.team2 = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_team2), match_id_best_effort_ys, match_id_cell.y,\n )\n match_info.score_team1 = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_score_team1), match_id_best_effort_ys, match_id_cell.y,\n )\n match_info.score_team2 = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_score_team2), match_id_best_effort_ys, match_id_cell.y,\n )\n match_info.date = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_date), match_id_best_effort_ys, match_id_cell.y,\n )\n match_info.time = find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_time), match_id_best_effort_ys, match_id_cell.y,\n )\n if schedules_spreadsheet.use_range:\n match_info.referees = find_corresponding_cells_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_referee),\n match_id_best_effort_ys,\n match_id_cell.y,\n filled_only,\n )\n match_info.streamers = find_corresponding_cells_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_streamer),\n match_id_best_effort_ys,\n match_id_cell.y,\n filled_only,\n )\n match_info.commentators = find_corresponding_cells_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_commentator),\n match_id_best_effort_ys,\n match_id_cell.y,\n filled_only,\n )\n match_info.mp_links = find_corresponding_cells_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_mp_links),\n match_id_best_effort_ys,\n match_id_cell.y,\n filled_only,\n )\n else:\n match_info.referees = [\n find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_referee), match_id_best_effort_ys, match_id_cell.y,\n )\n ]\n match_info.streamers = [\n find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_streamer), match_id_best_effort_ys, match_id_cell.y,\n )\n ]\n match_info.commentators = [\n find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_commentator),\n match_id_best_effort_ys,\n match_id_cell.y,\n )\n ]\n match_info.mp_links = [\n find_corresponding_cell_best_effort(\n worksheet.get_range(schedules_spreadsheet.range_mp_links), match_id_best_effort_ys, match_id_cell.y,\n )\n ]\n return 
match_info\n","sub_path":"backend/common/databases/schedules_spreadsheet.py","file_name":"schedules_spreadsheet.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"623682523","text":"class Solution:\r\n def plusOne(self, digits):\r\n \"\"\"\r\n :type digits: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n carry = 0\r\n # Add one to the last digit\r\n digits[-1] += 1\r\n if digits[-1] >= 10:\r\n carry = 1\r\n digits[-1] -= 10\r\n\r\n # Iterate through the rest of the digits\r\n # adding carries as necessary\r\n for i in range(len(digits)-2, -1, -1):\r\n digits[i] += carry\r\n carry = 0\r\n if digits[i] >= 10:\r\n digits[i] -= 10\r\n carry = 1\r\n\r\n # If there is a carry at the end, need to add a 1\r\n # to the start of the list\r\n if carry == 1:\r\n digits = [1] + digits\r\n\r\n return digits\r\n","sub_path":"plus-one.py","file_name":"plus-one.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550964940","text":"#!/usr/bin/env Python\r\n# coding=utf-8\r\n\r\nimport os\r\nimport time\r\nimport re\r\nimport chardet\r\nimport difflib\r\nfrom urllib import request\r\nfrom urllib import parse\r\nfrom bs4 import BeautifulSoup\r\nfrom posixpath import normpath\r\n\r\ndeep = 0\r\ntmp = \"\"\r\nvisited_url = set()\r\nvisited_img = set()\r\nfolderDict = {}\r\nurl = \"\"\r\ndomain = \"\"\r\nweb_title = \"\"\r\nsavedir = \"\"\r\npermit_url = \"\"\r\n\r\n#网址校验\r\ndef check_url():\r\n global url\r\n global domain\r\n while True:\r\n url = input(r\"请输入网址(http://...形式):\")\r\n reg = r'^((ht|f)tps?):\\/\\/[\\w\\-]+(\\.[\\w\\-]+)+([\\w\\-\\.,@?^=%&:\\/~\\+#]*[\\w\\-\\@?^=%&\\/~\\+#])?$'\r\n pattern = re.compile(reg)\r\n regret = re.match(pattern, url, flags=0)\r\n if regret is None:\r\n print(\"网址无效,请重新输入\")\r\n continue\r\n domain = parse.urlparse(url)[1]\r\n return\r\n\r\n#爬取范围校验\r\ndef check_domain():\r\n global permit_url\r\n while True:\r\n permit_url = input(r\"设定爬取范围(不设定可直接回车):\")\r\n if permit_url is None or permit_url == \"\":\r\n return\r\n reg = r'^((ht|f)tps?):\\/\\/[\\w\\-]+(\\.[\\w\\-]+)+([\\w\\-\\.,@?^=%&:\\/~\\+#]*[\\w\\-\\@?^=%&\\/~\\+#])?$'\r\n pattern = re.compile(reg)\r\n regret = re.match(pattern, permit_url, flags=0)\r\n if regret is None:\r\n print(\"爬取范围无效,请重新输入\")\r\n continue\r\n return\r\n\r\n'''TODO\r\n#目录校验\r\ndef check_dir():\r\n global savedir\r\n while True:\r\n savedir = input(r\"请输入保存目录:\")\r\n isExists = os.path.exists(savedir)\r\n if not isExists:\r\n print(\"目录无效,请重新输入\")\r\n continue\r\n return\r\n'''\r\n#创建保存目录\r\ndef create_savedir():\r\n global savedir\r\n ymd = time.strftime(\"%Y%m%d\", time.localtime())\r\n# savedir = savedir + \"/\" + ymd\r\n# isExists = os.path.exists(savedir)\r\n# if not isExists:\r\n# os.makedirs(savedir)\r\n savedir = \".\\\\\" + ymd\r\n\r\n#★★★前处理★★★\r\ndef pre_process():\r\n check_url()\r\n #check_dir()\r\n check_domain()\r\n create_savedir()\r\n\r\ndef get_local_pages(url,domain):\r\n global deep\r\n global visited_url\r\n global tmp\r\n repeat_time = 0\r\n pages = []\r\n \r\n #防止url读取卡住\r\n while True:\r\n try:\r\n time.sleep(1)\r\n #print(\"Opening the web\", url)\r\n web = request.urlopen(url)\r\n #print(\"Success to Open the web\")\r\n break\r\n except:\r\n #print(\"Open Url Failed !!! 
Repeat\")\r\n time.sleep(1)\r\n repeat_time = repeat_time+1\r\n if repeat_time == 5:\r\n return\r\n\r\n soup = BeautifulSoup(web.read(), \"html.parser\")\r\n tags = soup.findAll(name='a')\r\n url_list = []\r\n #去除url网页扩展名,对比用\r\n no_ext_url = get_no_ext_url(url)\r\n for tag in tags:\r\n #避免参数传递异常\r\n try:\r\n ret = tag['href']\r\n except:\r\n #print(\"Maybe not the attr : href\")\r\n continue\r\n\r\n #整理页面上的链接,相对url变换绝对url\r\n ret = join_sub_url(url, ret)\r\n\r\n #url变换异常或已经访问过\r\n if ret is None or ret in visited_url:\r\n continue\r\n\r\n if no_ext_url in ret:\r\n url_list.append((1.0, ret))\r\n else:\r\n #获取url相似度\r\n ratio = difflib.SequenceMatcher(lambda x: x in \"_\", url, ret).quick_ratio()\r\n #用相似度作为key把ret存入列表\r\n url_list.append((ratio, ret))\r\n \r\n #url列表元素相似度降序排序\r\n url_list = sorted(url_list, key=lambda x:x[0], reverse=True)\r\n for ret in url_list:\r\n o = parse.urlparse(ret[1])\r\n #协议处理\r\n if 'http' not in o[0] and 'https' not in o[0]:\r\n #print(\"Bad Page:\" + ret.encode('ascii'))\r\n continue\r\n\r\n #url合理性校验\r\n if o[0] is \"\" and o[1] is not \"\":\r\n #print(\"Bad Page: \" + ret[1])\r\n continue\r\n\r\n #域名校验\r\n if domain not in o[1]:\r\n #print(\"Bad Page: \" + ret[1])\r\n continue\r\n\r\n #范围校验\r\n if permit_url not in ret[1]:\r\n continue\r\n\r\n #整理,输出\r\n newpage = ret[1]\r\n if newpage not in visited_url:\r\n #print(\"Add New Page: \" + newpage)\r\n pages.append(newpage)\r\n return pages\r\n\r\n#组装子网页url\r\ndef join_sub_url(base,url):\r\n try:\r\n url = parse.urljoin(base, url)\r\n arr = parse.urlparse(url)\r\n path = normpath(arr[2])\r\n except:\r\n return None\r\n\r\n return parse.urlunparse((arr.scheme, arr.netloc, path, arr.params, arr.query, arr.fragment))\r\n \r\n#取得网页代码\r\ndef get_html(url):\r\n try:\r\n page = request.urlopen(url)\r\n html = page.read()\r\n html = utf8_transfer(html)\r\n except:\r\n print(\"链接打开失败:\" + url)\r\n return \"\"\r\n return html\r\n\r\n#取得图片地址\r\ndef get_img(html,url):\r\n global visited_img\r\n global web_title\r\n reg = r'http[s]?://\\S+\\.jpg'\r\n img_re = re.compile(reg)\r\n img_list = re.findall(img_re, html.lower())\r\n #print(\"★★★Count of Pictures★★★:\", end=\"\");print(len(img_list))\r\n if len(img_list) > 0:\r\n title = get_html_title(html)\r\n ratio = difflib.SequenceMatcher(lambda x: x in \"_\", title, web_title).quick_ratio()\r\n #页面标题相似度小于某值,创建新文件夹\r\n if ratio < 0.72:\r\n web_title = title\r\n else:\r\n title = web_title\r\n \r\n if title is None or title == \"\":\r\n title = r\"未知标题\"\r\n\r\n #创建文件夹\r\n global savedir\r\n path = savedir + \"/\" + title\r\n n = 1\r\n if mkDir(path):\r\n pass\r\n else:\r\n if title in folderDict:\r\n n = folderDict[title]\r\n\r\n for img_url in img_list:\r\n #已下载过的图片链接忽视\r\n if img_url in visited_img:\r\n continue\r\n try:\r\n request.urlretrieve(img_url, path + '/%s.jpg' % n)\r\n if filesize_filter(path + '/' + str(n) + '.jpg'):\r\n n += 1\r\n #已下载过的图片链接记录\r\n visited_img.add(img_url)\r\n print(r\"已下载:\",img_url)\r\n except:\r\n pass\r\n folderDict[title] = n\r\n\r\n#utf8编码转换\r\ndef utf8_transfer(html):\r\n try:\r\n charset = chardet.detect(html)['encoding']\r\n #print(r\"★★★★★网页编码形式★★★★★\" + charset)\r\n if chardet.detect(html)['encoding'].lower() == 'gb2312':\r\n html = html.decode(\"gb2312\", 'ignore')\r\n elif chardet.detect(html)['encoding'].lower() == 'utf-8':\r\n html = html.decode('utf-8', 'ignore')\r\n elif chardet.detect(html)['encoding'].lower() == 'gbk':\r\n html = html.decode('gbk', 'ignore')\r\n except:\r\n #print('utf8_transfer error')\r\n pass\r\n return 
html\r\n\r\n#用re抽取网页Title\r\ndef get_html_title(Html):\r\n compile_rule = r'.*'\r\n title_list = re.findall(compile_rule, Html)\r\n if title_list is None:\r\n title = ''\r\n else:\r\n title = title_list[0][7:-8]\r\n return title\r\n\r\n#创建文件夹\r\ndef mkDir(path):\r\n isExists = os.path.exists(path)\r\n if not isExists:\r\n os.makedirs(path)\r\n return True\r\n else:\r\n #print(path + 'is already Exists')\r\n return False\r\n \r\n#文件大小过滤器\r\ndef filesize_filter(filePath):\r\n #获取文件大小(KB)\r\n filesize = os.path.getsize(filePath)/1024\r\n #删除小于90KB的文件\r\n if filesize < 90:\r\n os.remove(filePath)\r\n return False\r\n return True\r\n\r\n#去除url扩展名\r\ndef get_no_ext_url(url):\r\n if \".html\" in url.lower() or \\\r\n \".aspx\" in url.lower():\r\n return url[:-5]\r\n elif \".htm\" in url.lower() or \\\r\n \".jsp\" in url.lower() or \\\r\n \".php\" in url.lower() or \\\r\n \".asp\" in url.lower():\r\n return url[:-4]\r\n else:\r\n return url\r\n\r\n#dfs算法遍历全站\r\ndef dfs(pages):\r\n #无法获取新的url说明遍历完成,即可结束dfs\r\n if pages==None or len(pages) == 0:\r\n #print(\"★★★pages==None:return\")\r\n return\r\n global url\r\n global domain\r\n global visited_url\r\n \r\n for page in pages:\r\n if page not in visited_url:\r\n #print(\"Visiting\",page)\r\n print(\"正在爬取链接:\",page)\r\n visited_url.add(page)\r\n url = page\r\n pages = get_local_pages(url, domain)\r\n html = get_html(url)\r\n if html == \"\":\r\n continue\r\n get_img(html,url)\r\n dfs(pages)\r\n\r\n #print(\"sucess\")\r\n\r\npre_process()\r\nvisited_url.add(url)\r\npages = get_local_pages(url, domain)\r\nhtml = get_html(url)\r\nget_img(html,url)\r\n#递归开始\r\ndfs(pages)\r\n\r\n","sub_path":"ImageSpider/bak/ImageSpider1.2.py","file_name":"ImageSpider1.2.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393927521","text":"#!/usr/bin/python3\n#coding:utf-8\nimport numpy as np\ndef readdata():\n f = open('/home/young/Documents/battery_capacity.log','r')\n text = f.readlines()\n f.close()\n for i in range(len(text)):\n text[i] = text[i].split(',')\n for j in range(len(text[i])):\n text[i][j] = int(text[i][j])\n return text\ntext = readdata()\nx = []\ny = []\nfor i in range(len(text)):\n y += [text[i][0]]\n x += [24*60*60*text[i][3]+60*60*text[i][4]+60*text[i][5]+text[i][6]-24*60*60*text[0][3]-60*60*text[0][4]-60*text[0][5]-text[0][6]]\nx = np.array(x)[:]\ny = np.array(y)[:]\ncof = np.polyfit(x,y,1)\nprint('The battery has %.2f hours capacity.' %(-cof[1]/cof[0]/3600))\n","sub_path":"battery_capacity/echo_battery_time.py","file_name":"echo_battery_time.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137175790","text":"# TO-DO: complete the help function below to merge 2 sorted arrays\ndef merge(arrA, arrB):\n # find the length of the arrays combined\n elements = len(arrA) + len(arrB)\n # create an array with the number of index equal to the length of elements\n merged_arr = [0] * elements\n # create variable counters, one for the left, one for the right and one for the merged_arr. 
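(merged_arr is preallocated so the loop can assign by index instead of appending.) 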
They will be used to match our loop indexes to the merged array index count\n i = j = k = 0\n # while the i and j counters are less than the length of the arrays\n while i < len(arrA) and j < len(arrB):\n # if left is less than the right\n if arrA[i] < arrB[j]:\n # the merged_arr array index is equal to the lesser valued index\n merged_arr[k] = arrA[i]\n # increment i to keep track of the index relative to the left array and the index being looped\n i += 1\n else:\n # the merged_arr array index is equal to the lesser valued index\n merged_arr[k] = arrB[j]\n # increment j to keep track of the index relative to the right array and the index being looped\n j += 1\n # for every i or j index, k needs to be iterated to keep track of the number of indexes in the final array\n k += 1\n # if either array still has elements left, append them to the end of the merged array, as they hold the largest remaining values\n while i < len(arrA):\n merged_arr[k] = arrA[i]\n i += 1\n k += 1\n while j < len(arrB):\n merged_arr[k] = arrB[j]\n j += 1\n k += 1\n # return the final array\n return merged_arr\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION\ndef merge_sort(arr):\n # base case to make sure the list has at least one index\n if len(arr) <= 1:\n return arr\n\n # find the midpoint (// is floor division)\n mid = len(arr) // 2\n # inside a recursive call, slice blank to mid, is default(zero index to the mid point)\n left = merge_sort(arr[:mid])\n # inside a recursive call, slice from mid to blank, is default(mid to end of list)\n right = merge_sort(arr[mid:])\n # merge the recursively separated arrays and return\n return merge(left, right)\n\n\n# Version 2\n# def merge(left, right):\n# # 4 variables, 1 list 3 counters, a counter for each loop, and counter for result list\n\n# i, j = 0, 0\n# result = []\n# #loop until i or j is greater than or equal to the length of the lists\n# while i < len(left) and j < len(right):\n# # if value of left is less than the value of right append the lower value to result\n# if left[i] < right[j]:\n# result.append(left[i])\n# # increment i so we stay within the while loop condition\n# i+=1\n# # else the lesser value is on the right\n# else:\n# result.append(right[j])\n# # increment j so we stay within the while loop condition\n# j+=1\n# # append any leftover elements to the end of the list\n# result += left[i:]\n# result += right[j:]\n# # return result\n# return result\n\n\n# def merge_sort(arr):\n# #need a Base\n# if len(arr) <= 1:\n# return arr\n# # need to find the mid\n# mid = len(arr) // 2\n# # slice the beginning\n# left = merge_sort(arr[:mid])\n# # slice the end\n# right = merge_sort(arr[mid:])\n# # merge with helper function\n# # print(left, right)\n\n# return merge(left, right)\n\n# STRETCH: implement an in-place merge sort algorithm\n\n\ndef merge_in_place(arr, start, mid, end):\n # TO-DO\n\n return arr\n\n\ndef merge_sort_in_place(arr, l, r):\n # TO-DO\n\n return arr\n\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\ndef timsort(arr):\n\n return arr\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288368608","text":"from bs4 import BeautifulSoup\n\nfrom mwscanner import BASE_URL\nfrom mwscanner.Mixins import TableReaderMixin, UrlLoaderMixin\nfrom 
mwscanner.builders.ClassBuilder import ClassBuilder\nfrom mwscanner.Discipline import Discipline\n\n\nclass DisciplinesBuilder(TableReaderMixin, UrlLoaderMixin):\n\n def getDisciplineOfferURL(self, code, department):\n # This method builds the discipline offer URL\n # from the discipline code and department\n return BASE_URL + 'graduacao/oferta_dados.aspx?cod={}&dep={}'.format(\n code, department)\n\n def getDisciplineURL(self, code):\n # This method builds the discipline URL\n # from the discipline code\n return BASE_URL + 'graduacao/disciplina.aspx?cod={}'.format(\n code)\n\n def getCredits(self, code, department):\n # This method gets the credits of the given discipline\n\n response = self.getFromUrl(\n self.getDisciplineOfferURL(code, department))\n\n if response.status_code != 200:\n return\n\n # Find the credits pattern in the HTML, shown as xxx-xxx-xxx-xxx\n raw_html = BeautifulSoup(response.content, 'lxml')\n credits_th = raw_html.findAll(\n 'small', text='(Teor-Prat-Ext-Est)')\n\n if len(credits_th) == 0:\n return\n\n # Get the td matching the pattern from the filtered text\n credits_tr = credits_th[0].parent.parent\n discipline_credits_td = credits_tr.findAll('td')\n discipline_credits = discipline_credits_td[0].text\n\n return discipline_credits\n\n def getClassesData(self, code, department, name):\n\n response = self.getFromUrl(\n self.getDisciplineOfferURL(code, department))\n\n # Verify that the status code is OK\n if response.status_code != 200:\n return\n\n # Parse the HTML\n # and read the tables identified in it\n raw_html = BeautifulSoup(response.content, 'lxml')\n\n classes_tables = raw_html.find_all(\n 'table',\n {\n 'id': 'datatable',\n }\n )\n\n if len(classes_tables) <= 0:\n return\n\n # The first element is always a table with discipline information,\n # so it can be discarded before the next step\n del classes_tables[0]\n\n classes_names = []\n\n classes = []\n\n for class_table in classes_tables:\n c = ClassBuilder().buildFromHtml(raw_html=class_table,\n discipline=code, department=department)\n classes.append(c)\n classes_names.append(c.getName())\n\n print('[Discipline {}] finished with classes {}'.format(\n name, classes_names))\n\n return classes\n\n def getRequirements(self, code):\n # This method gets all the requirements of the current discipline\n\n response = self.getFromUrl(self.getDisciplineURL(code))\n\n if response.status_code != 200:\n return\n\n raw_html = BeautifulSoup(response.content, 'lxml')\n\n # Find the table header with the text \"Pré-requisitos\" in the HTML\n requirements_table_row = raw_html.findAll(\n 'th', text='Pré-requisitos')[0].parent\n\n found_requirements = []\n append_next = False\n\n # Use the strong elements to decide whether the requirement\n # is grouped as OU (or) or E (and)\n for req in requirements_table_row.findAll('strong'):\n\n req = req.text.strip()\n\n if req == '' or req == 'OU':\n continue\n\n # If append_next is true, append the current\n # requirement to the last requirement group\n if append_next:\n found_requirements[-1].append(req)\n append_next = False\n\n # If it is E, turn the last element of the found\n # requirements into a list and append to it next\n elif req == 'E':\n if type(found_requirements[-1]) is not list:\n found_requirements[-1] = [found_requirements[-1]]\n append_next = True\n # Otherwise just add it to the list\n else:\n found_requirements.append(\n req\n )\n\n return found_requirements\n\n def buildDiscipline(self, code, name, department):\n\n discipline = Discipline()\n 
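# Each setter below fills one facet of the discipline: credits and classes come from\n # the offer page, requirements from the discipline page itself.\n 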
discipline.setCredits(self.getCredits(code, department))\n discipline.setClasses(self.getClassesData(code, department, name))\n discipline.setRequirements(self.getRequirements(code))\n discipline.setName(name)\n discipline.setCode(code)\n discipline.setDepartment(department)\n\n return discipline\n","sub_path":"mwscanner/builders/DisciplinesBuilder.py","file_name":"DisciplinesBuilder.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198895503","text":"from cvxopt.solvers import qp\nfrom cvxopt.base import matrix\nfrom collections import namedtuple\n\nimport matplotlib.pyplot as plt\nimport numpy, pylab, random, math\nimport kernels as k\n\nPLOT_TESTDATA=False\nPRINT_P_MATRIX=False\nPLOT_OUTPUT = True\n\n\nclass DataPoint():\n\t\"\"\"container for data values\"\"\"\n\tdef __init__(self, x,y,target,alpha):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.target = target\n\t\tself.alpha = alpha\n\n\tdef __str__(self):\n\t\treturn \"x:\"+str(self.x)+\" y:\"+str(self.y)+\" target:\" + str(self.target) + \" alpha:\" + str(self.alpha)\n\n\t\t\nclass supportVectorMachine():\n\tdef __init__(self, kernel):\n\t\tself.kernel = kernel\n\n\n\tdef plotData(self,idx, data, xrange, yrange, grid, show_contour):\n\t\tclassA = []\n\t\tclassB = []\n\t\tfor d in data:\n\t\t\tif d.target == 1:\n\t\t\t\tclassA.append( (d.x,d.y) )\n\t\t\telse:\n\t\t\t\tclassB.append( (d.x,d.y) )\n\t\tprint(\"idx\",idx)\n\t\tplt.subplot(2,2,idx)\n\n\t\tplt.hold(True)\n\t\tplt.plot([p[0] for p in classA], [p[1] for p in classA], 'bo')\n\t\tplt.plot([p[0] for p in classB], [p[1] for p in classB], 'ro')\n\t\tif(show_contour):\n\t\t\tplt.contour(xrange,yrange,grid,\n\t\t\t\t\t\t(-1.0,0.0,1.0),\n\t\t\t\t\t\tcolors=('red','black','blue'),\n\t\t\t\t\t\tlinewidths=(1,3,1))\n\n\n\t\"\"\"Function to generate the i,j element in the matrix\n\ti = DataPoint for the i:th datapoint\n\tj = DataPoint for the j:th datapoint\n\tkernel = function containing a kernel function\n\n\t\"\"\"\n\tdef getPelem(self,i,j,kernel):\n\t\treturn i.target*j.target*self.kernel((i.x, i.y),(j.x, j.y))\n\n\tdef getPelemNoDP(self,xi,yi,ti, xj,yj,tj, kernel):\n\t\treturn ti*tj*self.kernel((xi,yi),(xj,yj))\n\n\t\"\"\"Function to print a 2 dimensional array \n\ttakes an array of size NxN\"\"\"\n\tdef printTwoDimensional(self,matrix):\n\t\tfor i in range(0,len(matrix)+1): print(i,\"\\t\", end=\"\")\n\t\tprint()\n\t\tfor idx, x in enumerate(matrix):\n\t\t\tprint(idx+1,\":\\t\", end=\"\")\n\t\t\tfor y in x:\n\t\t\t\tprint(\"{0:.2f}\\t\".format(y), end=\"\")\n\t\t\tprint()\n\n\tdef indicator(self,x, y,data_set):\n\t\tacc=0\n\t\tfor elem in data_set:\n\t\t\tacc+= elem.alpha * self.getPelemNoDP(x,y,1, elem.x,elem.y, elem.target, self.kernel)\n\t\treturn acc\n\n\tdef findSplittingPlane(self,Mid, data,show_contour, C=-1):\n\t\t# having a default value on C is horrible, but I'm too lazy to make it neat for such a small program\n\t\tP_Matrix = [[0.0 for x in range(len(data))] for x in range(len(data))] \n\t\t# build the P matrix\n\t\tfor idi, i in enumerate(data):\n\t\t\tfor idj, j in enumerate(data):\n\t\t\t\t# if i==j: is diagonal a special case?\n\t\t\t\t# \tcontinue\n\t\t\t\tP_Matrix[idi][idj] = self.getPelem(i,j,self.kernel)\n\t\tif(PRINT_P_MATRIX): self.printTwoDimensional(P_Matrix)\n\t\t# Build vectors for qp\n\t\tq_vec = [-1.0 for x in range(len(data))]\n\t\th_vec = [0.0 for x in range(len(data))]\n\t\tG_Matrix = [[ -1.0 if x==y else 0.0 for y in range(len(data))] for x in range(len(data))]\n\n\t\t# call qp\n\t\tr = qp(matrix(P_Matrix), matrix(q_vec), matrix(G_Matrix), matrix(h_vec))\n\t\talpha = list(r['x'])\n\t\talphaValues =[]\n\t\t# pick out the non zero alpha values\n\t\tfor idx,elem in enumerate(alpha):\n\t\t\tif(elem > pow(10,-5)):\n\t\t\t\tif( C!=-1 and elem <= C):\n\t\t\t\t\tcontinue\n\t\t\t\tdata[idx].alpha = elem\n\t\t\t\talphaValues.append(data[idx])\n\n\t\t# implement the indicator function on a grid\n\t\txrange = numpy.arange(-4,4,0.05)\n\t\tyrange = numpy.arange(-4,4,0.05)\n\t\tgrid=matrix([[self.indicator(x,y, alphaValues)\n\t\t\t\t\tfor y in yrange]\n\t\t\t\t\tfor x in xrange])\n\n\t\tif(PLOT_OUTPUT): self.plotData(Mid, data,xrange,yrange,grid, show_contour)","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"402250928","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 21 12:23:03 2020\n\n@author: Collin Guo\n\"\"\"\n\nimport math\n\nimport random as random\n\n\nclass Rat(object):\n def __init__(self, decks, winrates, loserates, counterrates, players, initialDist):\n \"\"\"\n Initialize the chain instance.\n \n Players each have a randomly generated deck. Iterating the chain makes a random pair of players face each other.\n \n The losing player has a chance to change (typically high% for related decks, counters to winning deck, and the winning deck)\n \n As the number of players approaches infinity, this becomes equivalent to continuous \"powers\" of a 3-dimensional tensor (see https://www.overleaf.com/read/xhrfpsswwgpm for tensor conversions)\n \n We'll have two matrices - one for the movement chance of the losing player, based on their own deck, and one based\n on the winner's. 
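For example, a hypothetical loserates[a][b] = 0.3 would mean a player who loses while piloting deck a switches to deck b 30% of the time. 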
It is assumed these are unrelated.\n \n Initialize: Randomly generate possible player ratios and the current ratio based on initialDist.\n \n Parameters\n ----------\n players: int\n Number of players\n \n decks: int\n number of decks\n \n winrates: list of lists\n n x n matrix of floats between 0 and 1 giving the winrate (first index chance to win vs second index).\n \n loserates: list of lists\n n x n matrix of floats between 0 and 1: if you lose while playing x deck, you will switch to y deck at this rate.\n \n counterrates: list of lists\n n x n matrix of floats between 0 and 1: if you lose while playing against x deck, you will switch to y deck at this rate.\n \n initialDist: list\n List of probabilities of being in a given deck.\n \n playerCount: list\n List of the number of players with a given deck (indexed as decks)\n \n \"\"\"\n self.decks = decks\n self.winrates = winrates\n self.loserates = loserates\n self.counterrates = counterrates\n self.players = players\n self.initialDist = initialDist.copy()\n self.playerCount = []\n self.bumps = 0\n for deckI in range(decks):\n self.playerCount.append(0)\n for playerI in range(players):\n deckRand = random.uniform(0,1)\n deckI = 0\n distI = 0\n while distI < decks:\n deckI += initialDist[distI]\n if deckRand < deckI:\n self.playerCount[distI] += 1\n distI = decks+1\n distI += 1\n \n def advance(self, playerrat):\n \"\"\"\n playerrat: float\n proportion of players being advanced\n \"\"\"\n self.playerrat = playerrat\n gamecount = math.floor(self.playerrat*self.players)\n tempCount = self.playerCount.copy()\n \n for gameI in range(gamecount):\n rand = random.randint(1,self.players)\n deck1 = 0\n deck2 = 0\n deck = 0\n while deck < self.decks:\n rand -= self.playerCount[deck]\n if rand <= 0:\n deck1 = deck\n deck = 1000000000000\n deck += 1\n deck = 0\n rand = random.randint(1,self.players)\n while deck < self.decks:\n rand -= self.playerCount[deck]\n if rand <= 0:\n deck2 = deck\n deck = 1000000000000\n deck += 1\n gameRoll = random.uniform(0,1)\n if gameRoll < self.winrates[deck1][deck2]:\n winDeck = deck1\n loseDeck = deck2\n else:\n winDeck = deck2\n loseDeck = deck1\n loserateRoll = random.uniform(0,1)\n counterrateRoll = random.uniform(0,1)\n loseI = 0\n rollI = 0\n counterI = 0\n flag = 0\n while rollI < self.decks:\n counterI += self.counterrates[winDeck][rollI]\n if counterrateRoll < counterI:\n if tempCount[loseDeck] > 0:\n tempCount[loseDeck] -= 1\n tempCount[rollI] += 1\n else:\n self.bumps += 1\n rollI = self.decks+1\n flag = 1\n rollI += 1\n rollI = 0\n while rollI < self.decks:\n loseI += self.loserates[loseDeck][rollI]\n if loserateRoll < loseI: \n if flag == 0:\n if tempCount[loseDeck] > 0:\n tempCount[loseDeck] -= 1\n tempCount[winDeck] += 1\n else:\n self.bumps += 1\n rollI = self.decks+1\n rollI += 1\n self.playerCount = tempCount\n \n def generate_states(self, runLength, playerrat):\n \"\"\"\n Generates states for a run of length runLength.\n \n Parameters\n ----------\n \n runLength: int\n The number of future states to generate.\n \n playerrat: float\n The proportion of players advanced each step.\n \"\"\"\n self.runLength = runLength\n self.playerrat = playerrat\n runList = []\n for i in range(runLength):\n runList.append(self.playerCount.copy())\n self.advance(playerrat)\n return runList\n \n def average(self, initialDist, runLength, runs, playerrat):\n \"\"\"\n Generates a number of runs and finds the average ratio of players over time.\n \n Parameters\n ----------\n \n runs: int\n The number of runs to generate.\n \n \"\"\"\n self.runs = runs\n self.runLength = runLength\n self.playerrat = playerrat\n \n longAverage = 
[]\n blankState = []\n for deck in range (self.decks):\n blankState.append(0)\n for run in range(runLength):\n longAverage.append(blankState.copy())\n for run in range(runs):\n self.playerCount = []\n for deckI in range(self.decks):\n self.playerCount.append(0)\n for playerI in range(self.players):\n deckRand = random.uniform(0,1)\n deckI = 0\n distI = 0\n while distI < self.decks:\n deckI += initialDist[distI]\n if deckRand < deckI:\n self.playerCount[distI] += 1\n distI = self.decks+1\n distI += 1\n runList = self.generate_states(runLength, playerrat)\n for state in range(runLength):\n for deckRat in range(self.decks):\n longAverage[state][deckRat] += runList[state][deckRat]\n for deck in range(self.decks):\n for state in range(runLength):\n longAverage[state][deck] = longAverage[state][deck]/runs/self.players\n return(longAverage)\n \n","sub_path":"ratioRun.py","file_name":"ratioRun.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550574520","text":"\n\"\"\"\n\tScript to read the rule-based SV-gene pairs and split these into pathogenic & non-pathogenic pairs based on the z-scores.\n\n\tWe use this mainly for plotting fig 2e.\n\n\"\"\"\n\nimport sys\nimport numpy as np\n\noutDir = sys.argv[1]\n\n#get the pairs identified with the rules, and the z-scores\nsvGenePairs = np.loadtxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_', dtype='object')\nzScores = np.loadtxt(outDir + '/tadDisruptionsZScores/zScores.txt', dtype='object')\n\n\n#also load the mutation pairs.\n#In the current zScore setup, some genes will be affected by an overlapping duplication AND CNV AMP,\n#but sometimes we find no evidence that the duplication actually functions in a non-coding way.\n#so for those genes, we need to remove the ones that have only CNV AMP and no non-coding duplication,\n#because otherwise there would be too many false positives.\nmutDir = outDir + '/patientGeneMutationPairs/'\ncnvPatientsAmp = np.load(mutDir + 'cnvPatientsAmp.npy', allow_pickle=True, encoding='latin1').item()\n\n#then also split the pairs so that we can easily check for cases without non-coding dups.\nsplitSVGenePairs = []\nfor pair in svGenePairs:\n\tsplitPair = pair[0].split('_')\n\n\tsplitSVGenePairs.append(splitPair[7] + '_' + splitPair[0] + '_' + splitPair[12])\n\npositivePairsFeatures = []\nnegativePairsFeatures = []\n\nfor pair in svGenePairs:\n\n\tsplitPair = pair[0].split('_')\n\tshortPair = splitPair[7] + '_' + splitPair[0]\n\n\tif shortPair in zScores[:,0]:\n\n\t\tzPairInfo = zScores[zScores[:,0] == shortPair][0]\n\n\t\tif float(zPairInfo[5]) > 1.5 or float(zPairInfo[5]) < -1.5:\n\t\t#if float(zPairInfo[5]) > 2 or float(zPairInfo[5]) < -2:\n\n\t\t\t#only add to the positive set if there is no CNV amp without duplication.\n\t\t\tif splitPair[0] in cnvPatientsAmp[splitPair[7]] and shortPair + '_DUP' not in splitSVGenePairs:\n\t\t\t\tnegativePairsFeatures.append(pair)\n\t\t\telse:\n\t\t\t\tpositivePairsFeatures.append(pair)\n\t\telse:\n\t\t\tnegativePairsFeatures.append(pair)\n\n\npositivePairsFeatures = np.array(positivePairsFeatures)\nnegativePairsFeatures = np.array(negativePairsFeatures)\n\nprint(positivePairsFeatures.shape)\nprint(negativePairsFeatures.shape)\n\nnp.savetxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_pathogenicPairsFeatures.txt', positivePairsFeatures, fmt='%s', delimiter='\\t')\nnp.savetxt(outDir + '/linkedSVGenePairs/nonCoding_geneSVPairs.txt_nonPathogenicPairsFeatures.txt', 
negativePairsFeatures, fmt='%s', delimiter='\\t')","sub_path":"src/linkSVsGenes/splitPairsPathogenicNonPathogenic.py","file_name":"splitPairsPathogenicNonPathogenic.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621560843","text":"import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\ndf1 = pd.read_csv('./WIthoutNekara/memcached_coverage_5k_run.txt')\n\ntrace1 = go.Scatter(\n x=df1['x'],\n y=df1['y'],\n name='Without Nekara'\n)\n\ndf2 = pd.read_csv('./WithNekara/memcached_coverage_portfolio.txt')\n\ntrace2 = go.Scatter(\n x=df2['x'],\n y=df2['y'],\n name='With Nekara, Portfolio strategy'\n)\n\n\nfig = make_subplots(1,1)\nfig.update_layout(title_text=\"Memcached coverage. Hash of only slab only with just 10 deletes and 10 sets\")\n\nfig.add_trace(trace1)\nfig.add_trace(trace2)\n\nfig.show()\n","sub_path":"coyotest/NewTest4Core/OnlySlab10S10D/plot_script_portfolio.py","file_name":"plot_script_portfolio.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99118794","text":"import math\n \nmod = 1000000007\n \ndef lcm(x, y):\n return (x * y) // math.gcd(x, y)\n \nn=int(input())\n \na = list(map(int,input().split()))\n \nmemo = a[0]\n \nfor i in range(1,n):\n memo = lcm(memo,a[i])\n \nans = 0\n \nfor i in range(n):\n ans += memo*pow(a[i], mod-2, mod)%mod\n ans %= mod\n \nprint(ans)","sub_path":"Python_codes/p02793/s712104888.py","file_name":"s712104888.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"51652963","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\n\n\ndef create_linked_list(arr):\n head = None\n tail = None\n\n for ele in arr:\n newNode = Node(ele)\n\n if head is None:\n head = newNode\n tail = newNode\n\n else:\n tail.next = newNode\n tail = newNode\n\n return head\n\ndef print_linked_list(head):\n while head is not None:\n if head.next is None:\n print(head.data)\n break\n else:\n print(head.data, end = \" ->\")\n head = head.next\n\ndef remove_duplicates(head):\n head1 = head\n if head.next is None:\n return head1\n while head.next is not None:\n if head.data == head.next.data:\n head.next = head.next.next\n else:\n head = head.next\n\n\narr = [int(x) for x in input().split()]\nhead = create_linked_list(arr)\nprint(\"Initial sorted linked list\")\nprint_linked_list(head)\nremove_duplicates(head)\nprint(\"Final distinct sorted linked list\")\nprint_linked_list(head)","sub_path":"Linked list/Remove duplicates from sorted linked list.py","file_name":"Remove duplicates from sorted linked list.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551335669","text":"class Movie:\n \"\"\" Class representing a movie\n\n Attributes:\n title: Title of the movie\n poster_image_url: URL to an image of\n the movie\n trailer_youtube_url: YouTube link to the trailer of the movie\n \"\"\"\n def __init__(self, title, poster_image_url, trailer_youtube_url):\n \"\"\" Inits a Movie instance with the minimal information \"\"\"\n self.title = title # Title of the movie\n\n # URL to the poster image of the movie\n self.poster_image_url = poster_image_url\n\n # URL to a YouTube trailer video\n 
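# A hypothetical usage sketch (values are made up):\n # m = Movie(\"Some Film\", \"https://example.com/poster.jpg\", \"https://youtube.com/watch?v=example\")\n # print(m.title)\n 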
self.trailer_youtube_url = trailer_youtube_url\n","sub_path":"classes/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439282425","text":"\nclass Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def getData(self):\n return self.data\n\n def setData(self, data):\n self.data = data\n\n def getNext(self):\n return self.next\n\n def setNext(self, newNode):\n self.next = newNode\n\nclass UnorderedList:\n\n def __init__(self):\n self.head = None\n\t\n def __str__(self):\n \tr = \"\"\n \tcurr = self.head\n \tif curr is None:\n \t\treturn r\n \twhile curr.getNext() != None:\n \t\tr = r + str(curr.getData()) + \"->\"\n \t\tcurr = curr.getNext()\n \tr = r + str(curr.getData())\n \treturn r\n\n def size(self):\n curr = self.head\n count = 0\n while curr != None:\n count += 1\n curr = curr.getNext()\n return count\n\n def isEmpty(self):\n return self.head == None\n\n def insert(self, data):\n newNode = Node(data)\n if not self.head:\n self.head = newNode\n else:\n newNode.setNext(self.head)\n self.head = newNode\n\n def search(self, data):\n curr = self.head\n found = False\n while curr != None and not found:\n if curr.getData() == data:\n found = True\n else:\n curr = curr.getNext()\n return found\n\n def delete(self, data):\n \tprev = None\n \tcurr = self.head\n \tfound = False\n \tif curr == None:\n \t\treturn\n \twhile curr != None and not found:\n \t\tif curr.getData() == data:\n \t\t\tfound = True\n \t\telse:\n \t\t\tprev = curr\n \t\t\tcurr = curr.getNext()\n \tif found:\n \t\tif prev == None:\n \t\t\tself.head = curr.getNext()\n \t\telse:\n \t\t\tprev.setNext(curr.getNext())\n\nmylist = UnorderedList()\nmylist.insert(31)\nmylist.insert(77)\nmylist.insert(17)\nmylist.insert(93)\nmylist.insert(26)\nmylist.insert(54)\nprint (mylist.search(17))\nprint (mylist.search(100))\nprint (mylist)\n","sub_path":"src/linkedlists.py","file_name":"linkedlists.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221531975","text":"# Copyright 2018 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\n\nimport numpy as np\n\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.data.dataset_reader import DatasetReader\n\n\n@register('ubuntu_dstc7_mt_reader')\nclass UbuntuDSTC7MTReader(DatasetReader):\n \"\"\"\n DatasetReader for Ubuntu Dialogue Corpus Dataset (version 3), prepared for DSTC 7 competition Track 1 Subtrack 1.\n\n https://github.com/IBM/dstc7-noesis\n\n Args:\n data_path (str): A path to a folder with dataset json files.\n num_context_turns (int): A maximum number of dialogue ``context`` turns.\n num_responses (int): A number of responses for each context; default is equal to all 100 responses,\n it can be reduced to 10 (1 true response + 9 random wrong responses) 
to adapt with succeeding pipeline\n padding (str): \"post\" or \"pre\" context sentences padding\n \"\"\"\n\n def read(self,\n data_path: str,\n num_context_turns: int = 10,\n num_responses: int = 100,\n padding: str = \"post\",\n seed: int = 42,\n *args, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]:\n\n self.num_turns = num_context_turns\n self.padding = padding\n self.num_responses = num_responses\n self.np_random = np.random.RandomState(seed)\n\n dataset = {}\n dataset[\"train\"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_train_subtask_1.json', \"train\")\n dataset[\"valid\"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_dev_subtask_1.json', \"valid\")\n dataset[\"test\"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_test_subtask_1.json', \"test\")\n return dataset\n\n def _create_dialog_iter(self, filename, mode=\"train\"):\n \"\"\"\n Read input json file with test data and transform it to the following format:\n [\n ( [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N], label ),\n ( [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N], label ),\n ...\n ]\n\n where\n * [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N] - list that consists of\n ``num_context_turn`` utterances, followed by ``num_responses`` responses.\n Where\n * label - label of the sample\n\n Args:\n filename (Path): filename to read\n mode (str): which dataset to return. Can be \"train\", \"valid\" or \"test\"\n\n Returns:\n list of contexts and responses with their labels. More details about the format are provided above\n \"\"\"\n data = []\n with open(filename, encoding='utf-8') as f:\n json_data = json.load(f)\n for entry in json_data:\n\n dialog = entry\n utterances = [] # all the context sentences\n for msg in dialog['messages-so-far']:\n utterances.append(msg['utterance'])\n\n true_response = \"\" # true response sentence\n if mode != \"test\":\n true_response = dialog['options-for-correct-answers'][0]['utterance']\n\n fake_responses = [] # rest (wrong) responses\n target_id = \"\"\n if mode != \"test\":\n correct_answer = dialog['options-for-correct-answers'][0]\n target_id = correct_answer['candidate-id']\n for i, utterance in enumerate(dialog['options-for-next']):\n if utterance['candidate-id'] != target_id:\n fake_responses.append(utterance['utterance'])\n\n # aligned list of context utterances\n expanded_context = self._expand_context(utterances, padding=self.padding)\n\n if mode == 'train':\n data.append((expanded_context + [true_response], 1))\n data.append(\n (expanded_context + list(self.np_random.choice(fake_responses, size=1)), 0)) # random 1 from 99\n\n elif mode == 'valid':\n # NOTE: labels are useless here...\n data.append((expanded_context + [true_response] + list(\n self.np_random.choice(fake_responses, self.num_responses - 1)), 0))\n\n elif mode == 'test':\n data.append((expanded_context + fake_responses, 0))\n\n return data\n\n def _expand_context(self, context: List[str], padding: str) -> List[str]:\n \"\"\"\n Align context length by using pre/post padding of empty sentences up to ``self.num_turns`` sentences\n or by reducing the number of context sentences to ``self.num_turns`` sentences.\n\n Args:\n context (List[str]): list of raw context sentences\n padding (str): \"post\" or \"pre\" context sentences padding\n\n Returns:\n List[str]: list of ``self.num_turns`` context sentences\n \"\"\"\n if padding == \"post\":\n sent_list = context\n res = sent_list + (self.num_turns - len(sent_list)) * 
\\\n [''] if len(sent_list) < self.num_turns else sent_list[:self.num_turns]\n return res\n elif padding == \"pre\":\n sent_list = context[-(self.num_turns + 1):-1]\n if len(sent_list) <= self.num_turns:\n tmp = sent_list[:]\n sent_list = [''] * (self.num_turns - len(sent_list))\n sent_list.extend(tmp)\n return sent_list\n","sub_path":"deeppavlov/dataset_readers/ubuntu_dstc7_mt_reader.py","file_name":"ubuntu_dstc7_mt_reader.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609195991","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom enum import Enum\nfrom azure.core import CaseInsensitiveEnumMeta\n\n\nclass ActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"The type of the action.\"\"\"\n\n EMAIL_CONTACTS = \"EmailContacts\"\n AUTO_RENEW = \"AutoRenew\"\n\n\nclass DeletionRecoveryLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"Reflects the deletion recovery level currently in effect for keys in the current vault. If it\n contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only\n the system can purge the key, at the end of the retention interval.\n \"\"\"\n\n PURGEABLE = \"Purgeable\"\n \"\"\"Soft-delete is not enabled for this vault. A DELETE operation results in immediate and\n #: irreversible data loss.\"\"\"\n RECOVERABLE_PURGEABLE = \"Recoverable+Purgeable\"\n \"\"\"Soft-delete is enabled for this vault; A privileged user may trigger an immediate, irreversible\n #: deletion(purge) of a deleted entity.\"\"\"\n RECOVERABLE = \"Recoverable\"\n \"\"\"Soft-delete is enabled for this vault and purge has been disabled. A deleted entity will remain\n #: in this state until recovered, or the end of the retention interval.\"\"\"\n RECOVERABLE_PROTECTED_SUBSCRIPTION = \"Recoverable+ProtectedSubscription\"\n \"\"\"Soft-delete is enabled for this vault, and the subscription is protected against immediate\n #: deletion.\"\"\"\n\n\nclass JsonWebKeyCurveName(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"Elliptic curve name. For valid values, see JsonWebKeyCurveName.\"\"\"\n\n P256 = \"P-256\"\n \"\"\"The NIST P-256 elliptic curve, AKA SECG curve SECP256R1.\"\"\"\n P384 = \"P-384\"\n \"\"\"The NIST P-384 elliptic curve, AKA SECG curve SECP384R1.\"\"\"\n P521 = \"P-521\"\n \"\"\"The NIST P-521 elliptic curve, AKA SECG curve SECP521R1.\"\"\"\n SECP256_K1 = \"SECP256K1\"\n \"\"\"The SECG SECP256K1 elliptic curve.\"\"\"\n\n\nclass JsonWebKeyEncryptionAlgorithm(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"algorithm identifier.\"\"\"\n\n RSA_OAEP = \"RSA-OAEP\"\n RSA_OAEP256 = \"RSA-OAEP-256\"\n RSA1_5 = \"RSA1_5\"\n\n\nclass JsonWebKeyOperation(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"JSON web key operations. 
For more information, see JsonWebKeyOperation.\"\"\"\n\n ENCRYPT = \"encrypt\"\n DECRYPT = \"decrypt\"\n SIGN = \"sign\"\n VERIFY = \"verify\"\n WRAP_KEY = \"wrapKey\"\n UNWRAP_KEY = \"unwrapKey\"\n\n\nclass JsonWebKeySignatureAlgorithm(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"The signing/verification algorithm identifier. For more information on possible algorithm\n types, see JsonWebKeySignatureAlgorithm.\n \"\"\"\n\n PS256 = \"PS256\"\n PS384 = \"PS384\"\n PS512 = \"PS512\"\n RS256 = \"RS256\"\n RS384 = \"RS384\"\n RS512 = \"RS512\"\n RSNULL = \"RSNULL\"\n ES256 = \"ES256\"\n ES384 = \"ES384\"\n ES512 = \"ES512\"\n ECDSA256 = \"ECDSA256\"\n\n\nclass JsonWebKeyType(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"JsonWebKey key type (kty).\"\"\"\n\n EC = \"EC\"\n EC_HSM = \"EC-HSM\"\n RSA = \"RSA\"\n RSA_HSM = \"RSA-HSM\"\n OCT = \"oct\"\n\n\nclass KeyUsageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"KeyUsageType.\"\"\"\n\n DIGITAL_SIGNATURE = \"digitalSignature\"\n NON_REPUDIATION = \"nonRepudiation\"\n KEY_ENCIPHERMENT = \"keyEncipherment\"\n DATA_ENCIPHERMENT = \"dataEncipherment\"\n KEY_AGREEMENT = \"keyAgreement\"\n KEY_CERT_SIGN = \"keyCertSign\"\n C_RL_SIGN = \"cRLSign\"\n ENCIPHER_ONLY = \"encipherOnly\"\n DECIPHER_ONLY = \"decipherOnly\"\n","sub_path":"sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v2016_10_01/models/_key_vault_client_enums.py","file_name":"_key_vault_client_enums.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498125655","text":"#coding=utf-8\nfrom libs.db.mongodb import db\nfrom datetime import datetime\n\n\nfrom libs.unit.tools import Tools\nfrom libs.rongcloud.RongCloudUser import rc_user\nimport time\n\n\n# User entity class\n\nclass Users(object):\n def __init__(self):\n self.collection= db['User']\n\n def getOne(self, json):\n filter = {\"token\": json[\"token\"]}\n result = list(self.collection.find(filter).limit(1))\n if len(result):\n res = rc_user.register_user(result[0][\"token\"], result[0][\"username\"], result[0][\"portrait\"])\n\n if res['code'] == 200:\n result[0][\"_id\"] = \"\"\n result[0][\"im_token\"] = res[\"token\"]\n\n return {\"status\": 1, \"msg\": \"success\", \"result\": result}\n else:\n return {\"status\": -1, \"msg\": \"Record not found\"}\n\n\n\n def register(self, json):\n\n is_found = list(self.collection.find({\"username\": json[\"username\"]}).limit(1))\n\n if len(is_found) > 0 :\n return {\"status\": -1, \"msg\": \"Username already exists\"}\n else:\n is_found = list(self.collection.find({\"mobile\": json[\"mobile\"]}).limit(1))\n if len(is_found) > 0:\n return {\"status\": -2, \"msg\": \"Phone number already exists\"}\n else:\n\n data = {\n \"username\": json['username'],\n \"password\": Tools.md5Encode(json['password']),\n \"create_date\": time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),\n \"state\" : json[\"state\"],\n \"mobile\": json[\"mobile\"],\n \"token\": Tools.create_token(),\n # Avatar URL\n \"portrait\": json[\"portrait\"]\n }\n\n res = rc_user.register_user(data['token'], data['username'], data['portrait'])\n\n if res['code'] == 200:\n aid = self.collection.insert(data)\n if aid:\n return {\"status\": 1, \"msg\": \"success\", 'res': res}\n else:\n return {\"status\": 0, \"msg\": \"Failed to insert data\"}\n else:\n return {\"status\": -1, \"msg\": \"Failed to create user on RongCloud\"}\n\n def edit(self, json):\n\n where={\"token\": json[\"token\"]}\n\n data={\"$set\": {\n \"state\": json[\"state\"],\n \"update_date\": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),\n \"portrait\": json['portrait']\n }\n }\n\n if json[\"password\"] != '':\n data[\"$set\"][\"password\"] = Tools.md5Encode(json[\"password\"])\n\n is_found = list(self.collection.find({\"$or\": [\n {\"$and\": [{\"username\": json[\"username\"]}, {\"token\": json[\"token\"]}]},\n {\"$and\": [{\"mobile\": json[\"mobile\"]}, {\"token\": json[\"token\"]}]},\n ]}).limit(1))\n\n if len(is_found) == 0:\n is_user = list(self.collection.find({\"username\": json[\"username\"]}).limit(1))\n is_mobile = list(self.collection.find({\"mobile\": json[\"mobile\"]}).limit(1))\n\n if len(is_user):\n return {\"status\": -3, \"msg\": \"Username already exists\"}\n else:\n data['$set']['username'] = json['username']\n if len(is_mobile):\n return {\"status\": -4, \"msg\": \"Mobile number already exists\"}\n else:\n data['$set']['mobile'] = json['mobile']\n\n res = rc_user.update_user(json['token'], json['username'], json['portrait'])\n\n if res['code'] == 200:\n\n uid = self.collection.update(where, data)\n\n if uid:\n return {\"status\": 1, \"msg\": \"success\", \"res\": res}\n else:\n return {\"status\": -1, \"msg\": \"Update failed\"}\n else:\n return {\"status\": -2, \"msg\": \"Failed to update RongCloud info\"}\n\n # Delete data\n def remove(self, data):\n if ',' in data['token']:\n temp_arr = data['token'].split(',')\n else:\n temp_arr = [data['token']]\n\n did = 0\n for i in temp_arr:\n res = self.collection.remove({'token': str(i)})\n if res:\n did += 1\n\n if did > 0:\n return {\"status\": 1, \"msg\": \"success\"}\n else:\n return {\"status\": -1, \"msg\": \"Record not found\"}\n\n def getList(self, condition='', page=1, rows=10, date_from='', date_to=''):\n filter={}\n if condition != '':\n filter[\"$or\"] = [{\"username\": {\"$regex\": condition}}, {\"id\": {\"$regex\": condition}}]\n\n if date_from != '' and date_to != '':\n filter[\"create_date\"] = {\"$gte\": date_from, \"$lte\": date_to}\n elif date_from != '':\n filter[\"create_date\"] = {\"$gte\": date_from}\n elif date_to != '':\n filter[\"create_date\"] = {\"$lte\": date_to}\n\n result = list(self.collection.find(filter).sort(\"create_date\", -1).skip(int(rows)*(int(page)-1) if int(page)>1 else 0).limit(int(rows)))\n count = self.collection.find(filter).count()\n if len(result):\n for item in result:\n item[\"_id\"] = \"\"\n if \"phone_token\" in item:\n phone = list(db[\"Phone\"].find({\"token\": item[\"phone_token\"]}).limit(1))\n if len(phone) > 0:\n item[\"phone_token\"] = phone[0][\"info\"]\n else:\n item[\"phone_token\"] = \"\"\n\n return {\"total\": count, \"rows\": result}\n\n\n\n\n\n\nusers=Users()","sub_path":"Projects/芮爸&芮妈/server/models/admin/users_model.py","file_name":"users_model.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245482262","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: jvbo\n@contact: email(programmer.jv.bo@gmail.com)\n@time: 2019/4/30\n\"\"\"\n\n\"\"\"\nExport Youdao Cloud notes, then upload to GitHub\n\"\"\"\nimport requests\nimport time\nimport re\nimport os\nimport shutil\nimport hashlib\n\n# Export folder\nHEXO_DIR = \"F:\\software\\jvbo\\ynote_xuehaibotic_github\"\n\n# Values stored in the Cookie after logging in to Youdao Cloud Notes\nYNOTE_PERS = 'v2|urstoken||YNOTE||web||-1||1498882067319||116.226.216.207||go_songs@163.com||k5RfzEOMqFRPy0Hq4hLzG0gBnfY5PMgL06ZhMz5nH64RQyOMYWnHqS0YM64eBkM6B0e4nfq4nMUf0QB6LJK6LzWR'\nYNOTE_SESS = 'v2|8r8rH29TFWpFhHgZ6MeL0wFPLPS6LgyRwLkfzfOMY50kf0LqFkMzERlEnfzWk4gLRUY0Lwz64pz0qLhLPFP4quRlY6LPz6MU50'\nYNOTE_LOGIN = '3||1498882067334'\nCSTK = 'Jukb45Yq'\n\nHEADERS = {\n 
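# Browser-like headers; the Cookie entry below carries the session secrets defined above,\n # without which Youdao rejects the API requests.\n 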
'Accept-Encoding':\n 'gzip, deflate, br',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Cookie':\n 'YNOTE_PERS={YNOTE_PERS}; YNOTE_SESS={YNOTE_SESS}; YNOTE_LOGIN={YNOTE_LOGIN}; YNOTE_CSTK={YNOTE_CSTK}'.\n format(\n YNOTE_PERS=YNOTE_PERS,\n YNOTE_SESS=YNOTE_SESS,\n YNOTE_LOGIN=YNOTE_LOGIN,\n YNOTE_CSTK=CSTK),\n 'Accept':\n 'application/json, text/plain, */*',\n 'Host':\n 'note.youdao.com',\n 'Origin':\n 'https://note.youdao.com',\n 'Referer':\n 'https://note.youdao.com/web/',\n 'Content-Type':\n 'application/x-www-form-urlencoded;charset=UTF-8'\n}\n\n\n# Get all notebooks\ndef get_all_books():\n data = {'path': '/', 'dirOnly': True, 'f': True, 'cstk': CSTK}\n url = 'https://note.youdao.com/yws/api/personal/file?method=listEntireByParentPath&cstk={CSTK}&keyfrom=web'.format(\n CSTK=CSTK)\n res = requests.post(url, data=data, headers=HEADERS)\n if res.status_code == 200:\n resJson = res.json()\n books = []\n for i in resJson:\n # Notebooks whose names start with _ are treated as private notes and skipped\n if i['fileEntry']['name'][0] != '_':\n books.append({\n 'name': i['fileEntry']['name'],\n 'id': i['fileEntry']['id']\n })\n return books\n else:\n exit('get_all_books')\n\n\n# Get the notes under a notebook\ndef get_all_notes(book):\n url = 'https://note.youdao.com/yws/api/personal/file/{id}?all=true&cstk={CSTK}&f=true&isReverse=false&keyfrom=web&len=30&method=listPageByParentId&sort=1'.format(\n id=book['id'], CSTK=CSTK)\n res = requests.get(url, headers=HEADERS)\n if res.status_code == 200:\n resJson = res.json()\n notes = []\n for i in resJson['entries']:\n\n # Pick out files with the md extension\n if i['fileEntry']['name'][-2:] == 'md' and i['fileEntry']['name'][0] != '_':\n notes.append({\n 'name':\n i['fileEntry']['name'],\n 'id':\n i['fileEntry']['id'],\n 'createTime':\n i['fileEntry']['createTimeForSort'],\n 'modifyTime':\n i['fileEntry']['modifyTimeForSort'],\n 'tag':\n book['name']\n })\n return notes\n else:\n exit('get_all_notes')\n\n\n# Fetch the note content from the note info\ndef get_note_detail(note):\n url = 'https://note.youdao.com/yws/api/personal/file/{id}?method=download&read=true&cstk={CSTK}'.format(\n id=note['id'], CSTK=CSTK)\n res = requests.get(url, headers=HEADERS)\n if res.status_code == 200:\n # decode the response bytes so write_md can write text\n resCon = res.content.decode('utf-8')\n\n time = ''\n if note['modifyTime']: # prefer the modification time\n time = parse_ts(note['modifyTime'])\n else:\n time = parse_ts(note['createTime'])\n\n detail = {\n 'name': filter_mark(note['name']),\n 'time': time,\n 'content': resCon,\n 'tag': note['tag']\n }\n return detail\n else:\n exit('get_note_detail')\n\n\n# Write the document\ndef write_md(detail):\n print('Writing: {name}'.format(name=detail['name']))\n with open('note_xuehaibotic_github/' + detail['name'], 'w', encoding='utf-8') as f:\n f.write('---\\n')\n f.write('title: {title}\\n'.format(title=detail['name'][:-3]))\n f.write('date: {data}\\n'.format(data=detail['time']))\n f.write('tags: {tag}\\n'.format(tag=detail['tag']))\n f.write('---\\n\\n\\n')\n f.write(detail['content'])\n f.write('\\n')\n\n\n# Convert a 10-digit timestamp to the 2017-06-29 10:00:00 format\ndef parse_ts(ts):\n timeArr = time.localtime(ts)\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArr)\n\n\n# Filter special characters; strip the original extension and re-append .md\ndef filter_mark(s):\n # s = s.decode(\"utf8\")\n res = re.sub(\n \"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()()]+\",\n \"\", s)\n res = res.replace(' ', '')\n return res[:-2] + '.md'\n\n\n# MD5 hashing\ndef md5(str):\n md5 = hashlib.md5()\n md5.update(str)\n return md5.hexdigest()\n\n\n# Exit the program\ndef exit(why):\n print('{why} failed'.format(why=why))\n os._exit(0)\n\n\ndef deploy_github():\n ## TODO\n pass\n\n\ndef deal():\n if os.path.exists('note_xuehaibotic_github'):\n shutil.rmtree(r'note_xuehaibotic_github')\n os.mkdir(r'note_xuehaibotic_github')\n\n books = get_all_books()\n for i in books:\n notes = get_all_notes(i)\n for j in notes:\n detail = get_note_detail(j)\n write_md(detail)\n deploy_github()\n\n\nif __name__ == '__main__':\n deal()\n","sub_path":"crawler/ynote2github/ynote2github.py","file_name":"ynote2github.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538924291","text":"#!/usr/bin/env python\r\nimport os\r\nimport binascii\r\nimport datetime\r\nimport hashlib\r\nimport random\r\nimport copy\r\nimport ast\r\nimport sys\r\nfrom flask import Flask, request, json, Response, render_template\r\nfrom werkzeug.utils import secure_filename\r\napp = Flask(__name__)\r\n# ----------------------- BLOCKCHAIN CLASS ---------------------------------- #\r\nclass Blockchain:\r\n\r\n def __init__(self):\r\n self.chain = []\r\n self.difficulty = 3\r\n self.wallets = {}\r\n self.mempool = {}\r\n self.state = 0\r\n self.add()\r\n NULL_WALLET = {\r\n 'public_key': 'f1f91c30722f64de1c004423c091ce33',\r\n 'balance': 0.0,\r\n }\r\n self.wallets[NULL_WALLET['public_key']] = NULL_WALLET\r\n\r\n###################### ADD CODE ONLY BETWEEN THESE LINES! 
#####################\r\n #This wallet is here to accept tokens contracts being uploaded, it has no private key because it cannot send tokens\r\n # any tokens sent here will be essentially gone forever\r\n\r\n \r\n def create_transaction(self, from_, to, amount, private_key, message=None):\r\n if not self._validate_transaction(from_, to, amount, private_key):\r\n return {'error': 'invalid transaction'}\r\n\r\n transaction = {\r\n 'time': datetime.datetime.utcnow().timestamp(),\r\n 'from': from_,\r\n 'to': to,\r\n 'amount': float(amount),\r\n 'message': {},\r\n }\r\n\r\n transaction_id = self._hash_data(transaction)\r\n self.mempool[transaction_id] = transaction\r\n\r\n return {transaction_id: transaction}\r\n\r\n def create_wallet(self, contract_=None, priv=None, pub=None):\r\n if contract_==None:\r\n wallet = {\r\n 'public_key': binascii.b2a_hex(os.urandom(16)).decode('utf-8'),\r\n 'private_key': binascii.b2a_hex(os.urandom(16)).decode('utf-8'),\r\n 'balance': 10.0,\r\n }\r\n self.wallets[wallet['public_key']] = wallet\r\n return wallet\r\n elif contract_ is not None:\r\n wallet = {\r\n 'public_key': binascii.b2a_hex(os.urandom(16)).decode('utf-8'),\r\n 'contract_code': contract_,\r\n }\r\n self.wallets[wallet['public_key']] = wallet \r\n print(contract_)\r\n gas_price = self._calculate_gas(contract_)\r\n self.create_transaction(pub, 'f1f91c30722f64de1c004423c091ce33', gas_price, priv, contract_)\r\n return wallet \r\n\r\n\r\n def _validate_transaction(self, from_, to, amount, private_key):\r\n\r\n # Check that values actually exist\r\n if not from_ or not to or not amount or not private_key:\r\n return False\r\n\r\n # Check that addresses exist and are not the same\r\n if from_ not in self.wallets.keys() \\\r\n or to not in self.wallets.keys() \\\r\n or from_ == to:\r\n return False\r\n\r\n # Check that transaction generator is owner\r\n if not private_key == self.wallets[from_]['private_key']:\r\n return False\r\n\r\n # Check that amount is float or int\r\n try:\r\n amount = float(amount)\r\n except ValueError:\r\n return False\r\n\r\n # Check amount is valid and spendable\r\n if not amount > 0 \\\r\n or not amount <= self.wallets[from_]['balance']:\r\n return False\r\n\r\n return True\r\n\r\n def _choose_transactions_from_mempool(self, block_num):\r\n processed_transactions = {}\r\n contract_states = {}\r\n while len(processed_transactions) < 10 and len(self.mempool) > 0:\r\n transaction_id = random.choice(list(self.mempool))\r\n transaction = copy.deepcopy(self.mempool[transaction_id])\r\n if type(transaction['message']) is dict:\r\n prev_block = self.chain[block_num -1]\r\n contract_code = self.wallets[transaction['to']]['contract_code']\r\n contract_state = prev_block['contract_states']\r\n try:\r\n state = contract_state[self.wallets[transaction['to']]]\r\n except:\r\n state = 0\r\n sys.argv = [state]\r\n contract_states[transaction['to']] = \\\r\n exec(self.wallets[transaction['to']]['contract_code'])\r\n transaction['to'] = 'f1f91c30722f64de1c004423c091ce33'\r\n if transaction['amount'] <= self.wallets[transaction['from']]['balance']:\r\n self.wallets[transaction['from']]['balance'] -= transaction['amount']\r\n self.wallets[transaction['to']]['balance'] += transaction['amount']\r\n processed_transactions[transaction_id] = transaction\r\n del sys.argv\r\n del self.mempool[transaction_id]\r\n return processed_transactions, contract_states\r\n\r\n def _calculate_merkle_root(self, transactions):\r\n\r\n if len(transactions) == 0:\r\n return None\r\n\r\n if len(transactions) == 1:\r\n 
return transactions[0]\r\n\r\n new_transactions = []\r\n\r\n for i in range(0, len(transactions), 2):\r\n\r\n if len(transactions) > (i+1):\r\n new_transactions.append(\r\n self._hash_data(transactions[i] + transactions[i+1])\r\n )\r\n else:\r\n new_transactions.append(transactions[i])\r\n\r\n return self._calculate_merkle_root(new_transactions)\r\n\r\n def _calculate_state_merkle_root(self, contracts):\r\n \r\n if len(contracts) == 0:\r\n return None\r\n\r\n if len(contracts) == 1:\r\n return contracts[0]\r\n\r\n new_contracts = []\r\n\r\n for i in range(0, len(contracts), 2):\r\n\r\n if len(contracts) > (i+1):\r\n new_contracts.append(\r\n self._hash_data(contracts[i] + contracts[i+1])\r\n )\r\n else:\r\n new_contracts.append(contracts[i])\r\n\r\n return self._calculate_state_merkle_root(new_contracts) \r\n\r\n def _check_merkle_root(self, block):\r\n return self._calculate_merkle_root(list(block['transactions'])) \\\r\n == block['header']['merkle_root']\r\n\r\n def _calculate_gas(self, message):\r\n gas_price = sys.getsizeof(message) * 0.001\r\n return gas_price\r\n###############################################################################\r\n\r\n @property\r\n def length(self):\r\n return len(self.chain)\r\n\r\n def add(self):\r\n block = self._create_block()\r\n return self._mine_block(block)\r\n\r\n def check(self):\r\n\r\n results = []\r\n\r\n for block in reversed(self.chain):\r\n\r\n block_number = block['header']['number']\r\n\r\n if not block['hash'] == self._hash_data(block['header']):\r\n results.append(f'block-{block_number}: invalid hash')\r\n\r\n if block_number > 0:\r\n\r\n previous_block = self.chain[block_number - 1]\r\n\r\n if not block['header']['previous_block'] == previous_block['hash']:\r\n results.append(f'block-{block_number}: invalid block pointer')\r\n\r\n if not self._check_merkle_root(block):\r\n results.append(f'block-{block_number}: invalid merkle root')\r\n\r\n return \"ok\" if not results else results\r\n\r\n def _create_block(self):\r\n self.state = self.state + 1\r\n return {\r\n 'header': {\r\n 'number': len(self.chain),\r\n 'time': datetime.datetime.utcnow().timestamp(),\r\n 'nonce': None,\r\n 'previous_block': self._get_last_block_hash(),\r\n 'merkle_root': None,\r\n 'statemerkle': None,\r\n },\r\n 'transactions': {},\r\n 'contract_states': {self.state},\r\n 'hash': None\r\n }\r\n def _get_last_block_hash(self):\r\n return self.chain[-1]['hash'] if len(self.chain) > 0 else None\r\n \r\n def _mine_block(self, block):\r\n block['transactions'], block['contract_states'] = self._choose_transactions_from_mempool(block['header']['number'])\r\n block['header']['merkle_root'] = \\\r\n self._calculate_merkle_root(list(block['transactions']))\r\n #print(list(block['contract_states']))\r\n block['header']['statemerkle'] = \\\r\n self._calculate_state_merkle_root(list(block['contract_states']))\r\n #print(block['header']['statemerkle'])\r\n print(\"block['header']['statemerkle']: \", block['header']['statemerkle'])\r\n while True:\r\n block['header']['nonce'] = binascii.b2a_hex(os.urandom(16)).decode('utf-8')\r\n block['hash'] = self._hash_data(block['header'])\r\n if block['hash'][:self.difficulty] == '0' * self.difficulty:\r\n break\r\n self.chain.append(block)\r\n return block\r\n\r\n def _hash_data(self, data):\r\n\r\n hashId = hashlib.sha256()\r\n\r\n if isinstance(data, dict):\r\n hashId.update(repr(data).encode('utf-8'))\r\n return self._hash_data(str(hashId.hexdigest()))\r\n else:\r\n hashId.update(data.encode('utf-8'))\r\n return 
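The two Merkle helpers in the blockchain class above reduce a list pairwise, hashing adjacent entries and carrying an odd leftover up unchanged until a single root remains. A worked example with four transaction ids, using plain SHA-256 to mirror the string branch of `_hash_data`:

```python
import hashlib

def h(s):
    return hashlib.sha256(s.encode('utf-8')).hexdigest()

# Four leaves reduce to root = h(h(t1 + t2) + h(t3 + t4));
# with five leaves, the fifth would be carried up unchanged.
txs = [h(t) for t in ('t1', 't2', 't3', 't4')]
level = [h(txs[0] + txs[1]), h(txs[2] + txs[3])]
root = h(level[0] + level[1])
print(root)
```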
str(hashId.hexdigest())\r\n\r\n\r\n# ------------------------------ FLASK ROUTES ------------------------------- #\r\n\r\n@app.route('/api/blockchain', methods=['GET'])\r\ndef get_blockchain_info():\r\n return Response(\r\n response=json.dumps({\r\n 'length': blockchain.length,\r\n 'difficulty': blockchain.difficulty,\r\n 'validity': blockchain.check(),\r\n }),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/block/', methods=['GET'])\r\ndef get_block(number):\r\n return Response(\r\n response=json.dumps(\r\n blockchain.chain[number] if number < len(blockchain.chain) else None\r\n ),\r\n status=200, mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/block', methods=['GET'])\r\ndef get_all_blocks():\r\n return Response(\r\n response=json.dumps(blockchain.chain),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/block', methods=['POST'])\r\ndef add_block():\r\n return Response(\r\n response=json.dumps(blockchain.add()),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n\r\nALLOWED_EXTENSIONS = {'.py', '.txt'}\r\n\r\ndef allowed_file(filename):\r\n foo = '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n if foo == False:\r\n return \"False\"\r\n else:\r\n return \"True\"\r\n@app.route('/api/blockchain/wallet', methods=['GET', 'POST'])\r\ndef add_wallet():\r\n if request.method == 'GET':\r\n return render_template('addwallet.html')\r\n elif request.method == 'POST':\r\n if 'file' not in request.files:\r\n return Response(\r\n response=json.dumps(blockchain.create_wallet()),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n else:\r\n file_ = request.files['file']\r\n priv_key = request.form['priv_key']\r\n pub_key = request.form['pub_key']\r\n if file_.filename == '':\r\n return Response(\r\n response=json.dumps(blockchain.create_wallet()),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n elif file_ and allowed_file(file_.filename):\r\n return Response(\r\n response=json.dumps(blockchain.create_wallet(contract_=file_.read(), priv=priv_key, pub=pub_key)),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/balances', methods=['GET'])\r\ndef get_wallet_balances():\r\n return Response(\r\n response=json.dumps(\r\n {key: blockchain.wallets[key]['balance']\r\n for key in blockchain.wallets.keys()}\r\n ),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/transaction', methods=['POST'])\r\ndef add_transaction():\r\n if not all(k in request.form for k in ['from', 'to', 'amount', 'private_key']):\r\n return Response(\r\n response=json.dumps({'error': 'missing required parameter(s)'}),\r\n status=400,\r\n mimetype='application/json'\r\n )\r\n\r\n return Response(\r\n response=json.dumps(\r\n blockchain.create_transaction(\r\n request.form['from'],\r\n request.form['to'],\r\n request.form['amount'],\r\n request.form['private_key']\r\n )\r\n ),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n@app.route('/api/blockchain/mempool', methods=['GET'])\r\ndef get_mempool():\r\n return Response(\r\n response=json.dumps(blockchain.mempool),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\nif __name__ == '__main__':\r\n blockchain = Blockchain()\r\n app.run(host='127.0.0.1', port=8080, 
debug=1)","sub_path":"Group-2-Final-Project_bak.py","file_name":"Group-2-Final-Project_bak.py","file_ext":"py","file_size_in_byte":13147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481129019","text":"from django.shortcuts import render, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom question.models import Question\nfrom answer.models import Answer\nfrom topic.models import Topic\nfrom django.core import serializers\nfrom userprofile.models import UserProfile\nfrom django.db.models import Count\nfrom django.conf import settings\nfrom utils.qa_attributes import utils_is_answer, utils_get_yo_info\n\n\nFEED_COUNT = 100\nMAX_TOPICS = 10\n\n\n@login_required\ndef Home(request):\n topics = Topic.objects.annotate(\n follower_count=Count('followers')).order_by('-follower_count')[:MAX_TOPICS]\n user_topics = request.user.topic_followers.all()\n return render(request, 'home/home.html', {'topics': topics,\n 'user_topics': user_topics})\n\n\n@login_required\ndef Exams(request):\n return render(request, 'home/exams.html')\n\n\n@login_required\ndef LatestQA(request):\n user = request.user\n latest_questions = Question.objects.all().order_by('-time')[:FEED_COUNT]\n latest_answers = Answer.objects.all().order_by(\n '-time')[:FEED_COUNT].prefetch_related('question_url')\n latest_qa = list(latest_questions) + list(latest_answers)\n latest_qa.sort(key=lambda x: x.time, reverse=True)\n\n yo_list, yo_count_list = utils_get_yo_info(latest_qa, user)\n latest_qa_with_yos = zip(latest_qa, yo_list, yo_count_list)\n\n return render(request, 'home/latestqa.html', {\n 'latest_qa_with_yos': latest_qa_with_yos,\n 'user': user,\n 'domain': settings.DOMAIN_NAME})\n\n\n@login_required\ndef TopicsYouLike(request):\n topics = request.user.topic_followers.all()\n topic_questions = list(set(Question.objects.filter(\n topics__in=topics).order_by('-time')[:FEED_COUNT]))\n topic_questions.sort(key=lambda x: x.time, reverse=True)\n\n yo_list, yo_count_list = utils_get_yo_info(topic_questions, request.user)\n topic_questions_with_yos = zip(topic_questions, yo_list, yo_count_list)\n\n return render(request, 'home/topicsyoulike.html',\n {'topic_questions_with_yos': topic_questions_with_yos})\n\n\n@login_required\ndef PeopleYouFollow(request):\n following = request.user.following.all()\n answers = list(set(Answer.objects.filter(\n answered_by__in=following).order_by('-time')[:FEED_COUNT]))\n answers.sort(key=lambda x: x.time, reverse=True)\n\n yo_list, yo_count_list = utils_get_yo_info(answers, request.user)\n answers_with_yos = zip(answers, yo_list, yo_count_list)\n\n return render(request, 'home/peopleyoufollow.html',\n {'answers_with_yos': answers_with_yos})\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199273824","text":"# converts grams to cups, tbsp, and tsp of a dry ingredient\n# conversion depends on whether or not it's: flour, sugar, powdered sugar,\n# cornstartch, cocoa powder, baking powder, or baking soda\n# returns those three conversions\n# grams: amount in grams given\n# ing: the ingredient\ndef convertDry(grams, ing):\n \n grams = int(grams) # gotta do this for some reason\n \n if ing == 'flour': # 1tbsp = 10g\n \n tbsp = grams / 10\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups:\", cup)\n print(\"Tbsp:\", tbsp)\n print(\"Tsp:\", tsp)\n \n elif ing == 'brown sugar': # 1tbsp = 
16g\n \n tbsp = grams / 16\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'sugar': # 1tbsp = 14g\n \n tbsp = grams / 14\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp) \n \n elif ing == 'powdered sugar': # 1tbsp = 10g\n \n tbsp = grams / 10\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'cornstarch': # 1tbsp = 7g\n \n tbsp = grams / 7\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'cocoa powder': # 1tbsp = 6g\n \n tbsp = grams / 6\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'baking powder': # 1tbsp = 12g\n \n tbsp = grams / 12\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'baking soda': # 1tbsp = 18g\n \n tbsp = grams / 18\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n\n# converts grams to cups, tbsp, and tsp of a dry ingredient\n# conversion depends on whether or not it's: oil, milk, butter, heavy whipping cream, buttermilk, or water\n# returns those three conversions\n# grams: amount in grams given\n# ing: the ingredient\ndef convertWet(grams, ing):\n \n grams = int(grams)\n \n if ing == 'oil': # 1tbsp = 13g\n \n tbsp = grams / 13\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'milk': # 1tbsp = 15g\n \n tbsp = grams / 15\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'butter': # 1tbsp = 14g\n \n tbsp = grams / 14\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'water': # 1tbsp = 15g\n \n tbsp = grams / 15\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'heavy whipping cream': # 1tbsp = 14.44g\n \n tbsp = grams / 14.44\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n elif ing == 'buttermilk': # 1tbsp = 15.1g\n\n tbsp = grams / 15.1\n tsp = tbsp / 3\n cup = tbsp / 16\n \n print(\"Cups: \", cup)\n print(\"Tbsp: \", tbsp)\n print(\"Tsp: \", tsp)\n \n \n \n# main function\nif __name__ == '__main__':\n \n print(\"Grams to Cups/Tablespoon/Teaspoon Converter\")\n print(\"Can take dry or wet ingredients!\")\n \n keepConverting = True\n \n # while there's user input\n while keepConverting is True:\n \n grams = input(\"Enter the amount of grams to convert (just the number): \")\n \n # gets whether or not it's a dry or wet ingridient\n dw = input(\"Is it a wet or dry ingredient? (dry/wet): \")\n \n if dw == 'dry':\n \n print(\"Available conversions for dry ingredients: \")\n print(\"flour, brown sugar, sugar, powdered sugar, cornstarch, cocoa powder, baking powder, baking soda\")\n ing = input(\"Which ingredient will be converted? 
(enter the ingredient like how it's written on here): \")\n \n convertDry(grams, ing)\n \n else:\n \n print(\"Available conversions for wet ingredients: \")\n print(\"oil, water, milk, butter, heavy whipping cream, buttermilk\")\n ing = input(\"Which ingredient will be converted? (enter the ingredient like how it's written on here): \")\n\n \n convertWet(grams, ing)\n \n \n cont = input(\"Would you like to convert something else? (y/n): \")\n \n if cont == 'n': # exit out of loop\n keepConverting = False\n \n \n \n ","sub_path":"conversions.py","file_name":"conversions.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461669241","text":"import math\nfound = 0\nc = 998\nwhile not found:\n c-=1\n a=1\n b=1000-c-a\n if b > c:\n b = c-1\n a = 1000- b - c\n while a < b:\n if math.pow(a,2) + math.pow(b,2) == math.pow(c, 2):\n found = 1\n break\n else:\n a+=1\n b-=1\n \nprint(a, b, c)\n","sub_path":"euler9.py","file_name":"euler9.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74347434","text":"#!/usr/bin/env python3\n\n'''A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:\n\n1/2= 0.5\n1/3= 0.(3)\n1/4= 0.25\n1/5= 0.2\n1/6= 0.1(6)\n1/7= 0.(142857)\n1/8= 0.125\n1/9= 0.(1)\n1/10= 0.1\nWhere 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.\n\nFind the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.'''\n\n# each unique remainder implies a unique digit\n\nlongest = (0, 0) # digit, length\n\nfor i in range(2, 1000) :\n curr = 1\n remains = list()\n new_rem = curr % i\n while new_rem not in remains :\n remains.append(new_rem)\n curr = new_rem * 10\n new_rem = curr % i\n if len(remains) > longest[1] :\n longest = (i, len(remains))\n\nprint(longest[0])\n","sub_path":"python/p26.py","file_name":"p26.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3452343","text":"from selenium import webdriver\nimport time\n# 구글지도(https://www.google.com/maps/)에 카페를 검색해서 검색된 카페들이 이름, 평점, 주소 수집\n\n# 크롬창(웹드라이버) 열기\ndriver = webdriver.Chrome(\"./chromedriver\")\n\n# 구글 지도 접속\ndriver.get(\"https://www.google.com/maps/\")\n\n# 검색창에 \"카페\" 입력\nsearchbox = driver.find_element_by_css_selector(\"input#searchboxinput\")\nsearchbox.send_keys(\"카페\")\n\n# 검색버튼 누르기\nsearchbutton = driver.find_element_by_css_selector(\"button#searchbox-searchbutton\")\nsearchbutton.click()\n\n# 여러 페이지(999)에서 반복\nfor i in range(999):\n # 시간 지연\n time.sleep(3)\n\n # 컨테이너(가게) 데이터 수집 // div.section-result-content\n stores = driver.find_elements_by_css_selector(\"div.section-result-content\")\n\n for s in stores:\n # 가게 이름 데이터 수집 // h3.section-result-title\n title = s.find_element_by_css_selector(\"h3.section-result-title\").text\n\n # 평점 데이터 수집 // span.cards-rating-score\n # 평점이 없는 경우 에러 처리\n try:\n score = s.find_element_by_css_selector(\"span.cards-rating-score\").text\n except:\n score = \"평점없음\"\n\n # 가게 주소 데이터 수집 // span.section-result-location\n addr = s.find_element_by_css_selector(\"span.section-result-location\").text\n\n print(title, \"/\", score, \"/\", addr)\n\n # 다음페이지 버튼 클릭 하기\n # 다음페이지가 없는 경우(데이터 수집 완료) 에러 처리\n try:\n nextpage = 
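The Project Euler p26 solution above rests on the observation its comment states: each distinct remainder in the long division yields a distinct digit, so the recurring-cycle length equals the number of remainders seen before one repeats. A worked example for 1/7:

```python
# Long division of 1/7 produces remainders 1, 3, 2, 6, 4, 5, then 1 again,
# so the cycle length is 6 -- matching the 6-digit cycle of 0.(142857).
d, r, seen = 7, 1, []
while r not in seen:
    seen.append(r)
    r = (r * 10) % d
print(seen, len(seen))   # [1, 3, 2, 6, 4, 5] 6
```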
driver.find_element_by_css_selector(\"button#n7lv7yjyC35__section-pagination-button-next\")\n nextpage.click()\n except:\n print(\"데이터 수집 완료.\")\n break\n\n# 크롬창 닫기\n# driver.close()","sub_path":"Part01/GoogleMap 데이터수집기.py","file_name":"GoogleMap 데이터수집기.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141554444","text":"import torch\nfrom torch.utils.data import Dataset\n\nclass Corpus(Dataset):\n def __init__(self, corpus_path, tokenizer):\n self.corpus = []\n self.tokenizer = tokenizer\n\n with open(corpus_path, 'r', encoding='utf8') as reader:\n for li, line in enumerate(reader):\n self.corpus.append(line.strip())\n\n def __getitem__(self, index):\n tokens_indices = self.tokenizer.tokenize_and_transform(self.corpus[index])\n tokens_indices = torch.tensor(tokens_indices)\n return tokens_indices[:-1], tokens_indices[1:]\n\n def __len__(self):\n return len(self.corpus)","sub_path":"dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236051725","text":"\n\nimport sqlite3\n\nimport pandas as pd\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.completion import WordCompleter\n\nfrom colorama import init\nfrom termcolor import colored\n\nfrom cmx import config as cfg\n\n\ninit()\n\nmy_completer = WordCompleter(['back', 'help', 'smb', 'list', 'creds',\n 'hosts', 'users', 'exit'], ignore_case=True)\n\n\ngenHelp = \"\"\"Available Commands:\n help - Show Help Menu\n smb - Enter the SMB Database\n exit - Exits CMXDB\n \"\"\"\n\nsmbHelp = \"\"\"Available Commands:\n back - Go back one level\n help - Show Help for this protocol\n help - Show Help for command\n list - show available tables\n creds - List Credentials Stored in Database\n hosts - List Hosts Stored in Database\n add host - Add a host to the database\n add cred - Add a credential to the database\n\"\"\"\n\naddHostHelp = \"\"\"Adding a Host to the DB:\n All values are required\nadd host \n\"\"\"\n\naddCredHelp = \"\"\"Adding a Cred to the DB:\n All values are required\nadd cred \n\"\"\"\n\n\nclass CMXDB():\n\n def __init__(self):\n self.connection = None\n self.proto = ''\n self.workspace = cfg.WORKSPACE\n self.prompt_str = 'cmxdb {} {}> '.format(self.workspace, self.proto)\n self.proto_db_path = None\n self.session = PromptSession(completer=my_completer)\n self.working = True\n\n def run(self):\n while self.working:\n try:\n text = self.session.prompt(self.prompt_str)\n except KeyboardInterrupt:\n continue # Control-C pressed. 
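The `__getitem__` in the dataset_utils.py record above returns the token sequence shifted by one position, the standard next-token language-modeling setup: every input position is trained to predict the token that follows it. A tiny illustration of that input/target split (token values are made up):

```python
import torch

tokens = torch.tensor([101, 7, 8, 9, 102])
inputs, targets = tokens[:-1], tokens[1:]
# inputs:  tensor([101,   7,   8,   9])
# targets: tensor([  7,   8,   9, 102])  -- each position predicts the next token
```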
Try again.\n except EOFError:\n break # Control-D pressed.\n\n self.do_work(text.strip().lower())\n\n def connect_db(self, protocol=''):\n\n proto_db_path = (cfg.WS_PATH / cfg.WORKSPACE / protocol).with_suffix('.db')\n\n if proto_db_path.is_file():\n self.connection = sqlite3.connect(proto_db_path)\n self.proto = protocol\n return\n else:\n print('No database found for {}'.format(protocol))\n return\n\n def show_help(self, command):\n global genHelp\n global smbHelp\n global addHostHelp\n global addCredHelp\n\n if command == 'help' and self.proto == '':\n print(genHelp)\n elif command == 'help' and self.proto == 'smb':\n print(smbHelp)\n elif command.startswith('help add cred'):\n print(addCredHelp)\n elif command.startswith('help add host'):\n print(addHostHelp)\n elif command.startswith('help smb'):\n print(smbHelp)\n else:\n print(\"There's no help for you\")\n\n def list_tables(self):\n\n if self.connection:\n with self.connection:\n try:\n messages = self.connection.execute(\n \"SELECT name FROM sqlite_master WHERE type ='table' \"\n \"AND name NOT LIKE 'sqlite_%';\")\n except Exception as e:\n print(repr(e))\n else:\n for message in messages:\n print(message)\n else:\n print('Not connected to a database yet')\n\n def do_back(self):\n\n if self.connection:\n self.proto = ''\n self.connection = None\n else:\n print('Nowhere to back out of')\n\n def show_creds(self, filterTerm=None, credType=None):\n\n pd.set_option('display.max_colwidth', 68)\n if self.connection:\n with self.connection:\n try:\n # if we're returning a single credential by ID\n if self.is_credential_valid(filterTerm):\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE id=?\", [filterTerm])))\n\n elif credType:\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE credtype=?\", [credType])))\n\n # if we're filtering by username\n elif filterTerm and filterTerm != '':\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE LOWER(username) \"\n \"LIKE LOWER(?)\", ['%{}%'.format(filterTerm)])))\n\n # otherwise return all credentials\n else:\n print(colored(pd.read_sql_query(\n \"SELECT id, domain, username, password FROM users WHERE password IS NOT NULL AND password !='' \",\n self.connection, index_col='id'), \"green\"))\n except Exception as e:\n print(repr(e))\n else:\n # for result in results:\n print('')\n else:\n print('Not connected to a database yet')\n\n def show_users(self, filterTerm=None, credType=None):\n\n pd.set_option('display.max_colwidth', 68)\n if self.connection:\n with self.connection:\n try:\n # if we're returning a single credential by ID\n if self.is_credential_valid(filterTerm):\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE id=?\", [filterTerm])))\n\n elif credType:\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE credtype=?\", [credType])))\n\n # if we're filtering by username\n elif filterTerm and filterTerm != '':\n print(colored(pd.read_sql_query(\n \"SELECT * FROM users WHERE LOWER(username) \"\n \"LIKE LOWER(?)\", ['%{}%'.format(filterTerm)])))\n\n # otherwise return all credentials\n else:\n print(colored(pd.read_sql_query(\n \"SELECT id, domain, username FROM users\",\n self.connection, index_col='id'), \"green\"))\n except Exception as e:\n print(repr(e))\n else:\n # for result in results:\n print('')\n else:\n print('Not connected to a database yet')\n\n def show_hosts(self, filterTerm=None, credType=None):\n\n pd.set_option('display.max_colwidth', 68)\n if self.connection:\n with self.connection:\n try:\n # if we're 
returning a single credential by ID\n if self.is_credential_valid(filterTerm):\n print(colored(pd.read_sql_query(\n \"SELECT * FROM computers WHERE id=? LIMIT 1\", [filterTerm])))\n\n elif credType:\n print(colored(pd.read_sql_query(\n \"SELECT * FROM computers WHERE credtype=?\", [credType])))\n\n # if we're filtering by username\n elif filterTerm and filterTerm != '':\n print(colored(pd.read_sql_query(\n \"SELECT * FROM computers WHERE LOWER(hostname) \"\n \"LIKE LOWER(?)\", ['%{}%'.format(filterTerm)])))\n\n # otherwise return all credentials\n else:\n print(colored(pd.read_sql_query(\n \"SELECT * FROM computers\",\n self.connection, index_col='id'), \"green\"))\n except Exception as e:\n print(repr(e))\n else:\n # for result in results:\n print('')\n else:\n print('Not connected to a database yet')\n\n\n def is_credential_valid(self, credentialID):\n \"\"\"\n Check if this credential ID is valid.\n \"\"\"\n\n if self.connection:\n with self.connection:\n try:\n results = self.connection.execute(\n \"SELECT * FROM users WHERE id=? AND password IS NOT \"\n \"NULL LIMIT 1\", [credentialID])\n except Exception as e:\n print(repr(e))\n return False\n else:\n result = results.fetchall()\n return len(result) > 0\n else:\n print('Not connected to a database yet')\n return False\n\n def do_work(self, command=''):\n\n if command == '':\n return\n\n if command.startswith('help ') or command == 'help':\n self.show_help(command)\n return\n\n if command == 'smb':\n self.connect_db('smb')\n return\n\n if command == 'list':\n self.list_tables()\n return\n\n if command == 'back':\n self.do_back()\n return\n\n if command == 'exit':\n self.working = False\n return\n\n if command == 'creds':\n self.show_creds()\n return\n\n if command == 'users':\n self.show_users()\n return\n\n if command == 'hosts':\n self.show_hosts()\n return\n\n else:\n print(\"Unknown Command\")\n return\n\n\ndef main():\n\n dbnav = CMXDB()\n dbnav.run()\n print('GoodBye!')\n","sub_path":"cmx/CMXDB2/cmxdb.py","file_name":"cmxdb.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154365534","text":"import jieba\r\nwords=input('shuru')\r\nfor ch in ',;:,、:;,, ;, 、...?...\"\"''.../...\\\\\\\\\\n.。---_---!()':\r\n words=words.replace(ch,'')\r\nls=jieba.lcut(words)\r\ncounts={}\r\nfor word in ls:\r\n counts[word]=counts.get(word,0)+1\r\nlss=list(counts.items())\r\nlss.sort(key=lambda x:x[1],reverse=True)\r\nprint(lss[0][0],lss[0][1])\r\nfor word in lss:\r\n print('{}cishu shi{}'.format(word[0],word[1]))\r\n","sub_path":"py二级/coding/count_words.py","file_name":"count_words.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489960455","text":"\"\"\"Toggles visibility of scope boxes in current 3D view\"\"\"\n\nfrom pyrevit import framework\nfrom pyrevit import revit, DB\n\n\n@revit.carryout('Toggle Scope Boxs')\ndef toggle_scopebox():\n # activate the show hidden so we can collect\n # all elements (visible and hidden)\n activeview = revit.activeview\n activeview.EnableRevealHiddenMode()\n view_elements = DB.FilteredElementCollector(revit.doc, activeview.Id)\\\n .OfCategory(DB.BuiltInCategory.OST_VolumeOfInterest)\\\n .ToElements()\n\n # find section boxes, and try toggling their visibility\n # usually more than one section box shows up on the list but not\n # all of them can be toggled. 
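The cmxdb helpers above filter tables through pandas with parameterized LIKE queries. A self-contained sketch of that pattern -- note that `pd.read_sql_query` needs the connection argument on every call, which some branches of the helpers above omit (the database path here is illustrative):

```python
import sqlite3
import pandas as pd

con = sqlite3.connect('smb.db')   # illustrative path; cmxdb builds it from cfg

# The `?` placeholder keeps the filter parameterized instead of interpolated.
df = pd.read_sql_query(
    "SELECT id, domain, username FROM users WHERE LOWER(username) LIKE LOWER(?)",
    con, params=['%admin%'])
print(df)
```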
Whichever that can be toggled,\n # belongs to this view\n for scope_box in [x for x in view_elements\n if x.CanBeHidden(activeview)]:\n if scope_box.IsHidden(activeview):\n activeview.UnhideElements(\n framework.List[DB.ElementId]([scope_box.Id])\n )\n else:\n activeview.HideElements(\n framework.List[DB.ElementId]([scope_box.Id])\n )\n\n activeview.DisableTemporaryViewMode(\n DB.TemporaryViewMode.RevealHiddenElements\n )\n\n\ntoggle_scopebox()\n","sub_path":"pyMapes.extension/_deprecated tools/ScopeboxBox.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18554765","text":"# Copyright (c) Facebook, Inc. and its affiliates.\nfrom mmf.common.registry import registry\nfrom mmf.datasets.builders.coco2017.caption_dataset import (\n CVLGCoco2017Dataset,\n TracedCaptionCoco2017Dataset,\n)\nfrom mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder\n\n\n@registry.register_builder(\"caption_coco2017\")\nclass CaptionCoco2017Builder(MMFDatasetBuilder):\n def __init__(\n self,\n dataset_name=\"caption_coco2017\",\n dataset_class=TracedCaptionCoco2017Dataset,\n *args,\n **kwargs\n ):\n super().__init__(dataset_name, dataset_class, *args, **kwargs)\n\n @classmethod\n def config_path(cls):\n return \"configs/datasets/coco2017/traced_caption.yaml\"\n\n\n@registry.register_builder(\"cvlg_coco2017\")\nclass CaptionCoco2017Builder(MMFDatasetBuilder):\n def __init__(\n self,\n dataset_name=\"cvlg_coco2017\",\n dataset_class=CVLGCoco2017Dataset,\n *args,\n **kwargs\n ):\n super().__init__(dataset_name, dataset_class, *args, **kwargs)\n\n @classmethod\n def config_path(cls):\n return \"configs/datasets/coco2017/traced_caption.yaml\"\n","sub_path":"mmf/datasets/builders/coco2017/caption_builder.py","file_name":"caption_builder.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556176414","text":"\ndevice_with_switch = [ \"0.5.0.0\", \"0.18.0.0\", \"0.17.0.0\", \"1.25.56.0\", \"1.25.64.0\", \"1.32.64.0\", \"1.32.65.0\", \"1.9.43.0\", \"2.42.67.0\" ]\ndevice_with_brightness = [ \"1.1.53.0\", \"1.9.43.0\", \"1.14.58.0\", \"1.14.65.0\", \"1.25.56.0\", \"1.25.64.0\", \"1.32.64.0\", \"1.32.65.0\" ]\ndevice_is_thermostat = [ \"5.11.11.0\"]\ndevice_is_wireless_button = [ \"0.17.0.0\" ]\ndevice_is_on_off_only = [ \"2.6.65.0\", \"2.55.70.0\", \"2.42.67.0\" ]\ndevice_is_keypad_button = [ \"1.9.43.0\" ]\n\ndef has_switch(device_type):\n return device_type in device_with_switch\n\ndef has_brightness(device_type):\n return device_type in device_with_brightness\n\ndef is_thermostat(device_type):\n return device_type in device_is_thermostat\n\ndef is_wireless_button(device_type):\n return device_type in device_is_wireless_button\n\ndef keypad_load_button(device):\n return device[\"address\"] == device['pnode'] and device[\"@flag\"] == 128\n\n\nclass InsteonDefinitions():\n \n controllerMap = { \n \"node\": {\n \"state/on\": {\"PowerController\": [\"powerState\"] }, \n \"state/bri\": {\"BrightnessController\": [\"brightness\"] } \n }\n }\n\n \n commands = {}\n \n thermostatModesByName = { 'OFF':'0','HEAT':'1','COOL':'2','AUTO':'3','FAN':'4','PROGRAM HEAT':'5','PROGRAM COOL':'6','PROGRAM AUTO':'7' } \n\n busyStates={\n \"0\":\"Not Busy\",\n '1':'Busy',\n '2':'Idle',\n '3':'Safe Mode'\n }\n\n triggerEvents={\n '0':'Event Status',\n '1':'Get Status',\n '2':'Key Changed',\n '3':'Info 
String',\n '4':'IR Learn Mode',\n '5':'Schedule Status Changed',\n '6':'Variable Status Changed',\n '7':'Variable Initialized',\n 'X':'Unknown'\n }\n \n nodeChanges={\n 'NN':'Node Renamed',\n 'NR':'Node Removed',\n 'ND':'Node Added',\n 'MV':'Node Moved (into a scene)',\n 'CL':'Link Changed (in a scene)',\n 'RG':'Removed From Group (scene)',\n 'EN':'Enabled',\n 'PC':'Parent Changed',\n 'PI':'Power Info Changed',\n 'DI':'Device ID Changed',\n 'DP':'Device Property Changed',\n 'GN':'Group Renamed',\n 'GR':'Group Removed',\n 'GD':'Group Added',\n 'FN':'Folder Renamed',\n 'FR':'Folder Removed',\n 'FD':'Folder Added',\n 'NE':'Node Error (Comm. Errors)',\n 'CE':'Clear Node Error (Comm. Errors Cleared)',\n 'SN':'Discovering Nodes (Linking)',\n 'SC':'Node Discovery Complete',\n 'WR':'Network Renamed',\n 'WH':'Pending Device Operation',\n 'WD':'Programming Device',\n 'RV':'Node Revised (UPB)'\n }\n \n deviceTypes={\n \"0.18.0.0\":\"button\",\n \"0.17.0.0\":\"button\",\n \"0.5.0.0\":\"button\",\n \"5.11.11.0\":\"thermostat\",\n \"1.9.43.0\":\"light\",\n \"1.0.51.0\":\"light\",\n \"1.1.53.0\":\"light\",\n \"1.6.51.0\":\"light\",\n \"1.14.58.0\":\"light\",\n \"1.14.65.0\":\"light\",\n \"1.25.56.0\":\"lightswitch\",\n \"1.25.64.0\":\"lightswitch\",\n \"1.28.57.0\":\"light\",\n \"1.32.64.0\":\"lightswitch\",\n \"1.32.65.0\":\"lightswitch\",\n \"2.6.65.0\":\"light\",\n \"2.9.0.0\":\"light\",\n \"2.42.67.0\":\"lightswitch\",\n \"2.56.67.0\":\"light\",\n \"2.31.65.0\":\"device\",\n \"3.13.0.0\":\"device\",\n \"2.55.70.0\":\"device\"\n }\n\n wirelessDeviceTypes=['0.5.0.0', '0.17.0.0', \"0.18.0.0\"]\n \n\n","sub_path":"definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272169741","text":"class Digua:\n\tdef __init__(self,name):\n\t\tself.name = name\n\tprint(\"**********我的地瓜名**********\")\n\tdef printName(self):\n\t\tprint(\"地瓜名字为:%s\"%self.name)\ndef myPrint(Digua):\n\tDigua.printName()\ns = Digua(\"辣酱地瓜\")\nmyPrint(s)\np = Digua(\"变态辣地瓜\")\nmyPrint(p)\n\nclass Digua:\n\tdef __init__(self,typa):\n\t\tself.typa = typa\n\tprint(\"*********地瓜种类**********\")\n\tdef printTypa(self):\n\t\tprint(\"地瓜种类有:%s\"%self.typa)\ndef myPrint(Digua):\n\tDigua.printTypa()\nq = Digua(\"大的一块\")\nmyPrint(q)\nr = Digua(\"小的五毛\")\nmyPrint(r)\n\nclass Diguaa:\n\n\tdef __init__(self):\n\t\tself.cookedLever = 0\n\t\tself.cookedStr = \"生的\"\n\t\tself.condliments = []\n\tdef cook(self,time):\n\t\tcookedStr = int(input(\"请输入① 键进行烘烤\\n 请按键:\"))\n\t\tself.cookedLever += time\n\t\tif self.cookedLever>8:\n\t\t\tself.cookedStr = \"烤糊了\"\n\t\tif self.cookedLever>5:\n\t\t\tself.cookedStr = \"烤好了\"\n\t\tif self.cookedLever>3:\n\t\t\tself.cookedStr = \"中熟\"\n\t\telse:\n\t\t\tself.cookedStr = \"生的\"\n\tdef __str__(self):\n\t\tg = self.cookedStr+\"地瓜\"\n\t\tif len(self.condliments)>0:\n\t\t\tg = g + \"(\"\n\t\t\tfor temp in self.condliments:\n\t\t\t\tg = g + temp + \",\"\n\t\t\tg = g.strip(\",\")\n\t\t\tg = g + \")\"\n\t\treturn g\n\tdef addcondiments(self,condliments):\n\t\tself.condliments.append(condliments)\n\n\nmyDiguaa = Diguaa()\nprint(\"----有了地瓜还没烤----\")\nprint(myDiguaa.cookedLever)\n#print(myDiguaa.cookedStr)\nprint(myDiguaa.condliments)\n\nprint(\"----接下来要进行烤地瓜了----\")\nprint(\"----开始烤了----\")\nmyDiguaa.cook(4)\nprint(myDiguaa)\nmyDiguaa.addcondiments(\"加番茄酱\")\nprint(\"----地瓜又烤了5分钟----\\n 
请翻面\\n\")\nmyDiguaa.cook(3)\nprint(myDiguaa)\nprint(\"---接下来添加辣酱----\")\nmyDiguaa.addcondiments(\"加辣酱\")\nprint(myDiguaa)\nprint(\"----地瓜又烤了5分钟----\")\nprint(\"烤好了 烤好了\")\n\n","sub_path":"烤地瓜.py","file_name":"烤地瓜.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63829907","text":"from django.urls import path\nfrom . import views\nfrom .views import (\n PostListView,\n \n DriverCreateView,\n #DriverUpdateView,\n OwnerCreateView,\n SharerCreateView,\n)\nurlpatterns = [\n path('', views.home, name='request-home'),\n #path('driver/', views.driverInfo, name='request-driverInfo'),\n\tpath('driver/new/', DriverCreateView.as_view(), name='driver-create'),\n #path('driver//update/', DriverUpdateView.as_view(), name='driver-update'),\n path('driver//claim/',views.driverclaim, name='driver-claim'),\n path('driver/update/', views.driverupdate, name='driver-update'),\n path('driver/pick/', views.allorders, name='driver-pick'),\n path('test/', views.test, name='test'),\n path('owner/new/', OwnerCreateView.as_view(), name='owner-create'),\n #path('wait/', views.wait, name='owner-waiting'),\n path('sharer/new/', SharerCreateView.as_view(), name='sharer-create'),\n path('owner/orders/',views.orders, name='owner-orders'),\n #path('owner/ordersupdate',views.ordersupdate, name='owner-orders-update'),\n path('owner//detail',views.detail, name='owner-orders-update'),\n #view history\n path('driver/history/',views.driverhistory, name='driver-history'),\n path('driver//complete/',views.drivercomplete, name='driver-complete'),\n #view rides for sharer \n path('sharer/join/view/',views.sharerjoinview, name='sharer-join-view'),\n path('sharer//join/',views.sharerjoin, name='sharer-join'),\n path('sharer//cancel/',views.sharercancel, name='sharer-cancel'),\n path('sharer/history',views.sharerhistory, name='sharer-history'),\n]\n","sub_path":"Ride Sharing/erss-hwk1-zl254-wz125/docker-deploy/web-app/request/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274221465","text":"import logging\nlogger = logging.getLogger(__name__)\n\nimport os\nimport smtplib\nfrom datetime import datetime, timedelta\n\nfrom django.core.mail import send_mail\n\nfrom . import utils\nfrom .. 
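In the roasted-sweet-potato class above, cook() uses a chain of independent `if` statements, so later branches overwrite earlier matches: a cooked level of 9 triggers all three branches and ends up labeled 中熟 (half done) instead of 烤糊了 (burnt). A sketch of the presumably intended cascade with mutually exclusive branches:

```python
def doneness(level):
    # elif keeps the thresholds mutually exclusive, unlike the chain above
    if level > 8:
        return '烤糊了'   # burnt
    elif level > 5:
        return '烤好了'   # done
    elif level > 3:
        return '中熟'     # half done
    return '生的'         # raw

assert doneness(9) == '烤糊了'
```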
import settings\n\nEMAIL_DIR = os.path.join(settings.BASE_DIR, 'apiserver/api/emails/')\n\ndef send_welcome_email(member):\n vetting_date = member.application_date + timedelta(days=28)\n\n def replace_fields(text):\n return text.replace(\n '[name]', member.first_name,\n ).replace(\n '[username]', member.user.username,\n ).replace(\n '[date]', vetting_date.strftime('%A, %B %d'),\n )\n\n with open(EMAIL_DIR + 'welcome.txt', 'r') as f:\n email_text = replace_fields(f.read())\n\n with open(EMAIL_DIR + 'welcome.html', 'r') as f:\n email_html = replace_fields(f.read())\n\n try:\n send_mail(\n subject='Welcome to Protospace!',\n message=email_text,\n from_email=None, # defaults to DEFAULT_FROM_EMAIL\n recipient_list=[member.user.email, 'portal@tannercollin.com'],\n html_message=email_html,\n )\n\n logger.info('Sent welcome email:\\n' + email_text)\n except smtplib.SMTPException as e:\n msg = 'Problem sending welcome email to ' + member.user.email + ': ' + str(e)\n utils.alert_tanner(msg)\n logger.exception(msg)\n","sub_path":"apiserver/apiserver/api/utils_email.py","file_name":"utils_email.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270670807","text":"import sys,os\nimport argparse\nimport netCDF4\nimport numpy as np\nimport metpy.calc as mpcalc\nfrom metpy.units import units\n\n\ndef usage(name=None):\n return '''CM1toSHARPpy.py input-netCDF-file output-sounding-file [options] [--help]\n\nExample:\n python3 CM1toSHARPpy.py cm1out.nc CM1SHARP.out -x 0 -y 0 -t 52 -u 12.0 -v 8.0\n'''\n\n#Define command line arguments\nparser = argparse.ArgumentParser(description='Generate SHARPpy from CM1 data.', usage=usage())\nparser.add_argument('inputfile', help='Input netCDF file', default='blank')\nparser.add_argument('outputfile', help='Output sounding file name', default='CM1out.snd')\nparser.add_argument('-x', help='x position to generate sounding data from dataset', type=float, default=0.)\nparser.add_argument('-y', help='y position to generate sounding data from dataset', type=float, default=0.)\nparser.add_argument('-t', help='time to generate sounding data from in dataset', type=float, default=0.)\nparser.add_argument('-u', help='X movement speed of domain in m/s', type=float, default=0.)\nparser.add_argument('-v', help='Y movement speed of domain in m/s', type=float, default=0.)\nargs = parser.parse_args()\ndatafile = netCDF4.Dataset(args.inputfile, 'r')\n\n#Define model data variables and other input variables.\nPRS = datafile.variables['prs'][args.t,:,args.y,args.x] / 100.00 * units.mbar\nTIME = datafile.variables['time'][:]\nTH = datafile.variables['th'][args.t,:,args.y,args.x] * units.degK\nQV = datafile.variables['qv'][args.t,:,args.y,args.x] * units('g/kg')\nTMPK = mpcalc.temperature_from_potential_temperature(PRS, TH)\nU = np.array(datafile.variables['uinterp'][args.t,:,args.y,args.x]) + args.u\nV = np.array(datafile.variables['vinterp'][args.t,:,args.y,args.x]) + args.v\n\n#Define output variables.\nHGHT = datafile.variables['zh'][args.t,:,args.y,args.x] * 1.00 * units.meters\nTMPC = TMPK.to('degC')\nDWPC = mpcalc.dewpoint(mpcalc.vapor_pressure(PRS, QV))\nWDIR = mpcalc.wind_direction(U, V)\nWSPD = mpcalc.wind_speed(U, V) * 1.94384\n\n# Write data to file.\nfile_header = \"\"\"%TITLE%\nXaxis_{0}_Yaxis_{1}_Time_{2} 010101/0000\n\n LEVEL HGHT TEMP DWPT WDIR WSPD\n-------------------------------------------------------------------\n%RAW%\"\"\".format(args.x, args.y, 
int(args.t))\n\nnp.savetxt(args.outputfile, np.c_[PRS,HGHT,TMPC,DWPC,WDIR,WSPD], fmt='%10.20f', header=file_header, footer='%END%', delimiter=', ', comments='',)\n","sub_path":"CM1scripts/CM1toSHARPpy.py","file_name":"CM1toSHARPpy.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270748434","text":"from collections import Counter\nclass Solution(object):\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n numLen = k\n frequency = {}\n for num in nums:\n frequency[num] = frequency.get(num,0) + 1\n \n bucket = [None] * (len(nums)+1)\n for (num,freq) in frequency.items():\n if bucket[freq] == None:\n bucket[freq] = []\n bucket[freq].append(num)\n \n result = []\n for i in reversed(range(len(nums) + 1)):\n if k > 0:\n if bucket[i] != None:\n result.extend(bucket[i])\n k -= len(bucket[i])\n else:\n break\n \n return result[0:numLen]","sub_path":"347-Top-K-Frequent-Elements/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408149503","text":"import os\nimport pickle\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\nTRAIN_SAMPLES = 70000\nTEST_SAMPLES = 10000\n\n\ndef load_samples(samples, split):\n # Load dataset into a list\n dataset = tfds.load(name=\"svhn_cropped\", split=split)\n dataset_list = list(tfds.as_numpy(dataset))\n\n # Split inputs and outputs\n x, y = list(), list()\n for pair in dataset_list[:samples]:\n x.append(pair['image'])\n y.append(pair['label'])\n\n # Change data format\n x = np.array(x) / 255\n y = tf.keras.utils.to_categorical(np.array(y))\n\n return x, y\n\nif __name__ == '__main__':\n # Load training and test data\n x_train, y_train = load_samples(TRAIN_SAMPLES, split=tfds.Split.TRAIN)\n x_test, y_test = load_samples(TEST_SAMPLES, split=tfds.Split.TEST)\n\n # Put it into a single object\n save_target = (x_train, y_train), (x_test, y_test)\n\n # Pickle it to a file\n filename = 'dataset_train{train_samples}_test{test_samples}.pickle'.format(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES\n )\n with open(os.path.join('datasets', filename), 'wb') as f:\n pickle.dump(save_target, f)","sub_path":"prepare_dataset.py","file_name":"prepare_dataset.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91962358","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom caffe2.python import workspace\nfrom ml.rl.test.gym.open_ai_gym_environment import ModelType\nfrom ml.rl.training.training_data_page import TrainingDataPage\n\n\nclass OpenAIGymMemoryPool:\n def __init__(self, max_replay_memory_size):\n \"\"\"\n Creates an OpenAIGymMemoryPool object.\n\n :param max_replay_memory_size: Upper bound on the number of transitions\n to store in replay memory.\n \"\"\"\n self.replay_memory = []\n self.max_replay_memory_size = max_replay_memory_size\n self.memory_num = 0\n self.skip_insert_until = self.max_replay_memory_size\n\n def sample_memories(self, batch_size, model_type):\n \"\"\"\n Samples transitions from replay memory uniformly at random.\n\n :param batch_size: Number of sampled transitions to return.\n :param model_type: Model type (discrete, parametric).\n \"\"\"\n cols = [[], [], [], [], [], [], [], [], []]\n indices = 
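The topKFrequent solution above buckets numbers by their frequency so the answer can be read off from the high-frequency end without a full sort. A worked run on a small input:

```python
nums, k = [1, 1, 1, 2, 2, 3], 2
freq = {}
for n in nums:
    freq[n] = freq.get(n, 0) + 1          # {1: 3, 2: 2, 3: 1}
bucket = [[] for _ in range(len(nums) + 1)]
for n, f in freq.items():
    bucket[f].append(n)                   # bucket index == frequency
# scan buckets from the highest frequency down and take the first k values
top_k = [n for f in range(len(nums), 0, -1) for n in bucket[f]][:k]
print(top_k)   # [1, 2]
```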
np.random.permutation(len(self.replay_memory))[:batch_size]\n for idx in indices:\n memory = self.replay_memory[idx]\n for col, value in zip(cols, memory):\n col.append(value)\n\n possible_next_actions_lengths = np.array(cols[7], dtype=np.int32)\n next_states = np.array(cols[3], dtype=np.float32)\n\n if model_type in (\n ModelType.PARAMETRIC_ACTION.value,\n ModelType.PYTORCH_PARAMETRIC_DQN.value,\n ):\n possible_next_actions = []\n for pna_matrix in cols[6]:\n for row in pna_matrix:\n possible_next_actions.append(row)\n\n tiled_states = np.repeat(next_states, possible_next_actions_lengths, axis=0)\n possible_next_actions = np.array(possible_next_actions, dtype=np.float32)\n next_state_pnas_concat = np.concatenate(\n (tiled_states, possible_next_actions), axis=1\n )\n else:\n possible_next_actions = np.array(cols[6], dtype=np.float32)\n next_state_pnas_concat = None\n\n return TrainingDataPage(\n states=np.array(cols[0], dtype=np.float32),\n actions=np.array(cols[1], dtype=np.float32),\n propensities=None,\n rewards=np.array(cols[2], dtype=np.float32),\n next_states=np.array(cols[3], dtype=np.float32),\n next_actions=np.array(cols[4], dtype=np.float32),\n possible_next_actions=possible_next_actions,\n episode_values=None,\n not_terminals=np.logical_not(np.array(cols[5]), dtype=np.bool),\n time_diffs=np.array(cols[8], dtype=np.int32),\n possible_next_actions_lengths=possible_next_actions_lengths,\n next_state_pnas_concat=next_state_pnas_concat,\n )\n\n def sample_and_load_training_data_c2(self, num_samples, model_type):\n \"\"\"\n Loads and preprocesses shuffled, transformed transitions from\n replay memory into the training net.\n\n :param num_samples: Number of transitions to sample from replay memory.\n :param model_type: Model type (discrete, parametric).\n \"\"\"\n tdp = self.sample_memories(num_samples, model_type)\n workspace.FeedBlob(\"states\", tdp.states)\n workspace.FeedBlob(\"actions\", tdp.actions)\n workspace.FeedBlob(\"rewards\", tdp.rewards.reshape(-1, 1))\n workspace.FeedBlob(\"next_states\", tdp.next_states)\n workspace.FeedBlob(\"not_terminals\", tdp.not_terminals.reshape(-1, 1))\n workspace.FeedBlob(\"time_diff\", tdp.time_diffs.reshape(-1, 1))\n workspace.FeedBlob(\"next_actions\", tdp.next_actions)\n workspace.FeedBlob(\"possible_next_actions\", tdp.possible_next_actions)\n workspace.FeedBlob(\n \"possible_next_actions_lengths\", tdp.possible_next_actions_lengths\n )\n\n def insert_into_memory(\n self,\n state,\n action,\n reward,\n next_state,\n next_action,\n terminal,\n possible_next_actions,\n possible_next_actions_lengths,\n time_diff,\n ):\n \"\"\"\n Inserts transition into replay memory in such a way that retrieving\n transitions uniformly at random will be equivalent to reservoir sampling.\n \"\"\"\n item = (\n state,\n action,\n reward,\n next_state,\n next_action,\n terminal,\n possible_next_actions,\n possible_next_actions_lengths,\n time_diff,\n )\n\n if self.memory_num < self.max_replay_memory_size:\n self.replay_memory.append(item)\n elif self.memory_num >= self.skip_insert_until:\n p = float(self.max_replay_memory_size) / self.memory_num\n self.skip_insert_until += np.random.geometric(p)\n rand_index = np.random.randint(self.max_replay_memory_size)\n self.replay_memory[rand_index] = item\n self.memory_num += 1\n","sub_path":"ml/rl/test/gym/open_ai_gym_memory_pool.py","file_name":"open_ai_gym_memory_pool.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
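The memory pool above keeps uniform sampling after the buffer fills by replacing a random slot, and its geometric draw just batches the skip decisions. The invariant is the one classic reservoir sampling (Algorithm R) maintains; a compact sketch of that baseline:

```python
import random

def reservoir_sample(stream, k):
    # After n items, every item has been kept with probability k/n -- the
    # same uniformity the pool's geometric-skip variant preserves.
    sample = []
    for i, item in enumerate(stream):
        if i < k:
            sample.append(item)
        else:
            j = random.randint(0, i)
            if j < k:
                sample[j] = item
    return sample

print(reservoir_sample(range(1000), 5))
```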
+{"seq_id":"207556887","text":"import time\nfrom lxml import etree\ntry:\n from django.conf import settings\nexcept ImportError:\n raise ImportError(\"Could not import django settings\")\n\n\nclass XMLGenerator:\n\n @staticmethod\n def to_xml(fields):\n\n root = etree.Element(\"order\", attrib={\n 'type': \"card\",\n 'id': fields['order_id'],\n 'timestamp': str(time.time())\n })\n\n try:\n signature = etree.SubElement(root, \"signature\")\n if hasattr(settings, \"PAYMENT_SIGNATURE\") and settings.PAYMENT_SIGNATURE:\n signature.text = settings.PAYMENT_SIGNATURE\n else:\n raise AttributeError(\"Please provide sinchronization key for the payment process\")\n\n invoice = etree.SubElement(root, \"invoice\", attrib={\n 'currency': fields['currency'],\n 'amount': str(fields['amount']),\n 'customer_type': \"2\",\n 'customer_id': str(fields['customer_id'])\n })\n details = etree.SubElement(invoice, \"details\")\n details.text = \"Payment Details\"\n\n url = etree.SubElement(root, \"url\")\n\n confirm_url = etree.SubElement(url, \"confirm\")\n confirm_url.text = fields['url_confirm']\n\n return_url = etree.SubElement(url, \"return\")\n return_url.text = fields['url_return']\n\n except KeyError:\n\n print(\"Some parameters were not passed correctly\")\n\n else:\n return etree.tostring(root, encoding='utf8', method='xml',\n pretty_print=True)\n\n @staticmethod\n def load_xml(xml_file):\n pass\n","sub_path":"src/payment/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602250644","text":"import socket\n\n\nif __name__ == '__main__':\n buffer_size = 25\n max_allow_conn = 30\n hostname = socket.gethostname()\n ip = socket.gethostbyname(hostname)\n print(\"ip: \", ip)\n socket_addr = ip\n port = 9487\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.bind((socket_addr, port))\n sock.listen(max_allow_conn)\n print(\"Server start listening...\")\n while True:\n conn, client_addr = sock.accept()\n print(client_addr, \" is connected!!\")\n while True:\n data = conn.recv(buffer_size)\n print(\"data: \", data)\n conn.sendall(\"Got the data: \" + data)\n","sub_path":"tcp_socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"336357855","text":"\"\"\"\nGiven a binary matrix A, we want to flip the image horizontally, \nthen invert it, and return the resulting image.\n\nTo flip an image horizontally means that each row of the image is reversed. \nFor example, flipping [1, 1, 0] horizontally results in [0, 1, 1].\n\nTo invert an image means that each 0 is replaced by 1, and each 1 is replaced by 0. 
\nFor example, inverting [0, 1, 1] results in [1, 0, 0].\n\nExample 1:\nInput: [[1,1,0],[1,0,1],[0,0,0]]\nOutput: [[1,0,0],[0,1,0],[1,1,1]]\nExplanation: First reverse each row: [[0,1,1],[1,0,1],[0,0,0]].\n Then, invert the image: [[1,0,0],[0,1,0],[1,1,1]]\n\"\"\"\n\n\ndef flipAndInvertImage(matrix):\n flip = []\n for items in matrix:\n flip.append(items[::-1])\n\n for i in range(len(flip)):\n for j in range(len(flip[i])):\n if flip[i][j] == 1:\n flip[i][j] = 0\n else:\n flip[i][j] = 1\n return flip\n\n\nmatrix = [[1, 1, 0, 0], [1, 0, 0, 1], [0, 1, 1, 1], [1, 0, 1, 0]]\nprint(flipAndInvertImage(matrix))\n","sub_path":"flipping_image.py","file_name":"flipping_image.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88104475","text":"def isDiopter(x):\n if x[0] != '+' or x[len(x)-1] != 'D':\n return False\n if len(x) > 6 and len(x) < 2:\n return False\n return True\n\n# Process Diopter checks if there is an additional 'D' or other stuff in the string, if there is, remove it\ndef processDiopter(x):\n print(x)\n while True:\n if len(x) == 1:\n return x\n elif x[len(x)-1] == 'D' or x[len(x)-1].isdigit() == False:\n x = x[:-1]\n else:\n break\n return x","sub_path":"utils/isDiopter.py","file_name":"isDiopter.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135907700","text":"import json\nimport boto3\nimport logging\nfrom urllib.request import urlopen\n\n\n# Initialize logger and set log level\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nsqs = boto3.resource('sqs', region_name='us-east-1')\nqueue = sqs.get_queue_by_name(QueueName='dining_orders')\n\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1')\ntable = dynamodb.Table('dining_suggestions')\n\n# Initialize SNS client for Virginia region\nsession = boto3.Session(region_name=\"us-east-1\")\nsns_client = session.client('sns')\n\n\ndef lambda_handler(event, context):\n \n response = queue.receive_messages(MaxNumberOfMessages=10)\n logger.info(\"Triggered !!\")\n \n for message in response:\n # Print out the body\n print('Message body : {}'.format(message.body))\n \n params = message.body.split(\";\")\n location = params[0]\n cuisine = params[1]\n phone_number = \"+1\" + params[2]\n #Call Google API\n contents = urlopen(\"https://maps.googleapis.com/maps/api/place/textsearch/json?query=\"+cuisine+\"+restaurants+in+\"\n +location+\"&key=AIzaSyB3npaZaiPppP_bb8H5UXRqc-d9xcU8jqE\").read()\n jsonList = json.loads(contents)\n \n #Store in Dynamo DB\n suggestions = \"\"\n ctr = 1\n for restaurant in jsonList['results']:\n name = restaurant['name']\n addr = restaurant['formatted_address'].split(\",\")[0]\n response = table.put_item(\n Item={\n 'user_id' : message.message_id+\"-\"+str(ctr),\n 'restaurant_name' : name,\n 'address' : addr,\n 'rating' : round(restaurant['rating']),\n 'cuisine' : cuisine\n }\n )\n suggestions += str(ctr)+\". \"+name+\" at \"+addr+\" \"\n ctr += 1\n if ctr == 3:\n break\n \n # Send message\n msg = \"Hello! Here are my \"+ cuisine+\" restaurant suggestions for you. 
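The flip-and-invert routine above can be collapsed into a single comprehension, since reversing a row and complementing each bit are independent steps. An equivalent one-pass form:

```python
def flip_and_invert(matrix):
    # reverse each row, then map bit -> 1 - bit, in one expression
    return [[1 - x for x in row[::-1]] for row in matrix]

assert flip_and_invert([[1, 1, 0]]) == [[1, 0, 0]]
```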
\" + suggestions\n response = sns_client.publish(\n PhoneNumber=phone_number,\n Message=msg,\n MessageAttributes={\n 'AWS.SNS.SMS.SenderID': {\n 'DataType': 'String',\n 'StringValue': 'DineOut'\n },\n 'AWS.SNS.SMS.SMSType': {\n 'DataType': 'String',\n 'StringValue': 'Promotional'\n }\n }\n )\n \n #delete message from queue\n message.delete()\n \n \n \n \n ","sub_path":"LF2.py","file_name":"LF2.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478074378","text":"from typing import Tuple, List, Dict, Optional, Union, Iterator\nimport itertools\n\nfrom analytics.entities.entity import Entity, EntityWithId\nfrom analytics.entities.primitive import Primitive\nfrom analytics.entities.references.data import DataReference\nfrom analytics.entities.references.document import DocumentReference\nfrom analytics.misc.utils import (\n enforce_field,\n has_path,\n process_json_glob,\n seq_to_map,\n process_json,\n)\n\n\nclass Pipeline(EntityWithId):\n \"\"\"\n An object representation of fields from a pipeline JSON document that\n we care about for analysis.\n \"\"\"\n\n def __init__(self, pipeline_dict: dict, **kwargs):\n self.name: str = pipeline_dict.get(\"name\")\n\n enforce_field(pipeline_dict, \"digest\")\n self.digest: str = pipeline_dict[\"digest\"]\n self.id: str = pipeline_dict.get(\"id\")\n self.source_name: Optional[str] = None\n if has_path(pipeline_dict, [\"source\", \"name\"]):\n self.source_name = pipeline_dict[\"source\"][\"name\"]\n\n self.inputs: List[str] = []\n for input_dict in pipeline_dict[\"inputs\"]:\n self.inputs.append(input_dict[\"name\"])\n\n self.outputs: List[DataReference] = []\n for output_dict in pipeline_dict[\"outputs\"]:\n self.outputs.append(DataReference(output_dict[\"data\"]))\n\n self.steps: List[Union[Primitive, DocumentReference, Pipeline]] = []\n self.has_subpipeline = False\n for step in pipeline_dict[\"steps\"]:\n step_type = step[\"type\"]\n if step_type == \"PRIMITIVE\":\n self.steps.append(Primitive(step))\n elif step_type == \"SUBPIPELINE\":\n self.has_subpipeline = True\n # This reference will be dereferenced later by the loader\n # once all pipelines are available in memory.\n self.steps.append(DocumentReference(step[\"pipeline\"]))\n else:\n raise Exception(f\"unsupported pipeline_steps type '{step_type}'\")\n\n def post_init(self, entity_maps) -> None:\n \"\"\"\n Dereference any subpipelines and flatten out all steps\n and subpipeline steps into `self.steps`.\n \"\"\"\n self.dereference_subpipelines(entity_maps[\"pipelines\"])\n\n self.flattened_steps: List[Primitive] = []\n for step in self.steps:\n if isinstance(step, Pipeline):\n self.flattened_steps += step.steps\n else:\n self.flattened_steps.append(step)\n\n def get_id(self):\n return self.digest\n\n @property\n def num_steps(self) -> int:\n \"\"\"\n Recursively counts the number of steps in the pipeline,\n including any subpipelines.\n \"\"\"\n num_steps: int = 0\n for step in self.steps:\n if isinstance(step, Pipeline):\n num_steps += step.num_steps\n else:\n num_steps += 1\n return num_steps\n\n def is_tantamount_to(self, pipeline: \"Pipeline\") -> bool:\n \"\"\"\n Returns `True` if `self` has same steps as `pipeline`, which includes\n the same primitive/sub-pipeline and inputs at each step.\n \"\"\"\n\n if not Entity.are_lists_tantamount(self.outputs, pipeline.outputs):\n return False\n\n if len(self.steps) != len(pipeline.steps):\n return False\n\n for i, my_step in enumerate(self.steps):\n 
their_step = pipeline.steps[i]\n\n if type(my_step) != type(their_step):\n return False\n\n if isinstance(my_step, Primitive) or isinstance(my_step, Pipeline):\n if not my_step.is_tantamount_to(their_step):\n return False\n else:\n raise ValueError(f\"unsupported step type {type(my_step)}\")\n\n return True\n\n def dereference_subpipelines(self, pipelines: dict):\n \"\"\"\n Store an actual object pointer to each of this pipeline's subpipelines,\n rather than just a digest string, so the subpipelines can be easily\n accessed programmatically.\n \"\"\"\n if self.has_subpipeline:\n for i, step in enumerate(self.steps):\n if isinstance(step, DocumentReference):\n subpipeline = pipelines[step.digest]\n # Recurse down in case this pipeline has its own subpipelines\n subpipeline.dereference_subpipelines(pipelines)\n self.steps[i] = subpipeline\n\n def print_steps(self, *, use_short_path: bool = False, indent: int = 0):\n for step in self.steps:\n if isinstance(step, Primitive):\n if use_short_path:\n path = step.short_python_path\n else:\n path = step.python_path\n print((\"\\t\" * indent) + path)\n elif isinstance(step, Pipeline):\n step.print_steps(use_short_path=use_short_path, indent=indent + 1)\n else:\n raise ValueError(f\"unsupported step type {type(step)}\")\n\n def get_num_steps_off_from(self, pipeline: \"Pipeline\") -> int:\n \"\"\"\n Gets the number of steps `pipeline` and `self` have that are not identical.\n \"\"\"\n num_off: int = abs(len(self.steps) - len(pipeline.steps))\n\n for my_step, their_step in zip(self.steps, pipeline.steps):\n\n if isinstance(my_step, Primitive) and isinstance(their_step, Primitive):\n if not my_step.is_tantamount_to(their_step):\n num_off += 1\n\n elif isinstance(my_step, Pipeline) and isinstance(their_step, Pipeline):\n num_off += my_step.get_num_steps_off_from(their_step)\n\n elif isinstance(my_step, Primitive) and isinstance(their_step, Pipeline):\n num_off += their_step.num_steps\n\n elif isinstance(my_step, Pipeline) and isinstance(their_step, Primitive):\n num_off += my_step.num_steps\n\n else:\n raise ValueError(\n f\"unsupported step types {type(my_step)} and {type(their_step)}\"\n )\n\n return num_off\n\n def _get_steps_off_from(\n self, our_steps: List[Primitive], their_steps: List[Primitive]\n ) -> List[Tuple[Optional[Primitive], Optional[Primitive]]]:\n steps_off: List[Tuple[Optional[Primitive], Optional[Primitive]]] = []\n\n for my_step, their_step in itertools.zip_longest(our_steps, their_steps):\n\n if isinstance(my_step, Primitive) and isinstance(their_step, Primitive):\n if not my_step.is_tantamount_to(their_step):\n steps_off.append((my_step, their_step))\n\n elif isinstance(my_step, Pipeline) and isinstance(their_step, Pipeline):\n steps_off += my_step.get_steps_off_from(their_step)\n\n elif not isinstance(my_step, Pipeline) and isinstance(their_step, Pipeline):\n if isinstance(my_step, Primitive):\n steps_off += self._get_steps_off_from([my_step], their_step.steps)\n else:\n steps_off += self._get_steps_off_from([], their_step.steps)\n\n elif isinstance(my_step, Pipeline) and not isinstance(their_step, Pipeline):\n if isinstance(their_step, Primitive):\n steps_off += self._get_steps_off_from(my_step.steps, [their_step])\n else:\n steps_off += self._get_steps_off_from(my_step.steps, [])\n\n elif my_step is None and isinstance(their_step, Primitive):\n steps_off.append((None, their_step))\n\n elif isinstance(my_step, Primitive) and their_step is None:\n steps_off.append((my_step, None))\n\n else:\n raise ValueError(\n f\"unsupported 
types {type(my_step)} and {type(their_step)}\"\n )\n return steps_off\n\n def get_steps_off_from(\n self, pipeline: \"Pipeline\"\n ) -> List[Tuple[Optional[Primitive], Optional[Primitive]]]:\n \"\"\"\n Gets the python paths of the steps `pipeline` and `self` have that are not identical.\n Returns a list of 2-tuples. Each entry is a pair of primitives that mismatch among\n the pipelines.\n \"\"\"\n return self._get_steps_off_from(self.steps, pipeline.steps)\n\n @classmethod\n def from_json(cls, path: str) -> \"Pipeline\":\n return process_json(path, cls)\n\n @classmethod\n def from_json_glob(cls, glob_pattern: str) -> Dict[str, \"Pipeline\"]:\n \"\"\"\n Goes to all files matching `glob_pattern` and\n tries to treat them like a json pipeline definition\n and load them into a map of pipeline digests to\n constructed `Pipeline` objects.\n \"\"\"\n pipelines: Iterator = process_json_glob(glob_pattern, cls)\n return seq_to_map(pipelines, lambda pipeline: pipeline.get_id())\n","sub_path":"analytics/entities/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":8920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558506710","text":"import pickle \nimport numpy as np\nimport pandas as pd\nimport argparse\nimport sys\n\n\nfrom calibration_RMSE_pool import experiment as experiment_t\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='Basic Simulation')\n\n\tparser.add_argument(\"-cat\", \"--Category\", default = 'all', type=str, help=\"Category of Input Data, default is all\")\n\tparser.add_argument(\"-s\", \"--Start\", default = '2020-08-17', type=str, help=\"First Date of Simulation\")\n\tparser.add_argument(\"-sp\", \"--step_pol\",type=int, help=\"first valid date of pol\", required=True) # Required\n\n\tparser.add_argument(\"-epoch\", \"--Epoch\",default=15,type=int, help=\"Iteration Number for the model\")\n\tparser.add_argument(\"-days\", \"--Days\",default=42,type=int, help=\"Length of Simulaiton\") \n\targs = parser.parse_args()\n\n\tcat = args.Category\n\tstart_date = args.Start\n\tsp = args.step_pol\n\tepoch = args.Epoch\n\tdays = args.Days\n \n \n\texperiment_t(cat, sp, epoch, start_date, days)\n\n","sub_path":"calibration/run_simulation_pool.py","file_name":"run_simulation_pool.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"180031356","text":"# AGC 028 A\n# Math\nN, M = map(int, input().split())\nS = input()\nT = input()\n\ndef gcd(a,b):\n if b == 0:\n return a\n return gcd(b,a%b)\ndef lcm(a,b):\n return a*b//gcd(a,b)\n\nif S[0] != T[0]:\n print(-1)\n exit(0)\nif S == T:\n print(N)\n exit(0)\n\nL = lcm(N,M)\np = L//N\nq = L//M\nU = \"\"\nk = 0\n\nif gcd(M,N) > 1:\n for k in range(1,L):\n if k%p == 0 and k%q == 0:\n if S[k//p] != T[k//q]:\n print(-1)\n exit(0)\n print(L)\nelse:\n print(L)\n","sub_path":"AGC028A.py","file_name":"AGC028A.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478702312","text":"import sys\nimport numpy as np\nimport sklearn.base\nimport theano as th\nimport theano.tensor as tns\nfrom utils import adam\n\n\nsys.setrecursionlimit(100000)\n\n\n#############################################################\n# DEFINE THE LAYERS\n#############################################################\n\nclass PReLU(object):\n \"\"\" PReLU activation \"\"\"\n def __init__(self, 
x):\n i_alpha = 0\n self.alpha = th.shared(value=i_alpha, borrow=True)\n self.result = tns.switch(x < 0, self.alpha * x, x)\n self.params = [self.alpha]\n\n\nclass DilatedConv1D(object):\n \"\"\" Creates a dilated convolutional layer\n Args:\n x: input\n rnd: a random number generator used to initialize weights\n dilation: the dilation factor for each layer\n filter_height: the samples that are included in each convolution, after dilating through height\n filter_width: the samples that are included in each convolution, after dilating through width\n n_filters: how many filters to learn for the dilated convolution\n n_channels: number of channels in input data\n apply_bias: boolean, it is set to True to use bias term by default\n activation: the activation function, `linear` by default\n \"\"\"\n def __init__(self, x, rnd, dilation, filter_height, filter_width, n_filters, n_channels, apply_bias=True,\n activation='linear'):\n self.input = x\n self.dilation = dilation\n self.filterHeight = filter_height\n self.filterWidth = filter_width\n self.nFilters = n_filters\n self.nChannels = n_channels\n\n # Initialization of filter for each layer of size:\n # (n_filters, n_channels in input, filter_height, filter_width)\n if activation == 'tanh':\n i_filters = rnd.uniform(-np.sqrt(6) / np.sqrt(2 * filter_width * n_filters),\n np.sqrt(6) / np.sqrt(2 * filter_width * n_filters),\n [n_filters, n_channels, filter_height, filter_width]).astype(np.float64)\n elif activation == 'sigmoid':\n i_filters = rnd.uniform(-4 * np.sqrt(6) / np.sqrt(2 * filter_width * n_filters),\n 4 * np.sqrt(6) / np.sqrt(2 * filter_width * n_filters),\n [n_filters, n_channels, filter_height, filter_width]).astype(np.float64)\n elif activation == 'relu':\n i_filters = rnd.normal(0, np.sqrt(2) / np.sqrt(filter_width * n_filters),\n [n_filters, n_channels, filter_height, filter_width]).astype(np.float64)\n else:\n i_filters = rnd.uniform(-np.sqrt(6) / np.sqrt(filter_width * n_filters),\n np.sqrt(6) / np.sqrt(filter_width * n_filters),\n [n_filters, n_channels, filter_height, filter_width]).astype(np.float64)\n\n self.filters = th.shared(value=i_filters, borrow=True)\n # Convolve input feature map with filters\n result = tns.nnet.conv2d(self.input, self.filters, border_mode='valid',\n filter_dilation=(1, self.dilation))\n # Check for bias\n if apply_bias:\n # Define bias\n i_bias = np.zeros([n_filters], dtype=np.float64)\n self.bias = th.shared(value=i_bias, borrow=True)\n # Store parameters of this layer\n self.params = [self.filters, self.bias]\n # Apply bias\n result += self.bias[None, :, None, None]\n else:\n self.params = [self.filters]\n\n self.output = result\n\n\n#############################################################\n# BUILD THE MODEL\n#############################################################\n\nclass WaveNetCond(object):\n def __init__(self, input_x, rnd, n_cond, n_stacks, dilations, n_filters, filter_width, n_channels):\n # Input shape is (n_batches = 1, n_channels, 1, N)\n self.result = input_x\n self.params = []\n self.L2 = 0\n\n # Define apply_bias and activation used in DilatedConv1D layer\n apply_bias = True\n activation = 'relu'\n\n for s in range(n_stacks):\n for i in range(len(dilations)):\n original_x = self.result\n\n # Input will have n_channels channels, output will have n_filters channels\n output = DilatedConv1D(self.result, rnd, dilations[i], 1, filter_width, n_filters, n_channels,\n apply_bias, activation)\n self.params += output.params\n # Use regularization -> L2\n self.L2 += 0.5 * 
tns.sum(tns.sqr(output.params[0]))\n\n output_prelu = PReLU(output.output)\n self.result = output_prelu.result\n\n # Add a residual connection from original_x to output\n output = DilatedConv1D(original_x, rnd, 1, 1, 1, n_filters, n_channels,\n apply_bias, activation)\n self.params += output.params\n # Use regularization -> L2\n self.L2 += 0.5 * tns.sum(tns.sqr(output.params[0]))\n\n original_x = output.output\n n_channels = n_filters\n\n if filter_width == 1:\n self.result += original_x[..., :]\n else:\n self.result += original_x[..., dilations[i]:]\n\n # End with a 1x1 convolution, to reduce n_channels back to n_cond\n output = DilatedConv1D(self.result, rnd, 1, 1, 1, n_cond, n_channels, apply_bias)\n self.params += output.params\n # Use regularization -> L2\n self.L2 += 0.5 * tns.sum(tns.sqr(output.params[0]))\n\n self.resultFull = output.output\n self.result = self.resultFull[..., 0:-1]\n\n\nclass DeepTimeSeriesRegressor(sklearn.base.BaseEstimator):\n def __init__(self, n_stacks=1, normalize=True, use_lag=False, learning_rate=1e-3, reg_rate=1e-3,\n train_iters=20000, random_state=1234, verbose=False):\n self._params = dict(\n n_stacks=n_stacks,\n normalize=normalize,\n use_lag=use_lag,\n learning_rate=learning_rate,\n reg_rate=reg_rate,\n train_iters=train_iters,\n random_state=random_state\n )\n self._predict_fn = None\n self._train_data_tail = None\n self._rec_field = None\n self._n_cond = None\n\n self._lag_init_values = None\n self._train_mean = None\n self._train_std = None\n self.verbose = verbose\n\n def fit(self, x, y):\n if isinstance(x, tuple):\n x, y = x\n\n if hasattr(x, 'shape'): # assume that it is `pandas df` or `np ndarray`\n x = np.asarray(x, dtype=np.float64)\n if x.ndim == 1:\n x = np.expand_dims(x, axis=1)\n\n y = np.asarray(y, dtype=np.float64)\n if y.ndim == 1:\n y = np.expand_dims(y, axis=1)\n\n data = y\n if hasattr(x, 'shape'):\n data = np.concatenate((data, x), axis=1)\n\n self._n_cond = data.shape[1]\n\n train_fn = self._build_model()\n self._train_data_tail = data[-self._rec_field:, ...]\n\n if self._params['use_lag']:\n data = self._to_lag(data)\n\n if self._params['normalize']:\n data = self._to_norm(data, store_moments=True)\n\n data = self._to_data_transform(data)\n for j in range(0, self._params['train_iters']):\n cost = train_fn(data)\n if self.verbose and j % 1000 == 0:\n print(j, cost)\n\n return self\n\n def predict(self, x=None):\n one_step_ahead = True if x is None else False\n y, cut = None, True\n if isinstance(x, tuple):\n x, y, cut = x\n iterative_predict = True if y is None else False\n\n if iterative_predict:\n if one_step_ahead:\n data = self._train_data_tail\n n_test = 1\n else:\n x, y = np.asarray(x, dtype=np.float64), np.zeros(shape=(x.shape[0], 1), dtype=np.float64)\n if x.ndim == 1:\n x = np.expand_dims(x, axis=1)\n data = np.concatenate((y, x), axis=1)\n n_test = data.shape[0]\n data = np.concatenate((self._train_data_tail, data))\n else:\n x, y = np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)\n if y.ndim == 1:\n y = np.expand_dims(y, axis=1)\n if x.ndim == 1:\n x = np.expand_dims(x, axis=1)\n data = np.concatenate((y, x), axis=1)\n n_test = data.shape[0]\n data = np.concatenate((self._train_data_tail, data))\n\n if self._params['use_lag']:\n data = self._to_lag(data)\n\n if self._params['normalize']:\n data = self._to_norm(data)\n\n data = self._to_data_transform(data, use_train_data_tail=iterative_predict)\n\n if iterative_predict:\n if one_step_ahead:\n predicts = self._predict_fn(data)\n else:\n predicts = 
self._iterative_predict(data, n_test)\n else:\n predicts = self._predict_fn(data)[..., self._train_data_tail.shape[0]:]\n if cut:\n predicts = predicts[..., :-1]\n\n predicts = self._from_data_transform(predicts)\n\n if self._params['normalize']:\n predicts = self._from_norm(predicts)\n\n if self._params['use_lag']:\n predicts = self._from_lag(predicts)\n\n if predicts.ndim == 2:\n predicts = predicts[:, 0]\n elif predicts.ndim > 2:\n raise ValueError(\"Dimension ERROR, predictions should have 1D or 2D shape\")\n\n if iterative_predict:\n if one_step_ahead:\n predicts = predicts[-1:]\n\n return np.asarray(predicts)\n\n def _iterative_predict(self, data, n_test):\n to_insert = np.zeros(shape=n_test, dtype=np.float64)\n\n base_idx = [0] * (data.ndim - 1)\n value_idx = tuple(base_idx + [-1]) # (0, 0, 0, -1)\n\n train_data = data[..., :data.shape[-1] - n_test]\n output = self._predict_fn(train_data)\n to_insert[0] = output[value_idx]\n\n for t in range(n_test - 1, 0, -1):\n test_data = data[..., :data.shape[-1] - t]\n\n for idx in range(n_test - t):\n # 0, 1, 2 (range=3) -> -3, -2, -1\n idx_to_insert = train_data.shape[-1] + idx\n # print('idx_to_insert', idx_to_insert)\n # idx_to_insert = -(n_test - t + idx)\n test_data[tuple(base_idx + [idx_to_insert])] = to_insert[idx]\n\n test_data = np.append(np.zeros(shape=(data.shape[:-1] + (self._rec_field,)), dtype=np.float64),\n test_data, axis=-1)\n output = self._predict_fn(test_data[..., :])\n\n to_insert[n_test - t] = output[value_idx]\n\n return output\n\n def get_params(self, deep=False):\n return self._params\n\n @staticmethod\n def _compute_receptive_field(n_stacks, dilation, filter_width):\n \"\"\" Helper function to compute receptive field \"\"\"\n if filter_width > 1:\n receptive_field = n_stacks * (dilation * filter_width) - (n_stacks - 1)\n else:\n receptive_field = 1\n return receptive_field\n\n def _to_data_transform(self, data, use_train_data_tail=False):\n n_samples = data.shape[0]\n data = np.swapaxes(data, 0, 1)\n data = data.reshape(1, self._n_cond, 1, n_samples)\n\n if use_train_data_tail:\n return data\n return np.append(np.zeros(shape=(data.shape[:-1] + (self._rec_field,)), dtype=np.float64), data, axis=-1)\n\n def _from_data_transform(self, data):\n data = np.reshape(data, (self._n_cond, data.shape[-1]))\n return np.swapaxes(data, 0, 1)\n\n def _to_norm(self, data, store_moments=False):\n if store_moments:\n self._train_mean = np.mean(data)\n self._train_std = np.std(data)\n if self._train_std < 1e-8:\n self._train_std = 1e-8\n return (data - self._train_mean) / self._train_std\n\n def _from_norm(self, data):\n if self._train_std is None:\n raise Exception(\"Mean & std wasn't stored at training stage!\")\n return data * self._train_std + self._train_mean\n\n def _to_lag(self, data):\n out_data = np.zeros_like(data, dtype=np.float64)\n self._lag_init_values = data[0, ...]\n\n for row in range(1, data.shape[0]):\n prev_values = data[row - 1, ...]\n denominator = np.copy(prev_values)\n denominator[np.abs(denominator) < 1e-8] = 1.\n out_data[row, ...] = (data[row, ...] - prev_values) / denominator\n\n return out_data\n\n def _from_lag(self, data):\n out_data = np.zeros_like(data, dtype=np.float64)\n out_data[0, ...] = self._lag_init_values\n\n for row in range(1, data.shape[0]):\n out_data[row, ...] = (data[row, ...] + 1.) 
* out_data[row - 1, ...]\n\n return out_data\n\n def _build_model(self):\n # Set intermediate parameters\n rnd = np.random.RandomState(self._params['random_state'])\n n_stacks = self._params['n_stacks']\n reg_rate = self._params['reg_rate']\n lr = self._params['learning_rate']\n n_filters = n_channels = n_cond = self._n_cond\n dilations = [1, 2, 4]\n filter_width = 2\n\n # Define receptive field to correct the input from the left\n self._rec_field = self._compute_receptive_field(n_stacks, dilations[-1], filter_width)\n\n # Define inputs and the model\n input_x = tns.tensor4('input', dtype='float64')\n model = WaveNetCond(input_x, rnd, n_cond, n_stacks, dilations, n_filters, filter_width, n_channels)\n\n # Define cost function and updates procedure\n cost = tns.sum(tns.abs_(input_x[..., self._rec_field:] - model.result)) + reg_rate * model.L2\n grads = tns.grad(cost, model.params)\n updates = adam(grads, model.params, learning_rate=lr)\n\n # Define the test and train functions\n train_fn = th.function(\n [input_x],\n cost,\n updates=updates,\n on_unused_input='warn'\n )\n\n self._predict_fn = th.function(\n [input_x],\n model.resultFull,\n # updates=updates,\n on_unused_input='warn'\n )\n return train_fn\n","sub_path":"auger_ml/algorithms/ts_dnn.py","file_name":"ts_dnn.py","file_ext":"py","file_size_in_byte":14651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"203982679","text":"for letter in 'Python':\n print('Current letter is: ', letter)\n\nfor veg in ['celery', 'mushroom', 'okra']:\n print('Current vegetable is: ', veg)\n\n#looping tuples!!\ntuplist = [(1, 2), (3, 4), (5, 6)]\nfor (a, b) in tuplist:\n print(a, b)\n\n\nimport os #for administrative automation!!\nfor k, v in os.environ.items():\n print(f\"{k} = {v}\")\n","sub_path":"pythoncbtnuggets/ex8-control-flowp2-1.py","file_name":"ex8-control-flowp2-1.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"289981215","text":"from math import sqrt\nfrom PIL import Image\n\nNUM_SLICES = 151\t\t\t\t#number of slices\nSAMPLE_FREQ = 10\t\t\t\t#frequency at which to output image\nsliceRange = range(NUM_SLICES)\t#frequently used range throughout this program\n\n#driver method for this program\ndef main():\n\n\t#get a 3D array of pixels from the different slices\n\tslices = [ [ line.split(' ') for line in open( \"./hw1/slices\" + str(i+1) ).read().splitlines() ] for i in sliceRange ]\n\t\n\t#initialize order list which will maintain the order of slices as it is changed throughout this program\n\torder = [ i for i in sliceRange ]\n\n\t#compute the disagreement matrix. \n\t#This matrix is computed via the two-norm of adjacent pixel vectors for every possible combination of slices.\n\tD = computeD( slices )\n\n\t#initialize hill climbing algorithm to rearrange slices\n\thillClimb( D, order, slices )\n\n#hill climbing algorithm to rearrange slices\ndef hillClimb( D, order, slices ):\n\n\t#counters\n\tcounter = 0\n\timgCounter = 0\n\n\t#output an image of how the data looks before algorithm\n\ttoImage( order, slices, imgCounter )\n\n\t#initialize algorithm parameters\n\tminDisagreement = computeOrderDisagreement( D, order )\n\tbestMin = minDisagreement\n\tminOrder = order[:]\n\n\tdone = False\n\twhile not done:\n\n\t\t#determine the neighbor state which minimizes disagreement\n\t\t#a neighbor is considered as any state in which one slice is placed at a different location than the current\n\t\tminOrder, minDisagreement = minNeighbor( D, order, minDisagreement, minOrder )\n\n\t\t#ensure progress is being made\n\t\tif bestMin - minDisagreement > 0:\n\n\t\t\t#store new minima\n\t\t\tbestMin = minDisagreement\n\t\t\torder = minOrder[:]\n\n\t\t\t#if counter reaches 10 output image\n\t\t\tcounter = counter + 1\n\t\t\tif counter == SAMPLE_FREQ:\n\t\t\t\timgCounter = imgCounter + 1\n\t\t\t\ttoImage( order, slices, imgCounter )\n\t\t\t\tcounter = 0\n\n\t\t#if no progress was made, output image and end\n\t\telse:\n\t\t\timgCounter = imgCounter + 1\n\t\t\ttoImage( order, slices, imgCounter )\n\t\t\tdone = True\n\n#determines the neighbor state which minimizes disagreement\n#a neighbor is considered as any state in which one slice is placed at a different location than the current\ndef minNeighbor ( D, order, lastMinDisagreement, lastMinOrder ):\n\n\t#initializations\n\tminDisagreement = lastMinDisagreement\n\tminOrder = lastMinOrder[:]\n\n\t#l will be the size of the block of slices to be moved\n\t#note that the largest block which will effectively achieve a change is half the range\n\tfor l in range( 1, NUM_SLICES ):\n\n\t\t#look for the rearrangement of a slice that minimizes disagreement\n\t\t#i will be the index to be repositioned\n\t\t#j will be the index to which i is being repositioned\n\t\tfor i in range( NUM_SLICES - l + 1 ):\n\t\t\tfor j in sliceRange:\n\t\t\t\tif i == j:\t\t\t\t\t#no change will occur\n\t\t\t\t\tcontinue\n\t\t\t\tif j >= i and j < l + i:\t#filter out j's that will lead to improper indexes\n\t\t\t\t\tcontinue\n\n\t\t\t\t#temp variable\n\t\t\t\tpossibleOrder = order[:]\n\n\t\t\t\t#perform the shift of the slice (or group of slices)\n\t\t\t\tif j > i:\n\t\t\t\t\tfor k in range(i,j-l,1):\n\t\t\t\t\t\tpossibleOrder[k], possibleOrder[k+l] = possibleOrder[k+l], possibleOrder[k]\n\t\t\t\telif j < i:\n\t\t\t\t\tfor k in range(i,j+l,-1):\n\t\t\t\t\t\tpossibleOrder[k-l], possibleOrder[k] = possibleOrder[k], possibleOrder[k-l]\n\n\t\t\t\t#determine disagreement of possible order\n\t\t\t\torderDisagreement = computeOrderDisagreement( D, possibleOrder )\n\n\t\t\t\t#update parameters if necessary\n\t\t\t\tif minDisagreement - orderDisagreement > 0:\n\t\t\t\t\tminDisagreement = orderDisagreement\n\t\t\t\t\tminOrder = possibleOrder[:]\n\n\treturn minOrder, minDisagreement\n\n#compute pairwise disagreement of a particular ordering of slices\ndef computeOrderDisagreement( D, order ):\n\n\torderDisagreement = 0\n\tfor i in range(NUM_SLICES - 1):\n\t\torderDisagreement = orderDisagreement + D[ order[i] ][ order[i+1] ]\n\treturn orderDisagreement\n\n#compute disagreement matrix of any possible pair of slices\ndef computeD( slices ):\n\n\tD = [[0.0 for col in 
sliceRange] for row in sliceRange]\n\tfor i in sliceRange:\n\t\tfor j in sliceRange:\n\t\t\tD[i][j] = computeDisagreement( slices[i], slices[j] )\n\treturn D\n\n#compute disagreement of slices by computing the two-norm of adjacent pixel vectors\n#slice1 is assumed to be on the left of slice2\ndef computeDisagreement( slice1, slice2 ):\n\t\n\tdisagreement = 0.0\n\theight = len( slice1 )\n\twidth = len( slice1[1] )\n\n\t#sum up the square differences between pixels\n\tfor i in range( height ):\n\t\tdiff = float( slice1[i][width-1] ) - float( slice2[i][1] )\n\t\tdisagreement = disagreement + pow(diff, 2.0)\n\n\t#compute two-norm and return\n\tdisagreement = sqrt( disagreement )\n\treturn disagreement\n\n#output slice order into an image\ndef toImage( order, slices, imgCounter ):\n\n\theight = len( slices[0] )\n\twidth = len( slices[0][0] ) - 1\n\n\t#new grayscale image\n\timg = Image.new( \"L\", (width * NUM_SLICES, height) )\n\tpixels = img.load()\n\n\t#loop through slices in correct order (note first index of slices is given by order[k]) and write pixel values\n\tfor k in sliceRange:\n\t\tfor i in range(height):\n\t\t\tfor j in range(width):\n\t\t\t\tpixels[j + k * width, i] = float( slices[order[k]][i][j+1] )\n\n\t#output image\n\timg.save( \"./snapshot_extra_credit\" + str(imgCounter) + \".jpg\", \"JPEG\" )\n\nmain()","sub_path":"optimization_extra/optimization_extra.py","file_name":"optimization_extra.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654213599","text":"import sys\nsys.path.insert(0, '..')\n\nfrom flask import Flask, request, abort, jsonify\nfrom sqlalchemy.sql.expression import func\nfrom flask_cors import CORS\nimport json\n\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\ndef create_app(env='PROD'):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app, env)\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET, POST, DELETE, OPTIONS')\n return response\n\n @app.route('/categories', methods=['GET'])\n def get_categories():\n categories = Category.query.order_by(Category.id).all()\n\n return jsonify({\n 'success': True,\n 'categories': {category.id: category.type for category in categories}\n })\n\n @app.route('/questions', methods=['GET'])\n def get_questions():\n questions = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, questions)\n categories = Category.query.order_by(Category.id).all()\n\n if len(current_questions) == 0:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions': len(questions),\n 'current_category': None,\n 'categories': {category.id: category.type for category in categories}\n })\n\n def paginate_questions(request, questions):\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n\n questions = [question.format() for question in questions]\n current_questions = questions[start:end]\n\n return current_questions\n\n @app.route('/questions/', methods=['DELETE'])\n def delete_question(id):\n try:\n question = Question.query.filter(Question.id == id).one_or_none()\n\n if question is None:\n abort(404)\n\n question.delete()\n\n return jsonify({\n 
'success': True,\n 'deleted': id,\n 'total_questions': len(Question.query.all())\n })\n\n except Exception as e:\n if e.code == 404:\n abort(404)\n else:\n abort(422)\n\n @app.route('/questions', methods=['POST'])\n def create_question():\n body = request.get_json()\n\n new_question = body.get('question', None)\n new_answer = body.get('answer', None)\n new_difficulty = body.get('difficulty', None)\n new_category = body.get('category', None)\n\n try:\n question = Question(question=new_question, answer=new_answer, difficulty=new_difficulty, category=new_category)\n question.insert()\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n 'total_questions': len(Question.query.all())\n })\n\n except:\n abort(422)\n\n @app.route('/questions/search', methods=['POST'])\n def get_questions_by_search_term():\n body = request.get_json()\n search_term = body.get('searchTerm', None)\n\n questions = Question.query.filter(Question.question.ilike('%{}%'.format(search_term.lower()))).all()\n\n return jsonify({\n 'success': True,\n 'questions': [question.format() for question in questions],\n 'total_questions': len(questions),\n 'current_category': None\n })\n\n @app.route('/categories//questions', methods=['GET'])\n def get_questions_by_category(id):\n questions = Question.query.filter(Question.category == id).all()\n current_questions = paginate_questions(request, questions)\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions': len(questions),\n 'current_category': id\n })\n\n @app.route('/quizzes', methods=['POST'])\n def get_quizz_question():\n body = request.get_json()\n\n previous_questions = body.get('previous_questions', None)\n quiz_category = body.get('quiz_category', None)\n\n if quiz_category.get('id') == 0:\n questions = Question.query.filter(Question.id.notin_(previous_questions)).order_by(func.random()).limit(1).all()\n else:\n questions = Question.query.filter(Question.category == quiz_category.get('id'), Question.id.notin_(previous_questions)).order_by(func.random()).limit(1).all()\n questions = [question.format() for question in questions]\n\n return jsonify({\n 'success': True,\n 'question': questions[0] if len(questions) > 0 else None\n })\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Not found\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(405)\n def not_allowed(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"method not allowed\"\n }), 405\n\n @app.errorhandler(500)\n def internal_error(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \"Internal Server Error\"\n }), 500\n\n return app\n\nif __name__ == '__main__':\n app = create_app(env='DEV')\n app.run(use_reloader=False)\n \n","sub_path":"projects/02_trivia_api/completed_code/backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61524573","text":"# 1. import Flask and PyMongo\nfrom flask import Flask, render_template\nfrom flask_pymongo import PyMongo\nimport scrape_mars2\n\n# 2. 
Create an app, being sure to pass __name__\napp = Flask(__name__)\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\nmongo = PyMongo(app)\n\n@app.route(\"/\")\ndef index():\n mars = mongo.db.mars.find_one()\n return render_template(\"index.html\", mars=mars)\n\n\n# image = scrape_mars.function-name()\n# return image = image\n\n\n@app.route(\"/scrape\")\ndef scrape():\n mars = mongo.db.mars\n mars_data = scrape_mars2.scrape_all()\n mars.update({},mars_data, upsert=True)\n return \"Done\"\n\nif __name__ == \"__main__\":\n app.run()\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592319343","text":"#-*- coding: utf-8 -*-\n#Created by Hans on 16-9-1\n\nimport os\n\nfrom tornado.options import options,define,parse_command_line,parse_config_file\nfrom scheduler import Scheduler\n\ndefine('hosts',default='127.0.0.1:2181',type=str,help='zookeeper hosts')\ndefine('root',default='/scheduler',type=str,help='zookeeper root node')\n\nif __name__ == '__main__':\n if os.path.exists('/etc/scheduler/scheduler.conf'):\n parse_config_file('/etc/scheduler/scheduler.conf')\n if os.path.exists('./scheduler.conf'):\n parse_config_file('./scheduler.conf')\n\n parse_command_line()\n scheduler = Scheduler(hosts=options.hosts,root=options.root)\n scheduler.start()\n try:\n scheduler.join()\n except KeyboardInterrupt:\n scheduler.shutdown()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245322823","text":"# Process simind listmode or projection file into ARF table in an OOP style\n# Faster way to compute the index\n# Process one photon at a time to reduce memory complexity\n# Python3\n# Jie (Laurie) Zhang\n# 04/16/15\n# e.g. 
python ARF_v4.py input_name output_name [lower_energy upper_energy]/[size1 size2]\nimport sys\nimport cProfile\nimport csv\nimport struct\nimport re\nfrom math import sqrt, atan, degrees, pi, floor\nimport numpy as np\n\nclass PhotonListMode(object):\n \"\"\"Listmode photon: 'X0','Y0','Z0','XPHANT','YPHANT','ZPHANT','XCRYSTAL','YCRYSTAL','ZCRYSTAL','Energy in Crystal','Photon Weight','Scatter Order'\"\"\"\n\n def __init__(self, locations, energy, weight, scatter):\n # self.X0 = locations[0]\n # self.Y0 = locations[1]\n # self.Z0 = locations[2]\n\n # self.Xp = locations[3]\n # self.Yp = locations[4]\n # self.Zp = locations[5]\n\n # self.Xc = locations[6]\n # self.Yc = locations[7]\n # self.Zc = locations[8]\n\n self.energy = energy\n self.weight = weight\n self.scatter = scatter\n\n self.X_vec = locations[6]-locations[0]\n self.Y_vec = locations[7]-locations[1]\n self.Z_vec = locations[8]-locations[2]\n\n def mod(self):\n travel_dist = sqrt(self.X_vec**2 + self.Y_vec**2 + self.Z_vec**2)\n return travel_dist\n\n def quadrant(self):\n quadrant = 0\n if (self.X_vec > 0 and self.Y_vec >= 0):\n quadrant = 1\n elif (self.X_vec <= 0 and self.Y_vec > 0):\n quadrant = 2\n elif (self.X_vec < 0 and self.Y_vec <= 0):\n quadrant = 3\n elif (self.X_vec >= 0 and self.Y_vec < 0):\n quadrant = 4\n return quadrant\n \n def cos_theta(self):\n cos_theta = self.Z_vec/self.mod()\n return cos_theta\n\n def tan_phi(self):\n tan_phi = None\n try:\n tan_phi = self.Y_vec/self.X_vec\n except ZeroDivisionError:\n tan_phi = float(\"inf\")\n return tan_phi\n\n def cot_phi(self):\n cot_phi = None\n try:\n cot_phi = self.X_vec/self.Y_vec\n except ZeroDivisionError:\n cot_phi = float(\"inf\")\n return cot_phi\n\nclass PhotonBinMode(object):\n \"\"\"Binned photon: 'Photon Weight', (XCRYSTAL_IND, YCRYSTAL_IND)\n \"\"\"\n def __init__(self, weight, multi_index, size1, size2):\n self.weight = weight\n\n self.X_vec = (multi_index[0] - float(size1/2)-0.5) * 0.540\n self.Y_vec = - (multi_index[1] - float(size2/2)-0.5) * 0.540\n self.Z_vec = 200 # cm\n\n def mod(self):\n travel_dist = sqrt(self.X_vec**2 + self.Y_vec**2 + self.Z_vec**2)\n return travel_dist\n\n def quadrant(self):\n quadrant = 0\n if (self.X_vec > 0 and self.Y_vec >= 0):\n quadrant = 1\n elif (self.X_vec <= 0 and self.Y_vec > 0):\n quadrant = 2\n elif (self.X_vec < 0 and self.Y_vec <= 0):\n quadrant = 3\n elif (self.X_vec >= 0 and self.Y_vec < 0):\n quadrant = 4\n return quadrant\n \n def cos_theta(self):\n cos_theta = self.Z_vec/self.mod()\n return cos_theta\n\n def tan_phi(self):\n tan_phi = None\n try:\n tan_phi = self.Y_vec/self.X_vec\n except ZeroDivisionError:\n tan_phi = float(\"inf\")*np.sign(self.Y_vec)\n return tan_phi\n\n def cot_phi(self):\n cot_phi = None\n try:\n cot_phi = self.X_vec/self.Y_vec\n except ZeroDivisionError:\n cot_phi = float(\"inf\")*np.sign(self.X_vec)\n return cot_phi\n\ndef read_listmode(filename, lower_energy, upper_energy, table):\n \"\"\"Read listmode data: 10 int16, 1 float, 1 interger*1\"\"\"\n with open(filename, \"rb\") as openfile:\n positions = openfile.read(2*9)\n while positions != \"\":\n try:\n locations = struct.unpack('h'*9, positions)\n energy = float(struct.unpack('h', openfile.read(2))[0])/10\n weight = float(struct.unpack('d', openfile.read(8))[0])\n scatter = int(struct.unpack('b', openfile.read(1))[0])\n\n # 20% energy window\n if float(lower_energy) <= energy <= float(upper_energy):\n photon = PhotonListMode(locations, energy, weight, scatter)\n index = ARF_table(photon)\n table[index[0], index[1]] += 
photon.weight\n\n positions = openfile.read(2*9)\n except:\n break\n return table\n\ndef read_projection(filename, size1, size2, table):\n \"\"\"Read projection data: 4 byte floats\"\"\"\n matrix = np.fromfile(filename, dtype = 'f4').reshape(size1, size2)\n it = np.nditer(matrix, flags=['multi_index'])\n while not it.finished:\n # print(\"%f <%s>\" % (it[0], it.multi_index),)\n photon = PhotonBinMode(it[0], it.multi_index, size1, size2)\n index = ARF_table(photon)\n table[index[0], index[1]] += photon.weight\n\n it.iternext()\n return table\n\ndef ARF_table(photon):\n \"\"\"Find the correct position in the table\"\"\"\n theta_ind = phi_ind = None\n quadrant = photon.quadrant()\n cos_theta = photon.cos_theta()\n tan_phi = photon.tan_phi()\n cot_phi = photon.cot_phi()\n\n if quadrant == 0:\n theta_ind = phi_ind = 0\n else:\n if 1 >= cos_theta >= 0.99:\n theta_ind = floor(1023*(cos_theta-1)/(0.99-1))\n elif 0.99 >= cos_theta >= 0.95:\n theta_ind = floor(512*(cos_theta-0.99)/(0.95-0.99)) + 1023\n elif 0.95 >= cos_theta >= 0.75:\n theta_ind = floor(256*(cos_theta-0.95)/(0.75-0.95)) + 512 + 1023\n elif 0.75 >= cos_theta >= 0:\n theta_ind = floor(256*(cos_theta-0.75)/(-0.75)) + 256 + 512 + 1023\n\n if abs(tan_phi) <= 1:\n if quadrant == 1:\n phi_ind = floor(255*tan_phi)\n elif quadrant == 2:\n phi_ind = 768 + floor(255*(tan_phi+1))\n elif quadrant == 3:\n phi_ind = 1024 + floor(255*tan_phi)\n elif quadrant == 4:\n phi_ind = 1792 + floor(255*(tan_phi+1))\n elif abs(cot_phi) <= 1:\n if quadrant == 1:\n phi_ind = 256 + floor(-255*(cot_phi-1))\n elif quadrant == 2:\n phi_ind = 512 + floor(-255*cot_phi)\n elif quadrant == 3:\n phi_ind = 1280 + floor(-255*(cot_phi-1))\n elif quadrant == 4:\n phi_ind = 1536 + floor(-255*cot_phi)\n\n if theta_ind == None or phi_ind == None:\n print(quadrant, cos_theta, tan_phi, cot_phi, theta_ind, phi_ind)\n\n return (theta_ind, phi_ind)\n\ndef normalize_table(table):\n solid_angles = np.zeros((2048, 512*4))\n delta_phi = np.zeros(2048)\n\n cos_list = np.concatenate([np.linspace(1., 0.99, 1025), np.linspace(0.99, 0.95, 1535-1024+2)[1:], np.linspace(0.95, 0.75, 1791-1536+2)[1:], np.linspace(0.75, 0., 2047-1792+2)[1:]]) \n tan_list13 = np.linspace(0., 1., 257)\n cot_list13 = np.linspace(1., 0., 257)\n tan_list24 = np.linspace(-1., 0., 257)\n cot_list24 = np.linspace(0., -1., 257)\n\n for ii in range(len(delta_phi)):\n if ii <= 255:\n delta_phi[ii] = delta_phi[ii+1024] = degrees(abs(atan(tan_list13[ii+1])-atan(tan_list13[ii])))\n elif 255 < ii <= 511:\n if cot_list13[ii%256+1] != 0:\n delta_phi[ii] = delta_phi[ii+1024] = degrees(abs(atan(1/cot_list13[ii%256+1])-atan(1/cot_list13[ii%256])))\n else:\n delta_phi[ii] = delta_phi[ii+1024] = 90 - degrees(atan(1/cot_list13[ii%256]))\n elif 511 < ii <= 767:\n if cot_list24[ii%256] != 0:\n delta_phi[ii] = delta_phi[ii+1024] = degrees(abs(atan(1/cot_list24[ii%256+1])-atan(1/cot_list24[ii%256])))\n else:\n delta_phi[ii] = delta_phi[ii+1024] = degrees(abs(atan(1/cot_list24[ii%256+1]))) - 90\n elif 767 < ii <= 1023:\n delta_phi[ii] = delta_phi[ii+1024] = degrees(abs(atan(tan_list24[ii%256+1])-atan(tan_list24[ii%256])))\n elif ii >= 1024:\n break\n\n for ii in range(2048):\n solid_angles[ii] = abs(cos_list[ii+1]-cos_list[ii]) * delta_phi\n\n table = 4*pi*table/(0.8910*1e6*solid_angles)\n # table = 4*pi*table/(1e6*solid_angles)\n return abs(table) # eliminate -0.0 entries\n\ndef main():\n \"\"\"Bin the photons into a 2048*2048 matrix according to cos_theta and tan_phi/cot_phi. 
Then normalize the table\n """\n # check the mode of the input file\n input_file = sys.argv[1]\n if re.search('lmf', input_file):\n table = np.zeros((2048, 512*4))\n table = read_listmode(sys.argv[1], sys.argv[3], sys.argv[4], table)\n \n table = normalize_table(table)\n np.savetxt(sys.argv[2]+'.txt',table,fmt='%.5f')\n\n elif re.search('bim', input_file):\n size1 = int(sys.argv[3])\n size2 = int(sys.argv[4])\n table = np.zeros((2048, 512*4))\n table = read_projection(sys.argv[1], size1, size2, table)\n \n table = normalize_table(table)\n np.savetxt(sys.argv[2]+'.txt',table,fmt='%.5f')\n \nif __name__ == "__main__":\n cProfile.run('main()',sys.argv[2]+'.log')\n # main()","sub_path":"ARF_v4.py","file_name":"ARF_v4.py","file_ext":"py","file_size_in_byte":9188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"644104817","text":"from SQL_Phrase import SQL_Phrase\r\nfrom DataBaseConnect import DataBaseConnect\r\nimport pyodbc\r\nimport datetime\r\n\r\ndef UpdateOperation(TblDef, TblName):\r\n s = SQL_Phrase(TblName)\r\n sql_value = s.SQLPramatersForUpdateValue(TblDef)\r\n sql_primary_key = s.FindPrimaryKeyField(TblDef)\r\n \r\n sql = \"UPDATE \" + TblName + \" SET \" + sql_value + \" WHERE \" + sql_primary_key + \" = @\" + sql_primary_key\r\n decription = \"Update Operation for Table: \" + TblName\r\n sp_par = s.SpParamatersWithPKey(TblDef)\r\n spName = \"usp_UpdateTBL\" + TblName\r\n \r\n s.CreateSP(decription, spName, sp_par, sql)\r\n del s\r\n return\r\n\r\ndef ReadOperation(TblName):\r\n s = SQL_Phrase(TblName)\r\n sql = \"Select * from \" + TblName\r\n decription = \"Read Operation for Table: \" + TblName\r\n spName = \"usp_ReadTBL\" + TblName\r\n s.CreateSP(decription, spName, '', sql)\r\n del s\r\n return\r\n\r\ndef CreateOperation(TblDef, TblName):\r\n s = SQL_Phrase(TblName)\r\n sql_parm = s.PramatersForSQL(TblDef)\r\n sql_parm = \"(\" + sql_parm + \")\"\r\n sql_value = s.SQLPramatersForValue(TblDef)\r\n sql_value = \"(\" + sql_value + \")\"\r\n\r\n sql = \"INSERT INTO \" + TblName + \" \" + sql_parm + \" VALUES \" + sql_value\r\n sp_par = s.PramatersForSP(TblDef)\r\n decription = \"Insert Operation for Table: \" + TblName\r\n spName = \"usp_InsertTBL\" + TblName\r\n s.CreateSP(decription, spName, sp_par, sql)\r\n del s\r\n return\r\n\r\ndef DeleteOperation(TblDef, TblName):\r\n s = SQL_Phrase(TblName)\r\n sql_primary_key = s.FindPrimaryKeyField(TblDef)\r\n sql = \"DELETE FROM \" + TblName + \" WHERE \" + sql_primary_key + \" = @\" + sql_primary_key\r\n decription = \"Delete Operation for Table: \" + TblName\r\n sp_par = s.SpParamatersWithPKey(TblDef)\r\n spName = \"usp_DeleteElementTBL\" + TblName\r\n s.CreateSP(decription, spName, sp_par, sql)\r\n del s\r\n return\r\n\r\ndef CRUD():\r\n x = DataBaseConnect()\r\n Table = 'PeopleTable' # Enter the name of the table\r\n TblDef = x.ReadTableColumn(Table)\r\n UpdateOperation(TblDef, Table)\r\n ReadOperation(Table)\r\n CreateOperation(TblDef, Table)\r\n DeleteOperation(TblDef, Table)\r\n del x\r\n return\r\n\r\nif __name__ == '__main__':\r\n CRUD()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"38024094","text":"#!/usr/bin/env python3\n#\n\nfrom tkinter import *\nimport random\n\nclass Application(Frame):\n def __init__(self, master):\n super(Application, self).__init__(master)\n self.grid()\n self.create_widgets()\n self.random_number = None\n 
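# reset() draws a new secret number and restores the remaining-tries counter\n 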
self.reset()\n\n def create_widgets(self):\n Label(self,\n text = \"Jaka to liczba?\"\n ).grid(row = 0, column = 0, sticky = W)\n self.number_ent = Entry(self)\n self.number_ent.grid(row = 0, column = 1 , sticky = W)\n\n Button(self, text = \"Ok\",\n command = self.update_results\n ).grid(row = 1, column = 0, sticky = W)\n Button(self, text = \"Reset\",\n command = self.reset\n ).grid(row = 1, column = 1, sticky = W)\n\n self.tries_lbl = Label(self,\n text = \"Pozostała liczba prób: \"\n ).grid(row = 2, column = 0, sticky = W)\n self.tries_left_lbl_var = StringVar()\n self.tries_left_lbl_var.set(\"7\")\n self.tries_left_lbl = Label(self, textvariable = self.tries_left_lbl_var)\n self.tries_left_lbl.grid(row = 2, column = 1, sticky = W)\n\n self.screen_txt = Text(self, width = 40, height = 6, wrap = WORD)\n self.screen_txt.grid(row = 3, column = 0, columnspan = 2)\n\n def update_results(self):\n number = 0\n if self.number_ent.get():\n number = int(self.number_ent.get())\n self.tries -= 1\n if self.tries < 0: self.tries = 0\n if self.tries > 0 and number > self.random_number:\n self.info = \"Szukana liczba jest mniejsza.\"\n elif self.tries > 0 and number < self.random_number:\n self.info = \"Szukana liczba jest większa.\"\n elif number == self.random_number and self.tries > 0:\n self.info = \"Brawo, odgadłeś szukaną liczbę.\"\n elif self.tries == 0:\n self.info = \"Niestety skończyły Ci się próby.\"\n\n self.tries_left_lbl_var.set(self.tries)\n self.screen_txt.delete(0.0, END)\n self.screen_txt.insert(0.0, self.info)\n pass\n\n def reset(self):\n self.random_number = random.randint(1,100)\n self.tries = 7\n self.tries_left_lbl_var.set(self.tries)\n self.info = \"Odgadnij liczbę z zakresu 1-100.\"\n\n self.screen_txt.delete(0.0, END)\n self.screen_txt.insert(0.0, self.info)\n\nroot = Tk()\nroot.title(\"Jaka to liczba?\")\napp = Application(root)\nroot.mainloop()\n","sub_path":"Python_dla_każdego_Podstawy_programowania_Wydanie_III/jaka_to_liczba_GUI.py","file_name":"jaka_to_liczba_GUI.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156635431","text":"import pkg_resources\n\nENTRY_POINT = 'conda.env.installers'\n\n\nclass InvalidInstaller(Exception):\n def __init__(self, name):\n msg = 'Unable to load installer for {}'.format(name)\n super(InvalidInstaller, self).__init__(msg)\n\n\ndef get_installer(name):\n for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT):\n if entry_point.name == name:\n return entry_point.load()\n\n raise InvalidInstaller(name)\n","sub_path":"conda_env/installers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70180366","text":"# Configuration file for the Sphinx documentation builder.\n# Mastering Plone documentation build configuration file\n\n\n# -- Path setup --------------------------------------------------------------\n\nfrom datetime import datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath(\".\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Plone Training\"\ncopyright = \"\"\"The text and illustrations in this website are licensed\n by the Plone Foundation under a Creative Commons Attribution 4.0\n International license.\"\"\"\nauthor = \"Plone Community\"\ntrademark_name = \"Plone\"\n\nnow = datetime.now()\nyear = str(now.year)\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = year\n# The full version, including alpha/beta/rc tags.\nrelease = year\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = \"\"\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = \"%B %d, %Y\"\n\n\n# -- General configuration ----------------------------------------------------\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Add any Sphinx extension module names here, as strings.\n# They can be extensions coming with Sphinx (named \"sphinx.ext.*\")\n# or your custom ones.\nextensions = [\n \"myst_parser\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx_copybutton\",\n \"sphinxcontrib.spelling\",\n \"sphinxext.opengraph\",\n]\n\n# For more information see:\n# https://myst-parser.readthedocs.io/en/latest/syntax/optional.html\nmyst_enable_extensions = [\n \"deflist\", # You will be able to utilise definition lists\n # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#definition-lists\n \"linkify\", # Identify “bare” web URLs and add hyperlinks.\n \"colon_fence\", # You can also use ::: delimiters to denote code fences,\\\n # instead of ```.\n]\n\n# If true, the Docutils Smart Quotes transform, originally based on SmartyPants\n# (limited to English) and currently applying to many languages, will be used\n# to convert quotes and dashes to typographically correct entities.\n# Note to maintainers: setting this to `True` will cause contractions and\n# hyphenated words to be marked as misspelled by spellchecker.\nsmartquotes=False\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = \"sphinx.pygments_styles.PyramidStyle\"\npygments_style = \"sphinx\"\n\n# Options for the linkcheck builder\n# Ignore localhost\nlinkcheck_ignore = [\n r\"http://localhost:\\d+\",\n r\"http://127.0.0.1:8080\",\n r\"http://example.com\",\n r\"https://github.com/plone/training/issues/new/choose\", # requires auth\n r\"https://www.linode.com\", # linkcheck makes a HEAD request, which is 403\n r\"https://www.virtualbox.org\", # times out often\n]\nlinkcheck_anchors = False\nlinkcheck_timeout = 10\n\n# This is our wordlist with know words, like Github or Plone ...\nspelling_word_list_filename = \"spelling_wordlist.txt\"\nspelling_ignore_pypi_package_names = True\n\n# The suffix of source filenames.\nsource_suffix = {\n \".md\": \"markdown\",\n}\n\n# The encoding of source files.\n# source_encoding = \"utf-8-sig\"\n\n# The master toctree 
document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n \"spelling_wordlist.txt\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\n\nhtml_logo = \"_static/logo.svg\"\nhtml_favicon = \"_static/favicon.ico\"\n\nhtml_css_files = [\"custom.css\",\n (\"print.css\", {\"media\": \"print\"})]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_theme_options = {\n \"repository_url\": \"https://github.com/plone/training\",\n \"repository_branch\": \"main\",\n \"path_to_docs\": \"docs\",\n \"use_repository_button\": True,\n \"use_issues_button\": True,\n \"use_edit_page_button\": True,\n \"extra_navbar\": \"\"\"\n
 <a href=\"https://plone.org\">\n plone.org\n </a>\n 
\"\"\",\n}\n\n\n# -- Options for myST markdown conversion to html -----------------------------\n\nmyst_enable_extensions = [\n \"deflist\",\n \"linkify\",\n \"colon_fence\"\n]\n\n\n# -- Intersphinx configuration ----------------------------------\n\n# This extension can generate automatic links to the documentation of objects\n# in other projects. Usage is simple: whenever Sphinx encounters a\n# cross-reference that has no matching target in the current documentation set,\n# it looks for targets in the documentation sets configured in\n# intersphinx_mapping. A reference like :py:class:`zipfile.ZipFile` can then\n# linkto the Python documentation for the ZipFile class, without you having to\n# specify where it is located exactly.\n#\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html\n#\nintersphinx_mapping = {\n \"plonedocs\": (\"https://docs.plone.org/\", None),\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n\n# -- GraphViz configuration ----------------------------------\n\ngraphviz_output_format = \"svg\"\n\n\n# -- OpenGraph configuration ----------------------------------\n\nogp_site_url = \"https://training.plone.org/5/\"\nogp_description_length = 200\nogp_image = \"https://training.plone.org/5/_static/Plone_logo_square.png\"\nogp_site_name = \"Plone Training\"\nogp_type = \"website\"\nogp_custom_meta_tags = [\n '',\n]\n\n# -- sphinx_copybutton -----------------------\ncopybutton_prompt_text = r\"^ {0,2}\\d{1,3}\"\ncopybutton_prompt_is_regexp = True\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"55346303","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport numpy as np\nfrom utils.File.array import JoTiff\n\n\nclass TestJotiff(unittest.TestCase):\n \"\"\" 对 JoTiff 做单元测试 \"\"\"\n\n def test_init(self):\n path = r'/Users/jokker/Documents/Code/Function/Data/tif/a.tif'\n a = JoTiff(path)\n\n def test_get_array(self):\n a = JoTiff(r\"/Users/jokker/Documents/Code/Function/Data/tif/a.tif\").get_array()\n self.assertTrue(isinstance(a, np.ndarray))\n\n\n\nif __name__ == \"__main__\":\n\n unittest.main()\n\n","sub_path":"utils/utils_testing/file_array_Jotiff_test.py","file_name":"file_array_Jotiff_test.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277512109","text":"\"\"\"\n用于tf keras训练的tf dataset pineline,需要实现生成tf record\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport os\n\n\ndef input_fn_from_tfrecord(tfrecord_filenames_list, tf_example_parser_fn, batch_size=32, map_cores=4, augmentation=False,\n aug_fn=None, debug=False, shuffle_flag=False, shuffle_buffer_size=1000, **kwargs):\n \"\"\"\n simple code pipeline for building input_fn consuming tklib records\n :param tfrecord_filenames_list: dir to a tklib record of list of which\n :param tf_example_parser_fn: fn for parse tklib sample from tklib record\n :param batch_size: batch size\n :param map_cores: num of corse for parallism\n :param augmentation: augmentation flag\n :param aug_fn: augmentation fn\n :param debug: debug flag\n :param shuffle_buffer_size:\n :param kwargs:\n :return:\n \"\"\"\n if type(tfrecord_filenames_list) is str:\n tfrecord_filenames_list = [tfrecord_filenames_list]\n # read data from tklib record\n num_parallel_reads = len(tfrecord_filenames_list)\n dataset = 
tf.data.TFRecordDataset(tfrecord_filenames_list, num_parallel_reads=num_parallel_reads,\n buffer_size=None)\n dataset = dataset.prefetch(buffer_size=batch_size * 2)\n # avoid shuffle to stabilize val metrics\n if shuffle_flag:\n dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)\n dataset = dataset.repeat()\n # Parse the record into tensors.\n dataset = dataset.map(tf_example_parser_fn, num_parallel_calls=map_cores)\n # augmentation\n if augmentation:\n assert aug_fn is not None, 'augmentation function must be provided if augmentation flag is true'\n dataset = dataset.map(aug_fn, num_parallel_calls=map_cores)\n dataset = dataset.batch(batch_size)\n if debug:\n iterator = dataset.make_one_shot_iterator()\n dataset = iterator.get_next()\n return dataset\n\n\nclass InputFnFromTFRecords:\n def __init__(self, tfrecords, parser_fn, batch_size=32, val_num=None, **kwargs):\n self.tfrecords = [tfrecords] if type(tfrecords) is str else tfrecords\n self.parser_fn = parser_fn\n self.kwargs = kwargs\n self._read_tfrecords(**kwargs)\n\n def _split_train_val(self, val_num=1000):\n self._val_dataset_raw = self._dataset_raw.take(val_num*len(self.tfrecords))\n self._train_dataset_raw = self._dataset_raw.skip(val_num * len(self.tfrecords))\n\n def _read_tfrecords(self, num_parallel_reads=None, buffer_size=None, **kwargs):\n # read data from tklib record\n num_parallel_reads = num_parallel_reads or len(self.tfrecords)\n self._dataset_raw = tf.data.TFRecordDataset(self.tfrecords, num_parallel_reads=num_parallel_reads,\n buffer_size=buffer_size)\n\n def _dataset_iteration_initialization(self):\n dataset = dataset.prefetch(buffer_size=batch_size * 2)\n # avoid shuffle to stabilize val metrics\n # dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)\n dataset = dataset.repeat()\n # Parse the record into tensors.\n dataset = dataset.map(parser_fn, num_parallel_calls=map_cores)\n # augmentation\n if augmentation:\n assert aug_fn is not None, 'augmentation function must be provided if augmentation flag is true'\n dataset = dataset.map(aug_fn, num_parallel_calls=map_cores)\n dataset = dataset.batch(batch_size)\n if debug:\n iterator = dataset.make_one_shot_iterator()\n dataset = iterator.get_next()\n return dataset\n\n def debug(self):\n pass\n\ndef test_single_frame():\n \"\"\"\n 检查单帧的tfrecord是否正常\n :return:\n \"\"\"\n tf_record_path = [r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\sp_ful.record',\n r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\alpha_unique_id.record',\n r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\coco_ful_filter.record',\n r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\manual_ful.record',\n r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\matting-coarse_ful.record'\n ]\n weights = [1] * len(tf_record_path)\n iterator_train, iterator_val = input_fn_from_tfrecord_binary_test(tf_record_path, batch_size=16, augmentation=1, val_split=True, shape=320, map_cores=4, debug=1)\n idx = 0\n max_num = 1000\n sess = tf.Session()\n while True:\n time_pre = time.time()\n x, y = sess.run(iterator_train)\n print('new batch')\n print(time.time() - time_pre)\n for item_x, item_y in zip(x, y):\n print(idx)\n item_x += 1\n item_x *= 127.5\n item_x = np.uint8(item_x)\n plt.subplot(121)\n plt.imshow(np.squeeze(np.squeeze(item_y[:, :])), 'gray')\n plt.subplot(122)\n plt.imshow(np.squeeze(item_x))\n plt.show()\n # item_x += 1\n # item_x *= 127.5\n # item_y = np.squeeze(item_y)\n # merge = apply_mask(item_x, item_y)\n saved_dir = 
os.path.join(r'F:\\test', str(idx)+'.png')\n plt.savefig(saved_dir)\n # io.imsave(saved_dir, merge)\n if idx > max_num:\n exit(0)\n idx += 1\n\n\ndef parse_single_frame_tf_record(tf_records):\n \"\"\"\n parse tklib records\n :param tf_records:\n :return:\n usage sample:\n test_dir = r'F:\\heshuai\\proj\\matting_tf\\dataset_utils\\generators\\alpha_train_256_0812.record'\n parse_single_frame_tf_record(test_dir)\n \"\"\"\n def _tf_example_parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string)\n })\n image = tf.image.decode_image(features['image'])\n annotation = tf.image.decode_image(features['mask'])\n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n image_shape = [height, width, 3]\n annotation_shape = [height, width, 1]\n image = tf.reshape(image, image_shape)\n mask = tf.reshape(annotation, annotation_shape)\n # cast dtype\n image = tf.cast(image, dtype=tf.float32)\n mask = tf.cast(mask, dtype=(tf.float32))\n return image, mask\n dataset = tf.data.TFRecordDataset(tf_records)\n # Parse the record into tensors.\n dataset = dataset.map(_tf_example_parser, num_parallel_calls=4)\n iterator = dataset.make_one_shot_iterator()\n data = iterator.get_next()\n with tf.Session() as sess:\n while True:\n img, mask = sess.run(data)\n img = np.uint8(np.squeeze(img))\n mask = np.uint8(np.squeeze(mask))\n plt.subplot(121)\n plt.imshow(img)\n plt.subplot(122)\n plt.imshow(mask, 'gray')\n plt.show()\n\n\nif __name__ == '__main__':\n test_single_frame()\n\n\n","sub_path":"tklib/training/input_fn.py","file_name":"input_fn.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9919269","text":"#!/usr/bin/env python3\n#\n\"\"\"\n Demonstrate sending asset tokens on the Fusion blockchain to timelock using the raw transaction method. You can use this method\n when you wish to sign the transaction offline and broadcast it later, or if you do not have an unlocked wallet (i.e. you are not using the IPC mode).\n\"\"\"\n#\n#\nimport os\nimport sys\nfrom datetime import datetime\n#import pdb ; pdb.set_trace()\n\n\n\n#web3fusion\nfrom web3fsnpy import Fsn\n\n# Remember to set your environment variable to run this test\n# e.g. 
export FSN_PRIVATE_KEY=123456789123456789ABCDEF \n\n\n\nlinkToChain = {\n 'network' : 'testnet', # One of 'testnet', or 'mainnet'\n 'provider' : 'WebSocket', # One of 'WebSocket', 'HTTP', or 'IPC'\n 'gateway' : 'default', # Either set to 'default', or specify your uri endpoint\n 'private_key' : os.environ[\"FSN_PRIVATE_KEY\"], # Do not include (comment out) for just read, or signed raw transactions\n}\n\n#\n\nweb3fsn = Fsn(linkToChain)\n\n\npub_key_sender = \"0x1111111111111111111111111111111111111111\"\npub_key_receiver = \"0x2222222222222222222222222222222222222222\"\n\n#asset_Id = '0x5fd3f254ae34bf9bf9dc46f72e4fbbc75844dbe6823f970fa3f7aaedb2925ff6'\nasset_Id = web3fsn.getAssetId('FSN')\n#asset_Id = \"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\nnumber_to_transfer = 2 # The number of tokens you wish to send\n\n# Find out some information about this asset\nasset_dict = web3fsn.getAsset(asset_Id,'latest')\n#print(asset_dict)\nasset_name = asset_dict['Symbol']\nprint('The asset has the symbol ',asset_name,' and decimals ',asset_dict['Decimals'])\n\n\nnToSend = int(number_to_transfer*10**float(asset_dict['Decimals']))\n\nnonce = web3fsn.getTransactionCount(pub_key_sender) # Get the nonce for the wallet\n\n# Construct the transaction\n#\n# Example of valid dates for 'start' and 'end'. Can also use 'now' and 'infinity'.\n#\"2007-03-01T13:00:00+0100\" or UTC = \"2007-03-01T12:00:00\"\n\ntransaction = {\n 'from': pub_key_sender,\n 'to': pub_key_receiver,\n 'nonce': nonce,\n 'asset': asset_Id,\n 'value': nToSend,\n #'start': 'now',\n #'end': '2020-06-01T06:00:59',\n 'start': '2020-12-20T06:01:01',\n 'end': 'infinity',\n}\n\nTxHash = web3fsn.sendRawTimeLock(transaction)\n\n#\nprint('Transaction hash = ',TxHash)\n#\n# We can optionally wait for the transaction to occur and block execution until it has done so, or times out after timeout seconds\nprint('Waiting for transaction to go through...')\nweb3fsn.waitForTransactionReceipt(TxHash, timeout=20)\n#\n#\nres = web3fsn.getTransaction(TxHash)\n#\n#print(res)\n#\n# Show the timelocks for the pub_key_receiver\n#\nasset_timelocks = web3fsn.getTimeLockBalance(asset_Id, pub_key_receiver, 'latest')\n#\nn_items = len(asset_timelocks.Items)\nprint('\\nNumber of timelocked ', asset_name, ' = ',n_items,'\\n')\n#\n#\nfor i in range(n_items):\n print('Asset ',i,'\\n')\n tm = asset_timelocks.Items[i].StartTime\n print('Start Time : ',datetime.fromtimestamp(tm).strftime('%c'))\n tm = asset_timelocks.Items[i].EndTime\n if tm >= web3fsn.BN():\n endtime = 'Infinity'\n else:\n endtime = datetime.fromtimestamp(tm).strftime('%c')\n print('End Time : ',endtime)\n val = int(asset_timelocks.Items[i].Value)\n print(val/(10**asset_dict['Decimals']),' ',asset_name,'\\n')\n#\n\n","sub_path":"fusion_tests/fsnSendToRawTimeLock.py","file_name":"fsnSendToRawTimeLock.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544366361","text":"'''B. Books\ntime limit per test2 seconds\nmemory limit per test256 megabytes\ninput standard input\noutput standard output\nWhen Valera has got some free time, he goes to the library to read some books. Today he's got t free minutes to read. That's why Valera took n books in the library and for each book he estimated the time he is going to need to read it. Let's number the books by integers from 1 to n. 
Valera needs a_i minutes to read the i-th book.\n\nValera decided to choose an arbitrary book with number i and read the books one by one, starting from this book. In other words, he will first read book number i, then book number i + 1, then book number i + 2 and so on. He continues the process until he either runs out of the free time or finishes reading the n-th book. Valera reads each book up to the end, that is, he doesn't start reading the book if he doesn't have enough free time to finish reading it.\n\nPrint the maximum number of books Valera can read.\n\nInput\nThe first line contains two integers n and t (1 ≤ n ≤ 10^5; 1 ≤ t ≤ 10^9) — the number of books and the number of free minutes Valera's got. The second line contains a sequence of n integers a_1, a_2, ..., a_n (1 ≤ a_i ≤ 10^4), where number a_i shows the number of minutes that the boy needs to read the i-th book.\n\nOutput\nPrint a single integer — the maximum number of books Valera can read.'''\n\nn, t = map(int, input().split())\na = list(map(int, input().split()))\n# books are read consecutively starting from some index i, so sorting the times\n# would solve a different problem; scan with a sliding window instead\nbooks_read = 0\ntotal = 0\nleft = 0\nfor right in range(n):\n    total += a[right]\n    while total > t:\n        total -= a[left]\n        left += 1\n    books_read = max(books_read, right - left + 1)\nprint(books_read)","sub_path":"dealingProbs/python/codeforces/279b.py","file_name":"279b.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"73139785","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#python 2.7.10\n\n'''Database access module'''\n\n__author__ = 'AJ Kipper'\n\n\nimport MySQLdb as mdb\n\nclass Engine(object):\n\tdef __init__(self):\n\t\tpass\n\n\tdef connect(self):\n\t\t# connect to the database; the arguments are host, user, password and database name\n\t\tcon = mdb.connect(\"localhost\",\"root\",\"ha\",\"crawler\")\n\t\tcur = con.cursor()\n\t\tself.con = con\n\t\treturn cur\n\n\tdef delete(self,statement):\n\t\tcur = self.connect()\n\t\tcur.execute(statement)\n\t\tself.con.commit()\n\n\tdef insert(self,statement):\n\t\tcur = self.connect()\n\t\tcur.execute(statement)\n\t\tself.con.commit()\n\n\tdef select(self,statement):\n\t\tcur = self.connect()\n\t\tcur.execute(statement)\n\t\tvalues = cur.fetchall()\n\t\treturn values\n\n\nif __name__ == '__main__':\n\tengine = Engine()\n\tengine.connect()\n","sub_path":"renren-crawler/mdb.py","file_name":"mdb.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"579967163","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2019-11-13 22:01\n# @Author : 冯佳欣\n# @File : FastText.py\n# @Desc : fastText model definition\n\nfrom .BasicModule import BasicModule\nimport torch\nimport numpy as np\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass FastText(BasicModule):\n    def __init__(self,hp_dict,vectors=None):\n        '''\n        :param hp_dict: hyper-parameter dict\n        :param vectors: pretrained embeddings, expected as a numpy array\n        '''\n        super(FastText,self).__init__()\n        self.vocab_size = hp_dict['vocab_size']\n        self.embedding_dim = hp_dict['embedding_dim']\n        self.hidden_size = hp_dict['hidden_size']\n        self.label_size = hp_dict['label_size']\n\n        self.embedding = nn.Embedding(self.vocab_size,self.embedding_dim)\n        if vectors is not None:\n            self.embedding.weight.data.copy_(torch.from_numpy(vectors))\n\n        self.pre = nn.Sequential(\n            nn.Linear(self.embedding_dim,self.embedding_dim * 2),\n            nn.BatchNorm1d(self.embedding_dim * 2),\n            # inplace ReLU, modifies its input tensor in place\n            nn.ReLU(True)\n        )\n
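        # Unlike canonical fastText (a plain embedding average feeding a linear\n        # classifier), each token embedding is first expanded to 2*embedding_dim\n        # by self.pre before the sequence is mean-pooled in forward().\n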
\n        self.fc = nn.Sequential(\n            nn.Linear(self.embedding_dim * 2,self.hidden_size),\n            nn.BatchNorm1d(self.hidden_size),\n            nn.ReLU(True),\n            nn.Linear(self.hidden_size,self.label_size)\n\n        )\n\n    def forward(self,x):\n        '''\n        Forward pass\n        :param x: [batch_size,seq_len], where every value is a word index\n        :return: [batch_size,label_size]\n        '''\n\n        # [batch_size,seq_len,emb_dim]\n        embed = self.embedding(x)\n        embed_size = embed.size()\n        # before feeding into self.pre, reshape embed to [batch_size * seq_len, emb_dim]\n        embed = embed.contiguous().view(-1,self.embedding_dim)\n        # [batch_size * seq_len, emb_dim * 2]\n        out = self.pre(embed)\n        # reshape back to [batch_size,seq_len,emb_dim * 2]\n        out = out.view(embed_size[0],embed_size[1],-1)\n\n        # [batch_size,seq_len,2 * emb_dim]\n        print('out size:' + str(out.size()))\n        # [batch_size,2 * emb_dim]\n        mean_pre_embed = torch.mean(out,dim=1)\n        print('mean_pre_embed size:' + str(mean_pre_embed.size()))\n        logit = self.fc(mean_pre_embed)\n        print(logit.size())\n        return logit\n\n","sub_path":"code/models/.ipynb_checkpoints/fastText-checkpoint.py","file_name":"fastText-checkpoint.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"606537253","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import MultipleLocator\nimport os\n\n# RMSE\ndef draw_RMSE_and_MAE(data_path):\n    # use a font that can render Chinese labels\n    plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\n\n    # load the metric histories\n    RMSE = np.loadtxt(os.path.join(data_path,\"RMSE.csv\"))\n    MAE = np.loadtxt(os.path.join(data_path, \"MAE.csv\"))\n\n\n    # best reference values for comparison\n    # traffic: RMSE:0.03853-0.04245 MAE:0.02664-0.02974\n    # electricity: RMSE:73.42-83.34 MAE:56.91- 64.66\n    # power: RMSE:0.4963- 0.9669  MAE:0.2732 - 52.61\n    RMSE_best_line, = plt.plot(np.array([0,MAE.shape[0]-1]), np.array([83.34,83.34]), 'r--')\n    MAE_best_line, = plt.plot(np.array([0,MAE.shape[0]-1]), np.array([64.66,64.66]), 'b--')\n\n\n    # build the x axis (one point per epoch)\n    x = np.arange(1, RMSE.shape[0] + 1, 1)\n\n    RMSE_line, = plt.plot(x, RMSE, 'r-')\n    MAE_line, = plt.plot(x, MAE, 'b-')\n\n    plt.legend(handles=[RMSE_line,MAE_line,RMSE_best_line,MAE_best_line],\n               labels=[\"RMSE\",\"MAE\",\"best_RMSE\",\"best_MAE\"],\n               loc=\"upper right\",\n               fontsize=10)\n\n    # axis labels and limits\n    plt.xlabel(\"epochs\")\n    y_major_locator = MultipleLocator(20)  # y-axis tick interval\n    ax = plt.gca()\n    ax.yaxis.set_major_locator(y_major_locator)\n    plt.ylim(0, 200)\n    plt.ylabel(\"RMSE / MAE\")\n    plt.grid(linestyle='--', color='gray', )\n    plt.show()\n\n\n# main entry point\ndef main():\n    draw_RMSE_and_MAE(\"F:/20210107-实验记录\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"showResult.py","file_name":"showResult.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"231214572","text":"import pandas as pd\nimport numpy as np\n\nfrom Utils.utils_classes import Trial, All_trials\n\n\ndef restructure_trial_data(trial, start_frame, stop_frame, stim_type, idx, vid_num):\n    new_trial = Trial()\n\n    new_trial.std_tracking['x'] = trial.x[0][start_frame:stop_frame-1]\n    new_trial.std_tracking['y'] = trial.y[0][start_frame:stop_frame-1]\n    new_trial.std_tracking['orientation'] = trial.orientation[0][start_frame:stop_frame-1]\n    new_trial.std_tracking['direction'] = trial.direction[0][start_frame:stop_frame-1]\n    new_trial.std_tracking['velocity'] = trial.velocity[0][start_frame:stop_frame-1]\n\n    new_trial.name = '{}_{}_{}'.format(stim_type, vid_num, idx)\n\n    return new_trial\n\n\ndef collate_cohort_trials(db, processed_session):\n
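    # Gather every trial of this processed session into the cohort-wide\n    # 'All_trials' store on the database frame, grouped by stimulus type.\n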
    # If we don't have an All_trials column in the database, create it\n    if 'All_trials' not in db.keys():\n        sLen = len(db[db.keys()[0]])\n        db = db.assign(All_trials=pd.Series(np.random.randn(sLen)).values)\n\n    # Add all trials from this session to the cohort store\n    trials_d = db['All_trials']['processed']\n\n    if isinstance(trials_d, float):\n        # Initialise dictionary\n        trials_d = All_trials()\n\n    for stim_type, trials_l in trials_d.__dict__.items():\n        augmented_trials = []\n\n        if isinstance(processed_session.tracking_data[stim_type], list): # Trials tracked by traditional tracking\n            for trial in processed_session.tracking_data[stim_type]:\n                trial.rois = processed_session.video_data['User ROIs']\n                augmented_trials.append(trial)\n        else:\n            for trial_name, trial in processed_session.tracking_data[stim_type].items():\n                tr = {\n                    'dlc data':trial,\n                    'rois':processed_session.video_data['User ROIs']\n                }\n                augmented_trials.append(tr)\n\n        trials_l.extend(augmented_trials) # store all the trials\n\n    db['All_trials'] = db['All_trials'].replace(['processed'], trials_d, inplace=True) # Not sure why but this line is necessary\n    db['All_trials']['processed'] = trials_d\n\n    return","sub_path":"Analysis_V2/Utils/Data_rearrange_funcs.py","file_name":"Data_rearrange_funcs.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"544599579","text":"#!/usr/bin/env python\n\n# Core\nimport pprint\n\n# Helper\nfrom geobounds import *\nfrom traffic import *\n\n# Update the traffic lights\ndef update():\n\t# Geolocation\n\tlocation = locate( '/var/www/location.txt' )\n\n\t# Display for posterity\n\t# print location['city'] + ', ' + location['region'] + ' (' + str( location['latitude'] ) + ', ' + str( location['longitude'] ) + ')'\n\n\t# Check traffic conditions\n\tconditions = traffic( 24, location['latitude'], location['longitude'] )\n\n\t# Display for posterity\n\t# pprint.pprint( conditions )\n\n\t# Turn on the LED lights\n\tlights( conditions )\n\n# Continuously check once every sixty seconds\nwhile True:\n\tupdate()\n\ttime.sleep( 60 )\n","sub_path":"Indicatinator/python/traffic.d.py","file_name":"traffic.d.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"454603844","text":"# Ants\n# Implementation idea: reverse a1, build a3 = a1 + a2, then walk over a3 checking adjacent pairs;\n# when exactly one of the j-th and (j+1)-th elements belongs to a1, swap them and mark both as visited\n# The visited marks are needed because elements swapped once during a pass must not be swapped again in the same pass\n\nimport sys\ninput = sys.stdin.readline\nn1, n2 = map(int, input().split())\na1 = list(map(str, input().strip()))[:: -1]\na2 = list(map(str, input().strip()))\nn = int(input())\na3 = a1 + a2\n\ndef solution():\n    for i in range(1, n + 1):\n        visited = [1] * len(a3) # marks elements already swapped in this pass\n        for j in range(len(a3) - 1):\n            if visited[j]:\n                if a3[j] in a1 and a3[j + 1] not in a1:\n                    # tmp = a3[j]\n                    # a3[j] = a3[j + 1]\n                    # a3[j + 1] = tmp\n                    a3[j], a3[j + 1] = a3[j + 1], a3[j] # tuple swap; actually slower than the temp-variable swap above\n                    visited[j] = 0\n                    visited[j + 1] = 0\n    print(\"\".join(list(map(str, a3))))\nsolution()","sub_path":"November,2020~July,2021/2020-12-25/3048_이승민_개미.py","file_name":"3048_이승민_개미.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"413434112","text":"# Q3\nimport sympy\n\"\"\"\nRephrasing the problem statement: flip every multiple of 2, then every multiple of 3, then every multiple of 4, and so on.\nA card whose number has an odd divisor count stays face-down. Card 1, which is never flipped, naturally stays face-down too.\n\"\"\"\n\nNUM = 100\n
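# Only perfect squares have an odd number of divisors (divisors pair up except\n# the square root), so the cards printed below are exactly 1, 4, 9, ..., 100.\n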
for i in range(0,NUM):\n    if sympy.divisor_count(i+1)%2 !=0:\n        print(i+1)","sub_path":"q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"611745990","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\nProject_name:yzmsb\nFile_name:xinjiang_predict \nCreated on 2016/11/3 at 10:29 AM\n@Author: dfsj\n\"\"\"\n\n\"\"\" CAPTCHA recognition for the Xinjiang autonomous region \"\"\"\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport cv2\nimport numpy as np\nimport logging.config\n\nlogging.config.fileConfig(\"logging.conf\")\nloggerInfo = logging.getLogger(\"infoLogger\")\n\n\"\"\" Data processing \"\"\"\n\nnp.set_printoptions(threshold='nan', linewidth=10000)\n\n\ndef yzmsb_preprocess(filename):\n    loggerInfo.info('Image name: ' + filename)\n    try:\n        im = cv2.imread(filename) # 30, 80\n        # crop the three character regions\n        im1 = im[:, 2:14, ]\n        im2 = im[:, 17:34, ]\n        im3 = im[:, 35:44, ]\n        array1 = np.zeros((30, 17))\n        array2 = np.zeros((30, 17))\n        array3 = np.zeros((30, 17))\n        array21 = np.zeros((15, 10))\n        array22 = np.zeros((15, 10))\n        array23 = np.zeros((15, 10))\n\n        for ii in xrange(30):\n            for j1 in xrange(12):\n                if im1[ii, j1, 0] < 150 or im1[ii, j1, 1] < 150 or im1[ii, j1, 2] < 150:\n                    array1[ii, j1] = 1\n            for j2 in xrange(17):\n                if im2[ii, j2, 0] < 150 or im2[ii, j2, 1] < 150 or im2[ii, j2, 2] < 150:\n                    array2[ii, j2] = 1\n            for j3 in xrange(9):\n                if im3[ii, j3, 0] < 150 or im3[ii, j3, 1] < 150 or im3[ii, j3, 2] < 150:\n                    array3[ii, j3] = 1\n        for i in range(30):\n            if map(sum, array1)[i] != 0: # sum over rows\n                array11 = array1[i:, :]\n                break\n        for i in range(30):\n            if map(sum, array2)[i] != 0:\n                array12 = array2[i:, :]\n                break\n        for i in range(30):\n            if map(sum, array3)[i] != 0:\n                array13 = array3[i:, :]\n                break\n        for j in range(17):\n            if map(sum, zip(*array11))[j] != 0: # sum over columns\n                array21 = array11[:15, j:j + 10]\n                break\n        for j in range(17):\n            if map(sum, zip(*array12))[j] != 0:\n                array22 = array12[:15, j:j + 10]\n                break\n        for j in range(17):\n            if map(sum, zip(*array13))[j] != 0:\n                array23 = array13[:15, j:j + 10]\n                break\n\n        array31 = array21.reshape(1, -1)[0]\n        array32 = array22.reshape(1, -1)[0]\n        array33 = array23.reshape(1, -1)[0]\n        result_array = [array31, array32, array33]\n        return result_array\n\n    except Exception as error:\n        loggerInfo.error(filename + str(error))\n\n\ndef yzmsb_model(item, rfclf):\n    result = []\n    for i in range(len(item)):\n        ret = int(rfclf.predict(np.array(item[i]).reshape(1, -1)))\n        result.append(ret)\n    return result\n\n\ndef yzmsb(filename, rfclf):\n    try:\n        a = yzmsb_preprocess(filename)\n        jieguo = yzmsb_model(a, rfclf)\n        if jieguo[1] == -1: # class -1 for the middle symbol denotes addition, otherwise multiply\n            jisuanjieguo = jieguo[0] + jieguo[2]\n            loggerInfo.info('Recognition result for image %s: %r ' % (filename, jisuanjieguo))\n        else:\n            jisuanjieguo = jieguo[0] * jieguo[2]\n            loggerInfo.info('Recognition result for image %s: %r ' % (filename, jisuanjieguo))\n    except Exception as error:\n        loggerInfo.error('Failed to recognize image %s, reason: %r ' % (filename, str(error)))\n        jisuanjieguo = '0'\n    finally:\n        return str(jisuanjieguo)\n\n\n","sub_path":"yzmsb/xinjiang.py","file_name":"xinjiang.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"353053029","text":"from .network import Network\nfrom .dense import Dense\nfrom .softmax import Softmax\nfrom .costs import MSE\n\ndef build_feedforward_network(\n        hidden_sizes=[100],\n        hidden_activation=None,\n        output_size=10,\n        output_activation=Softmax,\n        cost=MSE):\n    net = Network()\n    for i, hidden_size in enumerate(hidden_sizes):\n        net.add(Dense(hidden_size))\n        if 
hidden_activation:\n net.add(hidden_activation())\n net.add(Dense(output_size))\n net.add(output_activation())\n net.add(cost())\n return net","sub_path":"loony/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296712651","text":"from transformers.modeling_roberta import RobertaEmbeddings, RobertaModel\nfrom transformers.modeling_bert import (\n BertModel, \n BertEncoder, \n BertPooler, \n BertLayer, \n BERT_INPUTS_DOCSTRING, \n add_start_docstrings_to_callable,\n BertAttention,\n BertIntermediate,\n BertOutput)\nfrom transformers.modeling_camembert import CamembertModel\nfrom torch import nn\nimport torch\n\n\nclass CustomBertModel(BertModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well\n as a decoder, in which case a layer of cross-attention is added between\n the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,\n Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the\n :obj:`is_decoder` argument of the configuration set to :obj:`True`; an\n :obj:`encoder_hidden_states` is expected as an input to the forward pass.\n\n .. _`Attention is all you need`:\n https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n def __init__(self, config, custom_embeds_layer_index=None, output_attentions=None):\n super().__init__(config)\n self.config = config\n\n # self.embeddings = RobertaEmbeddings(config)\n if output_attentions is not None:\n config.output_attentions = output_attentions\n self.encoder = CustomBertEncoder(config, custom_embeds_layer_index=custom_embeds_layer_index)\n self.pooler = BertPooler(config)\n self.custom_embeds_layer_index = custom_embeds_layer_index\n\n self.init_weights()\n \n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n additional_states=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n custom_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. 
The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pre-training.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import BertModel, BertTokenizer\n import torch\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertModel.from_pretrained('bert-base-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n \n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder:\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length, device=device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n causal_mask = causal_mask.to(\n torch.long\n ) # not converting to long will cause errors with pytorch version < 1.3\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor 
which is 0.0 for\n        # positions we want to attend and -10000.0 for masked positions.\n        # Since we are adding it to the raw scores before the softmax, this is\n        # effectively the same as removing these entirely.\n        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n        # If a 2D or 3D attention mask is provided for the cross-attention\n        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]\n        if self.config.is_decoder and encoder_hidden_states is not None:\n            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n            if encoder_attention_mask is None:\n                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n\n            if encoder_attention_mask.dim() == 3:\n                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n            elif encoder_attention_mask.dim() == 2:\n                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n            else:\n                raise ValueError(\n                    \"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})\".format(\n                        encoder_hidden_shape, encoder_attention_mask.shape\n                    )\n                )\n\n            encoder_extended_attention_mask = encoder_extended_attention_mask.to(\n                dtype=next(self.parameters()).dtype\n            ) # fp16 compatibility\n            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0\n        else:\n            encoder_extended_attention_mask = None\n\n        # Prepare head mask if needed\n        # 1.0 in head_mask indicates we keep the head\n        # attention_probs has shape bsz x n_heads x N x N\n        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n        if head_mask is not None:\n            if head_mask.dim() == 1:\n                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n            elif head_mask.dim() == 2:\n                head_mask = (\n                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n                ) # We can specify head_mask for each layer\n            head_mask = head_mask.to(\n                dtype=next(self.parameters()).dtype\n            ) # switch to float if needed + fp16 compatibility\n        else:\n            head_mask = [None] * self.config.num_hidden_layers\n\n        # memoryview(b'') acts as a no-op context manager here: the embedding forward\n        # runs under torch.no_grad() only when all embedding parameters are frozen\n        with memoryview(b'') if any(p.requires_grad for p in self.embeddings.parameters()) else torch.no_grad():\n            embedding_output = self.embeddings(\n                input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n            )\n        encoder_outputs = self.encoder(\n            embedding_output,\n            attention_mask=extended_attention_mask,\n            head_mask=head_mask,\n            encoder_hidden_states=encoder_hidden_states,\n            encoder_attention_mask=encoder_extended_attention_mask,\n            custom_embeds=custom_embeds if self.custom_embeds_layer_index >= 0 else None,\n        )\n        sequence_output = encoder_outputs[0]\n        pooled_output = self.pooler(sequence_output)\n        \n        outputs = (sequence_output, pooled_output,) + encoder_outputs[\n            1:\n        ] # add hidden_states and attentions if they are here\n        return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n
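\n# CustomBertLayer below mirrors transformers' BertLayer, except that forward\n# accepts an optional \"custom_embeds\" tensor which is added to the attention\n# output as a residual before the output block, so the injected embeddings\n# bypass the intermediate (FFN) transform:\n#     layer_output = self.output(self.intermediate(att), att + custom_embeds)\n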
\nclass CustomBertLayer(BertLayer):\n    def __init__(self, config):\n        super().__init__(config)\n        self.attention = BertAttention(config)\n        self.is_decoder = config.is_decoder\n        if self.is_decoder:\n            self.crossattention = BertAttention(config)\n        self.intermediate = BertIntermediate(config)\n        self.output = BertOutput(config)\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        custom_embeds=None,\n    ):\n        self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask)\n        attention_output = self_attention_outputs[0]\n        outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n        if self.is_decoder and encoder_hidden_states is not None:\n            cross_attention_outputs = self.crossattention(\n                attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask\n            )\n            attention_output = cross_attention_outputs[0]\n            outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\n\n        intermediate_output = self.intermediate(attention_output)\n        if custom_embeds is not None:\n            attention_output = attention_output + custom_embeds\n        layer_output = self.output(intermediate_output, attention_output)\n        outputs = (layer_output,) + outputs\n        return outputs\n    \nclass CustomBertEncoder(BertEncoder):\n    def __init__(self, config, custom_embeds_layer_index=None, output_attentions=None):\n        super().__init__(config)\n        self.output_attentions = output_attentions if output_attentions is not None else config.output_attentions\n        \n        self.output_hidden_states = config.output_hidden_states\n        self.layer = nn.ModuleList([CustomBertLayer(config) for _ in range(config.num_hidden_layers)])\n        self.custom_embeds_layer_index = custom_embeds_layer_index\n\n    def forward(\n        self,\n        hidden_states,\n        attention_mask=None,\n        head_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        custom_embeds=None,\n    ):\n        all_hidden_states = ()\n        all_attentions = ()\n        for i, layer_module in enumerate(self.layer):\n            if self.output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n            \n            # memoryview(b'') is a no-op context manager: layers whose parameters are\n            # all frozen run under torch.no_grad()\n            with memoryview(b'') if any(p.requires_grad for p in layer_module.parameters()) else torch.no_grad():\n                layer_outputs = layer_module(\n                    hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask,\n                    custom_embeds=custom_embeds if i == self.custom_embeds_layer_index and custom_embeds is not None else None\n                )\n            hidden_states = layer_outputs[0]\n            \n            if self.output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n\n        # Add last layer\n        if self.output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        outputs = (hidden_states,)\n        if self.output_hidden_states:\n            outputs = outputs + (all_hidden_states,)\n        if self.output_attentions:\n            outputs = outputs + (all_attentions,)\n        return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass CustomRobertaModel(CustomBertModel, RobertaModel):\n    \"\"\"\n\n    The model can behave as an encoder (with only self-attention) as well\n    as a decoder, in which case a layer of cross-attention is added between\n    the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,\n    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n    To behave as a decoder the model needs to be initialized with the\n    :obj:`is_decoder` argument of the configuration set to :obj:`True`; an\n    :obj:`encoder_hidden_states` is expected as an input to the forward pass.\n\n    .. 
_`Attention is all you need`:\n https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n def __init__(self, config, custom_embeds_layer_index=None):\n super().__init__(config, custom_embeds_layer_index=custom_embeds_layer_index)\n \n self.embeddings = RobertaEmbeddings(config)\n\n self.init_weights()\n \n\nclass CustomCamembertModel(CustomRobertaModel, CamembertModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well\n as a decoder, in which case a layer of cross-attention is added between\n the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,\n Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the\n :obj:`is_decoder` argument of the configuration set to :obj:`True`; an\n :obj:`encoder_hidden_states` is expected as an input to the forward pass.\n\n .. _`Attention is all you need`:\n https://arxiv.org/abs/1706.03762\n\n \"\"\"\n pass","sub_path":"custom_bert.py","file_name":"custom_bert.py","file_ext":"py","file_size_in_byte":16127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70923723","text":"import tkinter.ttk as ttk\nfrom tkinter import *\nfrom tkinter.ttk import Progressbar\nimport tkinter\nfrom threading import Thread\n\nfrom aprox.reference import mag_aprox, pha_aprox\n\nimport config\nfrom data import *\nfrom utils import random_color\n\n\nclass InfoMenu(ttk.Frame):\n def __init__(self, container):\n super(InfoMenu, self).__init__(container)\n self.total = 0\n\n self.texts = dict()\n\n self.addText(\"Valor de N minimo: \")\n\n def addText(self, title):\n Label(self, text=title, font=data.myFont2, height=2, width=25).grid(column=0, row=self.total)\n self.texts[title] = Label(self, text=\"No disponible\", font=data.myFont2, height=2, width=25)\n self.texts[title].grid(column=1, row=self.total)\n\n self.total += 1\n\n def updateValue(self, textTitle, value):\n self.texts[textTitle]['text'] = value\n\n\nclass OptionsMenu(ttk.Frame):\n def __init__(self, container, mode = \"mag\"):\n super(OptionsMenu, self).__init__(container)\n\n self.bars = dict()\n self.vars = dict()\n\n self.total = 0\n\n self.addTickBox(\"Filtrar incorrectos (Q)\", 0)\n #self.addTickBox(\"Filtrar incorrectos (N)\", 1)\n self.total += 1\n\n self.addBar(\"Q Máximo\", 0.5, 40, 0.1)\n if mode == \"mag\":\n self.addBar(\"Denormalización\", 0, 100)\n self.addBar(\"N mínimo\", 1, 20)\n self.addBar(\"N máximo\", 1, 20)\n\n self.bars[\"Q Máximo\"][\"slide\"].set(5)\n\n def addBar(self, title, min_value, max_value, res=1):\n barTitle = Label(self, text=title, font=data.myFont2, width=25, height=2).grid(column=0, row=self.total)\n barSlide = Scale(self, from_=min_value, to=max_value, orient=HORIZONTAL\n , background=\"dodger blue\",\n troughcolor=\"blue\",\n width=20,\n resolution=res,\n font=data.myFont2, length=300)\n barSlide.grid(column=1, row=self.total)\n self.total += 1\n\n self.bars[title] = {\"title\": barTitle, \"slide\": barSlide}\n\n def addTickBox(self, title, col):\n self.vars[title] = IntVar()\n self.bars[title] = Checkbutton(self, text=title, variable=self.vars[title], width=20, font=data.myFont2)\n self.bars[title].grid(column=col, row=self.total)\n\n\nclass AgregarAproximacionMenu(ttk.Frame):\n optionMenu = None\n\n def __init__(self, tabControl, session_data, tableReference):\n super(AgregarAproximacionMenu, self).__init__(tabControl)\n 
self.session_data = session_data\n self.tableReference = tableReference\n\n self.leftFrame = ttk.Frame(self)\n self.rightFrame = ttk.Frame(self)\n\n self.grid_columnconfigure(0, weight=1, uniform=\"group1\")\n self.grid_columnconfigure(1, weight=1, uniform=\"group1\")\n\n self.var = StringVar()\n self.var.set(\"None\")\n\n self.downFrame = ttk.Frame(self, height=40)\n\n self.downFrame.pack(side=BOTTOM, fill=X, expand=False)\n\n self.leftFrame.pack(side=LEFT, fill=X)\n self.rightFrame.pack(side=LEFT, fill=BOTH, expand=1)\n\n self.bind(\"\", self.onVisibility)\n self.cont = dict()\n\n self.last = None\n\n def addButtonCommit(self):\n self.cont[\"commitButton\"] = Button(self.downFrame, height=1, width=10, text=\"Agregar aproximación\",\n command=lambda: self.retrieve_input(), font=data.myFont,\n background=\"dark sea green\")\n # command=lambda: retrieve_input() >>> just means do this when i press the button\n self.cont[\"commitButton\"].pack(side=TOP, fill=BOTH)\n\n def destroyButtonCommit(self):\n self.cont[\"commitButton\"].destroy()\n\n def addLoadingBar(self):\n self.cont[\"progress\"] = Progressbar(self.downFrame, orient=HORIZONTAL,\n length=100, mode='determinate',\n style=\"red.Horizontal.TProgressbar\")\n self.cont[\"progress\"].pack(side=TOP, fill=BOTH, expand=1)\n\n def destroyLoadingBar(self):\n self.cont[\"progress\"].destroy()\n\n def addMagButtons(self):\n for aprox in mag_aprox.keys():\n self.cont[aprox] = Radiobutton(self.leftFrame,\n text=aprox,\n indicatoron=0,\n width=20,\n font=data.myFont3,\n variable=self.var,\n command=self.showChoice,\n background=\"cyan2\",\n selectcolor=\"cyan4\",\n value=aprox)\n self.cont[aprox].pack(fill=BOTH, expand=1)\n\n\n def addPhaseButtons(self):\n for aprox in pha_aprox.keys():\n self.cont[aprox] = Radiobutton(self.leftFrame,\n text=aprox,\n indicatoron=0,\n width=20,\n font=data.myFont3,\n variable=self.var,\n command=self.showChoice,\n background=\"cyan2\",\n selectcolor=\"cyan4\",\n value=aprox)\n self.cont[aprox].pack(fill=BOTH, expand=1)\n\n def retrieve_input(self):\n if config.debug:\n print(\"Agregando aproximacion\")\n if self.var.get() == \"None\":\n self.session_data.topBar.setErrorText(\"Ninguna aproximación seleccionada\")\n return 0\n\n self.session_data.topBar.updateText(\"Agregando aproximacion ...\")\n\n plotData = dict()\n\n plotData[\"Q\"] = self.cont[\"optionMenu\"].bars[\"Q Máximo\"][\"slide\"].get()\n plotData[\"maxN\"] = self.cont[\"optionMenu\"].bars[\"N máximo\"][\"slide\"].get()\n plotData[\"minN\"] = self.cont[\"optionMenu\"].bars[\"N mínimo\"][\"slide\"].get()\n\n if plotData[\"minN\"] > plotData[\"maxN\"]:\n self.session_data.topBar.setErrorText(\"Entrada invalida\")\n return 0\n\n if self.last == \"magnitud\":\n plotData[\"D\"] = self.cont[\"optionMenu\"].bars[\"Denormalización\"][\"slide\"].get()\n plotData[\"norm\"] = self.cont[\"optionMenu\"].bars[\"Denormalización\"][\"slide\"].get()\n\n\n plotData[\"aprox\"] = self.var\n plotData[\"filtrarQ\"] = self.cont[\"optionMenu\"].vars[\"Filtrar incorrectos (Q)\"].get()\n #plotData[\"filtrarN\"] = self.cont[\"optionMenu\"].vars[\"Filtrar incorrectos (N)\"].get()\n\n if config.debug:\n print(plotData)\n\n self.destroyButtonCommit()\n self.addLoadingBar()\n thread = Thread(target=self.computarAproximacion, args= (plotData, ))\n thread.start()\n\n def onVisibility(self, event):\n # Actualización cuando el tab es abierto\n\n if not self.session_data.plantilla:\n if self.last != \"none\":\n for k in self.cont.keys():\n self.cont[k].destroy()\n\n 
self.cont[\"labelNothing\"] = Label(self.rightFrame,text=\"No fue seleccionada ninguna plantilla\", font=data.myFont2)\n self.cont[\"labelNothing\"].pack(side=LEFT, expand=1, fill=X)\n self.last = \"none\"\n\n return 0\n\n if self.last == self.session_data.plantilla.type:\n return 0\n\n for k in self.cont.keys():\n self.cont[k].destroy()\n\n if self.session_data.plantilla.type == \"magnitud\":\n\n self.cont[\"plantillaTitle\"] = Label(self.rightFrame, text=\"Necesidades de plantilla\", font=data.myFont)\n self.cont[\"plantillaTitle\"].pack(side=TOP, fill=X, expand=1)\n self.cont[\"infoMenu\"] = InfoMenu(self.rightFrame)\n self.cont[\"infoMenu\"].pack(side=TOP, fill=X, expand=1)\n self.cont[\"configTitle\"] = Label(self.rightFrame, text=\"Configuraciones\", font=data.myFont)\n self.cont[\"configTitle\"].pack(side=TOP, fill=X, expand=1)\n\n self.cont[\"optionMenu\"] = OptionsMenu(self.rightFrame)\n self.cont[\"optionMenu\"].pack(side=TOP, fill=X, expand=1)\n\n self.addMagButtons()\n self.addButtonCommit()\n self.last = \"magnitud\"\n\n elif self.session_data.plantilla.type == \"fase\":\n self.cont[\"plantillaTitle\"] = Label(self.rightFrame, text=\"Necesidades de plantilla\", font=data.myFont)\n self.cont[\"plantillaTitle\"].pack(side=TOP, fill=X, expand=1)\n\n self.cont[\"infoMenu\"] = InfoMenu(self.rightFrame)\n self.cont[\"infoMenu\"].pack(side=TOP, fill=X, expand=1)\n\n self.cont[\"optionMenu\"] = OptionsMenu(self.rightFrame, \"pha\")\n self.cont[\"optionMenu\"].pack(side=TOP, fill=X, expand=1)\n\n self.addPhaseButtons()\n self.addButtonCommit()\n\n self.last = \"fase\"\n\n def computarAproximacion(self, plotData):\n if config.debug:\n print(\"empezamos el thread\")\n plotData[\"aprox\"] = self.var.get()\n\n minN = plotData[\"minN\"]\n maxN = plotData[\"maxN\"]\n maxQ = plotData[\"Q\"]\n filtrarQ = plotData[\"filtrarQ\"]\n #filtrarN = plotData[\"filtrarN\"]\n\n for i in range(minN, maxN+1):\n actual = (i-minN) / (maxN + 1 - minN) * 100.0\n\n self.updateStatusFunc(actual)\n\n plotData[\"minN\"] = i\n plotData[\"maxN\"] = i\n plotData[\"color\"] = random_color(self.session_data.parent)\n\n number, qData = self.session_data.addPlot(plotData.copy())\n\n if qData != -1 and filtrarQ and qData > maxQ:\n self.session_data.eraseAproximation(number)\n continue\n\n\n\n\n plotData[\"number\"] = number\n\n # self.tableReference.addItem(number, self.var.get(), i, qData,\n # plotData[\"color\"])\n\n n_values = str(minN) + \"-\" + str(maxN)\n\n self.session_data.topBar.setSucessText(\"Aproxmacion agregada: \"+self.var.get()+\" n=\" + n_values )\n if config.debug:\n print(\"Terminamos el thread\")\n\n self.destroyLoadingBar()\n self.addButtonCommit()\n\n def updateStatusFunc(self, value):\n self.cont[\"progress\"][\"value\"] = value\n\n def showChoice(self):\n if self.session_data.plantilla.type == \"magnitud\":\n instance = mag_aprox[self.var.get()](self.session_data.plantilla)\n\n min_n = instance.getMinNValue()\n\n if min_n == -1:\n min_n = \"No disponible\"\n else:\n min_n = str(min_n)\n self.cont[\"infoMenu\"].updateValue(\"Valor de N minimo: \", min_n)\n\n else:\n instance = pha_aprox[self.var.get()](self.session_data.plantilla)\n #instance.computarGamma(self.session_data.plantilla.t0)\n min_n = instance.getMinNValue()\n if min_n == -2:\n min_n = \"No disponible\"\n elif min_n == -1:\n min_n = \"No hay\"\n else:\n min_n = str(min_n)\n\n self.cont[\"infoMenu\"].updateValue(\"Valor de N minimo: \", 
min_n)\n\n\n","sub_path":"TP4/proyecto_oficial/menus/config_aproximaciones/agregar_aproximacion.py","file_name":"agregar_aproximacion.py","file_ext":"py","file_size_in_byte":11056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317128172","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom collections import deque\r\nimport random\r\nclass DDPGAgent:\r\n def __init__(self, state_dim, action_dim,sess, action_bound=[], name='brain'):\r\n self.sess = sess\r\n self.state_dim = state_dim\r\n self.action_dim = action_dim\r\n self.action_holder = tf.placeholder(tf.float32, [None, 1], name='action')\r\n self.reward_holder = tf.placeholder(tf.float32, [None, 1], name='reward')\r\n self.state_holder = tf.placeholder(tf.float32, [None, state_dim], name='state')\r\n self.next_state_holder = tf.placeholder(tf.float32, [None, state_dim], name='next_state')\r\n self.done_holder = tf.placeholder(tf.float32, [None, 1], name='done')\r\n self.action_bound = action_bound\r\n self.name = name\r\n self.memory = deque(maxlen=20000)\r\n self.update_step = 0\r\n self.num_experience = 0\r\n self.gamma = 0.9\r\n self.var = 2.\r\n\r\n def buildCriticNetwork(self, ):\r\n init_w = tf.random_normal_initializer(0., 1.)\r\n init_b = tf.constant_initializer(0.01)\r\n with tf.variable_scope('eva-Critic_network' + self.name):\r\n # enc\r\n q_layer1s = tf.layers.Dense(128, activation=tf.nn.elu,\r\n kernel_initializer=init_w, bias_initializer=init_b, name='q_layer1s',\r\n trainable=True)\r\n q_layer1a = tf.layers.Dense(128, activation=tf.nn.elu,\r\n kernel_initializer=init_w, bias_initializer=init_b, name='q_layer1a',\r\n trainable=True)\r\n self.qs = q_layer1s(self.state_holder)\r\n self.qa = q_layer1a(self.action_holder)\r\n q_estimate = tf.layers.Dense(1, activation=None,\r\n kernel_initializer=tf.random_normal_initializer(0., 0.01), bias_initializer=init_b, name='q_estimate',\r\n trainable=True)\r\n self.q = q_estimate(self.qs+self.qa)\r\n self.q_policy_eva = q_estimate(self.qs+q_layer1a(self.pi))\r\n with tf.variable_scope('tar-Critic_network' + self.name):\r\n # enc\r\n q_layer1s_ = tf.layers.Dense(128, activation=tf.nn.relu,\r\n kernel_initializer=init_w, bias_initializer=init_b, name='q_layer1s_',\r\n trainable=False)\r\n q_layer1a_ = tf.layers.Dense(128, activation=tf.nn.relu,\r\n kernel_initializer=init_w, bias_initializer=init_b, name='q_layer1a_',\r\n trainable=False)\r\n qs_ = q_layer1s_(self.next_state_holder)\r\n qa_ = q_layer1a_(self.pi_)\r\n q_estimate_ = tf.layers.Dense(1, activation=None,\r\n kernel_initializer=tf.random_normal_initializer(0., 0.01), bias_initializer=init_b, name='q_estimate_',\r\n trainable=False)\r\n self.tq = tf.stop_gradient(q_estimate_(qs_+qa_))\r\n\t\t\t\r\n self.q_e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eva-Critic_network' + self.name)\r\n self.q_t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='tar-Critic_network' + self.name)\r\n\t\t\t\r\n def buildActorNetwork(self):\r\n init_w = tf.random_normal_initializer(0., 1.)\r\n init_b = tf.constant_initializer(0.01)\r\n with tf.variable_scope('eva-Actor_network' + self.name):\r\n a_layer1 = tf.layers.Dense(128, activation=tf.nn.elu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),\r\n kernel_initializer=init_w, bias_initializer=init_b, name='a_layer1',\r\n trainable=True)\r\n a_layer2 = tf.layers.Dense(128, activation=tf.nn.elu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),\r\n kernel_initializer=init_w, 
bias_initializer=init_b, name='a_layer2',\r\n trainable=True)\r\n\r\n h1 = a_layer1(self.state_holder)\r\n h2 = a_layer2(h1)\r\n action_selector = tf.layers.Dense(self.action_dim, activation=tf.nn.tanh,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),\r\n kernel_initializer=tf.random_normal_initializer(0., 0.01), bias_initializer=init_b, name='as',\r\n trainable=True)\r\n\r\n self.pi = action_selector(h2)#tf.clip_by_value(1.5*action_selector(h),-1.5,1.)\r\n \r\n with tf.variable_scope('tar-Actor_network' + self.name):\r\n a_layer1_ = tf.layers.Dense(128, activation=tf.nn.elu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),\r\n kernel_initializer=init_w, bias_initializer=init_b, name='a_layer1_',\r\n trainable=False)\r\n a_layer2_ = tf.layers.Dense(128, activation=tf.nn.elu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),\r\n kernel_initializer=init_w, bias_initializer=init_b, name='a_layer2_',\r\n trainable=True)\r\n\r\n h1_ = a_layer1_(self.next_state_holder)\r\n h2_ = a_layer2_(h1_)\r\n\r\n action_selector_ = tf.layers.Dense(self.action_dim, activation=tf.nn.tanh,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01), \r\n kernel_initializer=tf.random_normal_initializer(0., 0.01), bias_initializer=init_b, name='as_',\r\n trainable=False)\r\n self.pi_ = action_selector_(h2_)#tf.clip_by_value(1.5*action_selector_(h_),-1.5,1.)\r\n\r\n self.p_e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eva-Actor_network' + self.name)\r\n self.p_t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='tar-Actor_network' + self.name)\r\n\r\n \r\n\r\n\t\r\n def setLearn(self,LR_C = 0.001, LR_A = 0.00025):\r\n self.q_tar = self.reward_holder + self.gamma*self.tq*(1-self.done_holder)\r\n self.q_loss = tf.reduce_mean(tf.squared_difference(self.q, self.q_tar))\r\n self.reg_loss = tf.losses.get_regularization_loss()\r\n self.p_loss = tf.reduce_mean(self.q_policy_eva) \r\n \r\n self.q_trainOp = tf.train.AdamOptimizer(LR_C).minimize(self.q_loss, var_list=self.q_e_params)\r\n self.p_trainOp = tf.train.AdamOptimizer(LR_A).minimize(-self.p_loss, var_list=self.p_e_params)\r\n \r\n def setUpdate(self, TAU = 0.01, update_interval = 1):\r\n self.updateInterval = update_interval\r\n self.Critic_network_update = [tf.assign(tar, (1 - TAU) * tar + TAU * eva) for tar, eva in zip(self.q_t_params, self.q_e_params)]\r\n self.Actor_network_update = [tf.assign(tar, (1 - TAU) * tar + TAU * eva) for tar, eva in zip(self.p_t_params, self.p_e_params)]\r\n \r\n def choose_action(self, state):\r\n a = np.array(self.sess.run(self.pi, feed_dict={self.state_holder:state})).reshape(-1,)\r\n a = np.random.normal(a[0], self.var)\r\n \r\n return a # returns action\r\n\r\n def remember(self, state, action, reward, next_state,done):\r\n self.num_experience+=1\r\n self.memory.append((state, action, reward, next_state,done))\r\n\r\n def learn(self,batch_size=32):\r\n states, next_states, actions, rewards,dones = [], [], [], [],[]\r\n #minibatch = list(self.memory)[:]\r\n minibatch = random.sample(list(self.memory), batch_size)\r\n for state, action, reward, next_state, done in minibatch:\r\n states.append(state)\r\n next_states.append(next_state)\r\n actions.append(action)\r\n rewards.append(reward)\r\n dones.append(done)\r\n actions = np.asarray(actions).reshape(-1,1)\r\n dones = np.asarray(dones).reshape(-1,1)\r\n rewards = np.asarray(rewards)\r\n states = np.array(states).reshape((-1, self.state_dim))\r\n next_states = np.array(next_states).reshape((-1, self.state_dim))\r\n \r\n 
self.sess.run(self.p_trainOp,feed_dict={self.state_holder: states,self.done_holder:dones})\r\n        self.sess.run(self.q_trainOp,feed_dict={self.state_holder: states,self.action_holder: actions,\r\n                                                self.reward_holder: np.array(rewards).reshape(-1, 1),\r\n                                                self.next_state_holder: next_states,\r\n                                                self.done_holder:dones})\r\n        self.qloss, self.ploss, reg_loss,Q = self.sess.run(\r\n            [ self.q_loss, self.p_loss, self.reg_loss,self.q],\r\n            feed_dict={self.state_holder: states,\r\n                       self.action_holder: actions,\r\n                       self.reward_holder: np.array(rewards).reshape(-1, 1),self.done_holder:dones,\r\n                       self.next_state_holder: next_states})\r\n        self.update_step += 1\r\n        if self.update_step % self.updateInterval == 0:\r\n            self.sess.run(self.Actor_network_update)\r\n            self.sess.run(self.Critic_network_update)\r\n\r\n        return self.ploss, self.qloss, reg_loss,Q\r\n","sub_path":"ddpg_minicity/city_RL.py","file_name":"city_RL.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"448975904","text":"from scipy.stats import f, t\r\nimport numpy as np\r\nimport math\r\n\r\ndata_I = [14, 13, 9, 15, 11, 13, 14, 11]\r\ndata_II = [10, 12, 7, 11, 8, 12, 9, 10, 13, 9, 10, 9]\r\ndata_III = [11, 5, 9, 10, 6, 8, 8, 7]\r\n\r\nn_I = len(data_I)\r\nn_II = len(data_II)\r\nn_III = len(data_III)\r\nn = n_I + n_II + n_III\r\ns = 3\r\ndata_all = data_I + data_II + data_III\r\n\r\nST = np.var(data_all, ddof=1) * (n - 1)\r\nprint('ST =', ST)\r\nmean_all = np.mean(data_all)\r\nSA = n_I * (np.mean(data_I) - mean_all) ** 2 + n_II * (np.mean(data_II) - mean_all) ** 2 + n_III * (np.mean(data_III) - mean_all) ** 2 \r\nprint('SA =', SA)\r\nSE = ST - SA\r\nprint('SE =', SE)\r\n\r\nF_test_value = (SA / (s - 1)) / (SE / (n - s))\r\nalpha = 0.05\r\nright_pos = f.isf(alpha, s - 1, n - s)\r\nprint('F test value = ', F_test_value)\r\nprint('F distribution isf = ', right_pos)\r\n\r\nSE_mean = SE / (n - s)\r\n\r\nalpha = 1 - 0.95\r\nrange_1_2_left = np.mean(data_I) - np.mean(data_II) - t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_I + 1 / n_II))\r\nrange_1_2_right = np.mean(data_I) - np.mean(data_II) + t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_I + 1 / n_II))\r\nprint('mu_I - mu_II range = [', range_1_2_left, ',', range_1_2_right, ']')\r\n\r\nrange_1_3_left = np.mean(data_I) - np.mean(data_III) - t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_I + 1 / n_III))\r\nrange_1_3_right = np.mean(data_I) - np.mean(data_III) + t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_I + 1 / n_III))\r\nprint('mu_I - mu_III range = [', range_1_3_left, ',', range_1_3_right, ']')\r\n\r\nrange_2_3_left = np.mean(data_II) - np.mean(data_III) - t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_II + 1 / n_III))\r\nrange_2_3_right = np.mean(data_II) - np.mean(data_III) + t.isf(alpha / 2, n - s) * math.sqrt(SE_mean * (1 / n_II + 1 / n_III))\r\nprint('mu_II - mu_III range = [', range_2_3_left, ',', range_2_3_right, ']')\r\n","sub_path":"Exercise_9.2.py","file_name":"Exercise_9.2.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"170683193","text":"import sys\r\nsys.stdin = open('그래프의 삼각형.txt', 'r')\r\n\r\nT = int(input())\r\nfor tc in range(T):\r\n    N, M = map(int, input().split())\r\n    Table = [[0 for i in range(N+1)] for j in range(N+1)]\r\n    for _ in range(M):\r\n        x, y = map(int, input().split())\r\n        Table[y][x] = Table[x][y] = 1\r\n    # print(Table)\r\n\r\n    ans = 0\r\n
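    # For each vertex i, collect its neighbour list, then count neighbour pairs\r\n    # (k, l) that are directly connected: each such pair closes a triangle.\r\n    # Every triangle is found once per vertex, hence the ans//3 in the output.\r\n    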
for i in range(1,N+1):\r\n temp = []\r\n for j in range(1,N+1):\r\n if Table[i][j] == 1:\r\n temp.append(j)\r\n # print(temp)\r\n for k in temp:\r\n for l in temp[temp.index(k)+1:]:\r\n if Table[k][l] == 1:\r\n ans += 1\r\n print('#{} {}'.format(tc+1, ans//3))","sub_path":"20200305/그래프의 삼각형.py","file_name":"그래프의 삼각형.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24871423","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass DeployedServiceTypeInfo(Model):\n \"\"\"Information about service type deployed on a node, information such as the\n status of the service type registration on a node.\n\n :param service_type_name:\n :type service_type_name: str\n :param service_manifest_name:\n :type service_manifest_name: str\n :param code_package_name:\n :type code_package_name: str\n :param status: Possible values include: 'Invalid', 'Disabled', 'Enabled',\n 'Registered'\n :type status: str or :class:`enum `\n :param service_package_activation_id:\n :type service_package_activation_id: str\n \"\"\"\n\n _attribute_map = {\n 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'},\n 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},\n 'code_package_name': {'key': 'CodePackageName', 'type': 'str'},\n 'status': {'key': 'Status', 'type': 'str'},\n 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},\n }\n\n def __init__(self, service_type_name=None, service_manifest_name=None, code_package_name=None, status=None, service_package_activation_id=None):\n self.service_type_name = service_type_name\n self.service_manifest_name = service_manifest_name\n self.code_package_name = code_package_name\n self.status = status\n self.service_package_activation_id = service_package_activation_id\n","sub_path":"azure-servicefabric/azure/servicefabric/models/deployed_service_type_info.py","file_name":"deployed_service_type_info.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305146974","text":"import requests\n\n\nclass Request(object):\n _methods = ('get', 'post', 'put', 'delete')\n requests = requests\n\n def __init__(self, container, host, https, port=80, timeout=3, retries=5,\n retry_delay=2):\n self._container = container\n self._timeout = timeout\n self._host = host\n self._port = port\n self._retries = retries\n self._retry_delay = retry_delay\n self._https = https\n\n @property\n def auth(self):\n return self._auth\n\n @auth.setter\n def auth(self, value):\n self._auth = value\n\n def request(self, method, url, use_json=True, **kwargs):\n from time import sleep\n from requests.exceptions import Timeout\n headers = self.collect_headers()\n if use_json:\n headers['content-type'] = 'application/json'\n\n https = kwargs.pop('https', self._https)\n if https == 'true':\n schema = 'https://'\n else:\n schema = 'http://'\n\n path = '{0}{1}{2}{3}'.format(\n schema,\n self._host,\n 
':{0}'.format(self._port) if self._port != 80 else '',\n '/{0}'.format(url) if not url.startswith('/') else url)\n\n while self._retries:\n try:\n return getattr(self.requests, method)(\n path,\n timeout=self._timeout,\n auth=self.auth,\n headers=headers, verify=False, **kwargs)\n except Timeout:\n self._retries -= 1\n sleep(self._retry_delay)\n\n def collect_headers(self):\n headers = {}\n for id, tags in self._container.find_tagged_service_ids(\n 'request.header').iteritems():\n for tag in tags:\n action = tag['action']\n headers.update(getattr(self._container.get(id), action)())\n\n return headers\n\n def __getattr__(self, name):\n if name in self._methods:\n def request(url, *args, **kwargs):\n return self.request(name, url, *args, **kwargs)\n\n return request\n","sub_path":"core/plugins/request/request_manager.py","file_name":"request_manager.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231757028","text":"import json\nimport re\nfrom utils import *\nimport discord\nfrom discord import Embed\nimport requests\nimport time\nt = time.time()\n\nr = requests.get(\n\"https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-B0053-067?Authorization=CWB-AC16B347-D588-4F17-8878-22F24F23A63C&downloadType=WEB&format=JSON\"\n)\nprint(\"request耗時\",time.time()-t)\nt = time.time()\n#print(r.text)\n\nj = json.loads(r.text)\nprint(\"load耗時\",time.time()-t)\ndata = j[\"cwbopendata\"][\"dataset\"][\"locations\"][\"location\"]\ncities_dict = {\n\"宜蘭縣\":[\"太平山森林遊樂區\"],\n\"南投縣\":[\n\"小風口停車場\",\n\"鳶峰停車場\",\n\"台大梅峰實驗農場\",\n\"新中橫塔塔加停車場\"],\n\"屏東縣\":[\n\"墾丁貓鼻頭\",\n\"墾丁龍磐公園\"],\n\"高雄市\":[\n\"高雄梅山青年活動中心\",\n\"藤枝森林遊樂區\",\n\"高雄都會公園\"],\n\"基隆市\":[\"基隆大武崙砲台停車場\"],\n\"新北市\":[\"五分山\",\"石碇雲海國小\",\"烏來風景特定區\"],\n\"苗栗縣\":[\"觀霧森林遊樂區\"],\n\"嘉義縣\":[\"阿里山遊樂區\",\n\"鹿林天文台\"],\n\"臺中市\":[\n\"武陵農場\",\n\"大雪山國家森林遊樂區\",\n\"福壽山農場\",\n\"臺中都會公園\"],\n\"臺北市\":[\"陽明山國家公園小油坑停車場\",\"陽明山國家公園擎天崗\"],\n\"臺南市\":[\"七股海堤\",\n\"南瀛天文教育園區\",\n\"臺南都會公園\"],}\n\n\ndef get_name_list(s:str=\"all\")->list:\n l = []\n print(s)\n for i in range(26):\n\n if s == \"all\":\n l.append(data[i][\"locationName\"])\n elif s in data[i][\"locationName\"]:\n l.append(data[i][\"locationName\"])\n return l\n\ndef find_place_number(s:str)->int:\n s = s.replace(\"台\",\"臺\")\n for i in range(26):\n if s in data[i][\"locationName\"]:\n return i\n return -1\ndef search_place(s:str)->list:\n \n s = s.replace(\"台\",\"臺\")\n print(s)\n for i in cities_dict.keys():\n if s in i:\n return cities_dict[i]\n\ndef get_embed(chan,token:int,stack:list)->Embed:\n if token == -1:\n send_msg(chan,\"那是哪裡?\")\n return\n MinT = 4 #最低溫\n RH = 2 #相對溼度\n PoP = 9 #降雨機率\n embed=discord.Embed(title=\"歡迎收看浪漫Duke,帶你浪漫看星星\",description=data[token][\"locationName\"])\n temp = data[token][\"weatherElement\"]\n for i in range(7):\n \n embed.add_field(name=\"日期\", value=temp[RH][\"time\"][i][\"startTime\"][5:10], inline=False)\n embed.add_field(name=\"最低溫❄\", value=f'{temp[MinT][\"time\"][i][\"elementValue\"][\"value\"]}度', inline=True)\n if i <3 :\n embed.add_field(name=\"降雨機率☔\", value=f'{temp[PoP][\"time\"][i][\"elementValue\"][\"value\"]}%', inline=True)\n embed.add_field(name=\"相對溼度💧\", value=f'{temp[RH][\"time\"][i][\"elementValue\"][\"value\"]}%', inline=True)\n #send_msg(chan,\"???????\")\n send_msg(chan,emb = embed)\ndef weather_command_handler(channel: TextChannel, args: list, user_stack: list):\n s = ''.join(args)\n s = s.replace(\"看天氣\", \"\")\n print(s)\n get_embed(chan=channel,token = 
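# find_place_number maps the requested place name to its index in the CWB location data (-1 when unknown):\n        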
find_place_number(s),stack = user_stack)\n\ndef find_place_handler(channel: TextChannel, args: list, user_stack: list):\n\n s = ''.join(args)\n s = s.replace(\"找地點\", \"\")\n string = \"\" \n embed = discord.Embed(title=\"歡迎收看浪漫Duke,帶你找到屬於你的地點\",description=\"馬上訂閱 Duke 的 Channel開啟小鈴鐺,分享!\")\n if s.replace(\"台\",\"臺\") in cities_dict.keys():\n l = search_place(s.replace(\"台\",\"臺\"))\n for i in l:\n string += (i+\"\\n\")\n else:\n if s==\"\":\n l = get_name_list(\"all\")\n else :\n l = get_name_list(s)\n print(l)\n for i in l:\n string += (i+\"\\n\")\n embed.add_field(name=\"地點\", value=string, inline=True)\n send_msg(channel,emb=embed)\n \n","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397476728","text":"from win32api import GetSystemMetrics\r\n\r\nx =GetSystemMetrics(0)\r\ny = GetSystemMetrics(1)\r\n\r\n### SCREEN\r\nscreen = dict(\r\n width = 800,\r\n height = 600,\r\n)\r\n\r\n### PYGAME DATA\r\ndelay = 1000\r\nFPS = 60\r\nfont = dict(\r\n smallSize = 30,\r\n smallFamily = \"calibri\",\r\n mediumSize = 50,\r\n mediumFamily = \"calibri\",\r\n bigSize = 80,\r\n bigFamily = \"calibri\",\r\n)\r\n### FOOD\r\n\r\nfood = dict(\r\n startingFoodCount = 5,\r\n freshness = 10,\r\n rottingPeace = 1\r\n)\r\n\r\n\r\nanimalsBeggining = dict(\r\n startingPeacocksCount=10,\r\n startingTigersCount=30\r\n)\r\n\r\npeacocks = dict(\r\n maxCount = 1000,\r\n minHungerPeace = 1,\r\n maxHungerPeace = 5,\r\n minHunger = 0,\r\n maxHunger = 100,\r\n eatingPoints = 5,\r\n moveCost = 1,\r\n startingHunger = 50,\r\n minWeight = 3,\r\n maxWeight = 6,\r\n avgSpeed = 16,\r\n fertilityLvl = 50,\r\n fertilityCost = 15,\r\n minEggCount = 1,\r\n maxEggCount = 4\r\n)\r\n\r\ntigers = dict(\r\n maxCount = 1000,\r\n minHungerPeace = 1,\r\n maxHungerPeace = 5,\r\n minHunger = 0,\r\n maxHunger = 100,\r\n eatingPoints = 10,\r\n moveCost = 5,\r\n startingHunger = 50,\r\n minWeight = 65,\r\n maxWeight = 300,\r\n avgSpeed = 57.5,\r\n fertilityLvl = 60,\r\n fertilityCost = 20,\r\n minCubCount = 1,\r\n maxCubsCount = 2\r\n)\r\n","sub_path":"22.03.20/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545583024","text":"import datetime\n\nfrom extuser.models import UserActivityLog\n\n\nclass UserActivityLogMiddleware(object):\n def process_request(self, request):\n if request.user.is_authenticated():\n date = datetime.date.today()\n try:\n log = UserActivityLog.objects.get(created=str(date), user=request.user)\n log.save(force_update=True)\n except UserActivityLog.DoesNotExist:\n UserActivityLog.objects.create(created=str(date), user=request.user)\n\n return None\n","sub_path":"extuser/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499885894","text":"import csv\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n###20개씩 묶음(per day 4bars * 5days = 20bars)###\r\nf = open('./타겟추가데이터2_새로운방식.csv', 'r', newline='')\r\nrdr = csv.reader(f)\r\n\r\nlist = []\r\nfor i in rdr:\r\n list.append(i)\r\n\r\nlist.pop(0)\r\nprint(len(list))\r\n\r\ngram_20 = []\r\nfor i in range(len(list) + 1 - 20):\r\n gram_20.append(list[i:i + 20])\r\ngram_20 = 
np.array(gram_20)\r\nprint(gram_20.shape)\r\n\r\nf.close()\r\n\r\n###normalization###\r\no = open('./20개씩묶고_컬럼별정규화.csv', 'w', newline='')\r\nwr = csv.writer(o)\r\ntitle = ['time', 'low', 'high', 'open', 'close', 'volume', 'sma5', 'sma20', 'ema12', 'ema26', 'dn', 'mavg', 'up',\r\n         'pctB', 'rsi14', 'macd', 'signal'] * 20\r\ntitle.append('lastday_close')\r\ntitle.append('after6ratio')\r\ntitle.append('after12ratio')\r\ntitle.append('after18ratio')\r\ntitle.append('after24ratio')\r\nwr.writerow(title)\r\n\r\nscaler = MinMaxScaler(feature_range=(0, 1))  # scaler\r\nfor i in gram_20:\r\n    close = np.array(i[-1][4], np.float64)  # -1 = lastIndex\r\n    target = np.array(i[-1][-4], np.float64)\r\n    ratio = target / close\r\n\r\n    for j in range(1, i.shape[1] - 4):\r\n        col = scaler.fit_transform(np.reshape(i[:, j], (-1, 1)))\r\n        col = np.transpose(col)\r\n        i[:, j] = col\r\n\r\n    data = i[:, :-4]\r\n    data = np.reshape(data, (-1))\r\n    data = np.append(data, close)\r\n    data = np.append(data, ratio)\r\n\r\n    wr.writerow(data)\r\no.close()\r\n","sub_path":"btc_pred_modify/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"287624880","text":"import math\n\narr = [4, 10, 3, 5, 1]\nresult = []\n\ndef swap(arr, i, j):\n    temp = arr[i]\n    arr[i] = arr[j]\n    arr[j] = temp\n\ndef createMaxHeap(arr):\n    print(arr)\n\n    startIndex = math.floor(len(arr)//2)-1\n    while startIndex > -1:\n        maxHeapify(arr, startIndex)\n        startIndex = startIndex-1\n\ndef maxHeapify(arr, startIndex):\n\n    largest = startIndex\n    left = (startIndex * 2) +1\n    right = (startIndex * 2) +2\n\n    if left < len(arr) and arr[startIndex] < arr[left]:\n        largest = left\n\n    if right < len(arr) and arr[startIndex] < arr[right]:\n        if arr[left] < arr[right]:\n            largest = right\n\n    if largest != startIndex:\n\n        swap(arr, startIndex, largest)\n        maxHeapify(arr, largest)\n\ndef heapsort(result, arr):\n    while len(arr)>0:\n        createMaxHeap(arr)\n        print(\"complete maxheap\")\n        print(arr)\n        ''' append the max-heap root to result '''\n        result.append(arr[0])\n        '''replace the first node with the last node, then drop the last node'''\n        if len(arr) != 1:\n            arr[0] = arr[len(arr)-1]\n        arr.pop()\n\nheapsort(result, arr)\n\nprint(result)\n","sub_path":"sortnsearching/heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"468366256","text":"from pyglet.media import Source, Player, load\nfrom youtube_dl import YoutubeDL\nprint(\"Hello this is music app\")\nprint(\"Pick one of options:\")\nwhile True:\n    options=[\"Show All songs\",\"Show detail of a song\",\"Play a song\",\"Search and download songs\",\"Exit\"]\n    for i,item in enumerate(options, 1):  # 1-based menu so the numbers match the comparisons below\n        print(i,\".\",item)\n    n = int(input(\">>>\"))  # input() returns a string; convert so the integer comparisons work\n    if n == 1:\n        print('Song list is empty')\n        a = input('Press enter to continue ...')\n    elif n == 2:\n        print('Song list is empty')\n        b = input('Press any key to continue...')\n    elif n == 3:\n        print('Song list is empty')\n        b = input('Press any key to continue...')\n    elif n == 5:\n        break\n    else:\n        s = input('Enter a song you want to search')\n        list=[]\n        print(\"Searching for songs, please wait ...\")\n    ","sub_path":"Session12/musicapp.py","file_name":"musicapp.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"15576433","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 11 16:06:49 
2019\r\n\r\n@author: Alexander Hurley\r\n\r\nML 3 - Deep Neural Network\r\n\r\nDataset divorce.csv was taken from https://archive.ics.uci.edu/ml/datasets/Divorce+Predictors+data+set\r\nand modified by Alexander Hurley for use in the INFR 3700U final project. \r\n\r\nNOTE! Visualizing the DNN effectively turned out to be a daunting task. To compensate for the minimal \r\n graphics used for the DNN, I have included extra statistical graphics for the dataset showing \r\n various correlations and distributions. The code for those graphs is commented out at the \r\n bottom of this file.\r\n\r\n\"\"\"\r\n\r\n# Import required libraries\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout\r\nfrom tensorflow.keras.utils import plot_model\r\nimport pandas as pd\r\n\r\n# I had to manually point to my GraphViz installation as the default installs\r\n# are broken on the current build via Anaconda for Windows 10\r\nimport os\r\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'\r\n\r\n# Reading data from file to the \"divorce\" variable\r\ndivorce = pd.read_csv(\"divorce.csv\",\r\n skiprows = 1,\r\n names=[\"atr1\", \"atr2\", \"atr3\", \"atr4\", \"atr5\", \"atr6\", \"atr7\", \"atr8\", \"atr9\", \r\n \"atr10\", \"atr11\", \"atr12\", \"atr13\", \"atr14\", \"atr15\", \"atr16\", \"atr17\", \r\n \"atr18\", \"atr19\", \"atr20\", \"atr21\", \"atr22\", \"atr23\", \"atr24\", \"atr25\", \r\n \"atr26\", \"atr27\", \"atr28\", \"atr29\", \"atr30\", \"atr31\", \"atr32\", \"atr33\", \r\n \"atr34\", \"atr35\", \"atr36\", \"atr37\", \"atr38\", \"atr39\", \"atr40\", \"atr41\", \r\n \"atr42\", \"atr43\", \"atr44\", \"atr45\", \"atr46\", \"atr47\", \"atr48\", \"atr49\", \r\n \"atr50\", \"atr51\", \"atr52\", \"atr53\", \"atr54\", \"divorce\"])\r\n'''\r\n# Split train and test sets 80/20\r\nfrom sklearn.model_selection import train_test_split\r\ntrain, test = train_test_split(divorce, test_size=0.2)#, random_state=42)\r\ny_train = train.iloc[:,-1]\r\nx_train = train.drop(['divorce'], axis=1)\r\ny_test = test.iloc[:,-1]\r\nx_test = test.drop(['divorce'], axis=1)\r\n\r\n# Standard Scaler using split data\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nscaler.fit(x_train)\r\nx_train = scaler.transform(x_train)\r\nx_test = scaler.transform(x_test)\r\n\r\n# Sequential Model using exponential, sigmoid, tanh and hard_sigmoid activations\r\nmodel = Sequential()\r\nmodel.add(Dense(40, activation='exponential', input_dim=54))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(40, activation='sigmoid'))\r\nmodel.add(Dropout(0.3))\r\n#model.add(Dense(40, activation='tanh'))\r\nmodel.add(Dense(40, activation='hard_sigmoid'))\r\nmodel.add(Dropout(0.3))\r\n\r\n# If you run this, please comment out lines 91-118\r\n# Calculate accuracy and loss\r\n# Compile using the adamax optimizer\r\nmodel.compile(loss='sparse_categorical_crossentropy',\r\n optimizer='adamax', \r\n metrics=['accuracy'])\r\n\r\n# Train model with the divorce train set\r\nmodel.fit(x_train, y_train, epochs=15, batch_size=1, verbose=0)# validation_split=0.2)\r\n\r\n# Evaluate on test set\r\nloss, acc = model.evaluate(x_test, y_test, batch_size=8)\r\n\r\n# Create a physical model (simple)\r\nplot_model(model, to_file='model1.png', show_shapes=False, show_layer_names=False)\r\n\r\n# Create a physical model (detail)\r\nplot_model(model, to_file='model2.png', show_shapes=True, show_layer_names=True)\r\n\r\n# Prints the Model Summary\r\nmodel.summary()\r\n\r\n# Print 
output accuracy and loss\r\nprint('\\nDNN Test Results: loss: {0:2.5f}, val_acc: {1:2.3f}'.format(loss, acc))\r\n'''\r\n'''\r\n# Calculate MSE of model\r\n# If you run this, please comment out lines 65-90\r\n\r\n# Compile using the adamax optimizer\r\nmodel.compile(loss='mse',\r\n              optimizer='adamax', \r\n              metrics=['accuracy'])\r\n\r\n# Train model with the divorce train set\r\nmodel.fit(x_train, y_train, epochs=15, batch_size=1, verbose=0)# validation_split=0.2)\r\n\r\n# Evaluate on test set\r\nloss, acc = model.evaluate(x_test, y_test, batch_size=8)\r\n\r\n# Create a physical model (simple)\r\nplot_model(model, to_file='model4.png', show_shapes=False, show_layer_names=False)\r\n\r\n# Create a physical model (detail)\r\nplot_model(model, to_file='model3.png', show_shapes=True, show_layer_names=True)\r\n\r\n# Prints the Model Summary\r\nmodel.summary()\r\n\r\n# Print output MSE\r\nprint('\\nDNN Test Results: MSE: {0:2.5f}'.format(loss))\r\nprint(\"\\n\\n\\n\")\r\n'''\r\n\r\n\r\n# The code below is used to generate statistical graphs for understanding the \r\n# dataset. Some of the more interesting attributes to test are:\r\n# atr1, atr42, atr43, atr46, atr54\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# Preparing the graphs\r\nattribute = \"atr47\"\r\ngraphPrep = divorce[[attribute, \"divorce\"]]\r\n\r\n# Dataframe where divorce occurs\r\ndivy = graphPrep[graphPrep[\"divorce\"] == 1]\r\n\r\n# Plotting y for divorce (indexing with the attribute variable so changing it above updates the plots)\r\ny1 = divy[attribute].value_counts(dropna=False).sort_index()\r\n\r\n# Dataframe where divorce does not occur\r\ndivn = graphPrep[graphPrep[\"divorce\"] == 0]\r\n\r\n# Plotting y for no divorce\r\ny2 = divn[attribute].value_counts(dropna=False).sort_index()\r\n\r\nprint(y1)\r\nprint(y2)\r\n\r\n# Building the divorce bar graph as red\r\nplt.bar([\"1\",\"2\",\"3\",\"4\",\"5\"], y1, color='red')\r\nplt.show()\r\n\r\n# Building the no divorce bar graph as blue\r\nplt.bar([\"1\",\"2\",\"3\",\"4\",\"5\"], y2, color='blue')\r\nplt.show()\r\n","sub_path":"dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"426668719","text":"import sys\nsys.path.append(\"../../../\")\n\nfrom advent_of_code.utils.utils import get_input_lines\n\nINPUT = get_input_lines()\nINPUT = [x.rstrip() for x in INPUT]\n\ndef naughty_nice(string: str) -> bool:\n    \"\"\"\n    Returns:\n        True if nice\n        False if naughty\n    \"\"\"\n\n    for combo in [\"ab\", \"cd\", \"pq\", \"xy\"]:\n        if string.find(combo) != -1:  # != -1 so a forbidden pair at index 0 also disqualifies\n            return False\n\n    # Get a count of the vowels in the input string\n    vowel_count: int = 0\n    for char in \"aeiou\":\n        vowel_count += string.count(char)\n\n    # Get the individual chars\n    chars = set(string)\n    has_duplicate: bool = False\n    for char in chars:\n        if string.find(char*2) >= 0:\n            has_duplicate = True\n            break\n\n    return (vowel_count >= 3\n            and has_duplicate)\n\nassert naughty_nice(\"ugknbfddgicrmopn\") == True\nassert naughty_nice(\"aaa\") == True\nassert naughty_nice(\"jchzalrnumimnmhp\") == False\nassert naughty_nice(\"haegwjzuvuyypxyu\") == False\nassert naughty_nice(\"dvszwmarrgswjxmb\") == False\n\ndef solve():\n    global INPUT\n\n    num_naughty: int = 0\n    num_nice: int = 0\n\n    for line in INPUT:\n        if naughty_nice(line):\n            num_nice += 1\n        else:\n            num_naughty += 1\n\n    return (num_nice, num_naughty)\n\nanswer_one = 
solve()\nprint(answer_one)\n","sub_path":"2015/05/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"268913559","text":"\"\"\"Create a Radiometrically Terrain-Corrected (RTC) image from a Sentinel-1 scene using GAMMA software\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport shutil\nimport sys\nimport zipfile\nfrom secrets import token_hex\n\nfrom hyp3lib import ExecuteError, OrbitDownloadError\nfrom hyp3lib import saa_func_lib as saa\nfrom hyp3lib.area2point import fix_geotiff_locations\nfrom hyp3lib.asf_geometry import reproject2grid\nfrom hyp3lib.byteSigmaScale import byteSigmaScale\nfrom hyp3lib.copy_metadata import copy_metadata\nfrom hyp3lib.createAmp import createAmp\nfrom hyp3lib.execute import execute\nfrom hyp3lib.getDemFor import getDemFile\nfrom hyp3lib.getParameter import getParameter\nfrom hyp3lib.get_bb_from_shape import get_bb_from_shape\nfrom hyp3lib.get_dem import get_dem\nfrom hyp3lib.get_orb import downloadSentinelOrbitFile\nfrom hyp3lib.ingest_S1_granule import ingest_S1_granule\nfrom hyp3lib.makeAsfBrowse import makeAsfBrowse\nfrom hyp3lib.make_cogs import cogify_dir\nfrom hyp3lib.ps2dem import ps2dem\nfrom hyp3lib.raster_boundary2shape import raster_boundary2shape\nfrom hyp3lib.rtc2color import rtc2color\nfrom hyp3lib.system import gamma_version\nfrom hyp3lib.utm2dem import utm2dem\nfrom osgeo import gdal\n\nimport hyp3_rtc_gamma\nfrom hyp3_rtc_gamma.check_coreg import CoregistrationError, check_coreg\nfrom hyp3_rtc_gamma.create_metadata import create_arc_xml\nfrom hyp3_rtc_gamma.metadata_utils import write_asf_meta\nfrom hyp3_rtc_gamma.smoothem import smooth_dem_tiles\nfrom hyp3_rtc_gamma.xml2meta import sentinel2meta\n\n\ndef fetch_orbit_file(in_file):\n    logging.info(f'Fetching orbit file for {in_file}')\n    orbit_file = None\n    try:\n        orbit_file, _ = downloadSentinelOrbitFile(in_file)\n    except OrbitDownloadError:\n        logging.warning('Unable to fetch orbit file. 
Continuing.')\n return orbit_file\n\n\ndef get_product_name(granule_name, orbit_file=None, resolution=30, gamma0=True, power=True,\n filtered=False, matching=False):\n platform = granule_name[0:3]\n beam_mode = granule_name[4:6]\n polarization = granule_name[14:16]\n datetime = granule_name[17:32]\n res = int(resolution)\n\n if orbit_file is None:\n o = 'O'\n elif 'POEORB' in orbit_file:\n o = 'P'\n elif 'RESORB' in orbit_file:\n o = 'R'\n else:\n o = 'O'\n\n product_id = token_hex(2).upper()\n\n g = 'g' if gamma0 else 's'\n p = 'p' if power else 'a'\n f = 'f' if filtered else 'n'\n m = 'm' if matching else 'd'\n\n product_name = f'{platform}_{beam_mode}_{datetime}_{polarization}{o}_RTC{res}_G_{g}{p}u{f}e{m}_{product_id}'\n return product_name\n\n\ndef perform_sanity_checks():\n logging.info(\"Performing sanity checks on output PRODUCTs\")\n tif_list = glob.glob(\"PRODUCT/*.tif\")\n for myfile in tif_list:\n if \"VV\" in myfile or \"HH\" in myfile or \"VH\" in myfile or \"HV\" in myfile:\n # Check that the main polarization file is on a 30 meter posting\n x, y, trans, proj = saa.read_gdal_file_geo(saa.open_gdal_file(myfile))\n logging.debug(\" trans[1] = {}; trans[5] = {}\".format(trans[1], trans[5]))\n if abs(trans[5]) > 10 and abs(trans[1]) > 10:\n logging.debug(\"Checking corner coordinates...\")\n ul1 = trans[3]\n lr1 = trans[3] + y * trans[5]\n ul2 = trans[0]\n lr2 = trans[0] + x * trans[1]\n if ul1 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: ul1 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: ul1 = {}\".format(ul1))\n elif lr1 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: lr1 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: lr1 = {}\".format(lr1))\n elif ul2 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: ul2 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: ul2 = {}\".format(ul2))\n elif lr2 % 30 != 0:\n logging.error(\"ERROR: Corner coordinates are amiss\")\n logging.error(\"ERROR: lr2 coordinate not on a 30 meter posting\")\n logging.error(\"ERROR: lr2 = {}\".format(lr2))\n else:\n logging.debug(\"...ok\")\n\n\ndef reproject_dir(dem_type, res, prod_dir=None):\n if \"REMA\" in dem_type:\n epsg = 3031\n elif \"GIMP\" in dem_type:\n epsg = 3413\n else:\n return\n\n tmp_geotiff = \"tmp_reproj_dir_{}.tif\".format(os.getpid())\n home = os.getcwd()\n if prod_dir:\n os.chdir(prod_dir)\n\n for inGeotiff in glob.glob(\"*.tif\"):\n in_raster = gdal.Open(inGeotiff)\n out_raster = reproject2grid(in_raster, epsg, xRes=res)\n in_raster = None # Because GDAL is weird!\n gdal.Translate(tmp_geotiff, out_raster)\n os.remove(inGeotiff)\n shutil.move(tmp_geotiff, inGeotiff)\n\n if prod_dir:\n os.chdir(home)\n\n\ndef report_kwargs(in_name, out_name, res, dem, roi, shape, match_flag, dead_flag, gamma_flag, lo_flag,\n pwr_flag, filter_flag, looks, terms, par, no_cross_pol, smooth, area, orbit_file):\n logging.info(\"Parameters for this run:\")\n logging.info(\" Input name : {}\".format(in_name))\n logging.info(\" Output name : {}\".format(out_name))\n logging.info(\" Output resolution : {}\".format(res))\n logging.info(\" DEM file : {}\".format(dem))\n if roi is not None:\n logging.info(\" Area of Interest : {}\".format(roi))\n if shape is not None:\n logging.info(\" Shape File : {}\".format(shape))\n logging.info(\" Match flag : {}\".format(match_flag))\n logging.info(\" If no match, use Dead Reckoning : 
{}\".format(dead_flag))\n logging.info(\" Gamma0 output : {}\".format(gamma_flag))\n logging.info(\" Low resolution flag : {}\".format(lo_flag))\n logging.info(\" Create power images : {}\".format(pwr_flag))\n logging.info(\" Speckle Filtering : {}\".format(filter_flag))\n logging.info(\" Number of looks to take : {}\".format(looks))\n logging.info(\" Number of terms in used in match : {}\".format(terms))\n if par is not None:\n logging.info(\" Offset file : {}\".format(par))\n logging.info(\" Process crosspol : {}\".format(not no_cross_pol))\n logging.info(\" Smooth DEM tiles : {}\".format(smooth))\n logging.info(\" Save Pixel Area : {}\".format(area))\n logging.info(\" Orbit File : {}\".format(orbit_file))\n\n\ndef process_pol(in_file, rtc_name, out_name, pol, res, look_fact, match_flag, dead_flag, gamma_flag,\n filter_flag, pwr_flag, browse_res, dem, terms, par=None, area=False, orbit_file=None):\n logging.info(\"Processing the {} polarization\".format(pol))\n\n mgrd = \"{out}.{pol}.mgrd\".format(out=out_name, pol=pol)\n tif = \"image_cal_map.mli.tif\"\n\n # Ingest the granule into gamma format\n ingest_S1_granule(in_file, pol, look_fact, mgrd, orbit_file=orbit_file)\n width = getParameter(\"{}.par\".format(mgrd), \"range_samples\")\n\n # Apply filter if requested\n if filter_flag:\n el_looks = look_fact * 30\n execute(f\"enh_lee {mgrd} temp.mgrd {width} {el_looks} 1 7 7\", uselogging=True)\n shutil.move(\"temp.mgrd\", mgrd)\n\n options = \"-p -n {} -q -c \".format(terms)\n if gamma_flag:\n options += \"-g \"\n\n logging.info(\"Running RTC process... initializing\")\n geo_dir = \"geo_{}\".format(pol)\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 0 {options}\", uselogging=True)\n\n if match_flag and not par:\n fail = False\n logging.info(\"Running RTC process... coarse matching\")\n try:\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 1 {options}\", uselogging=True)\n except ExecuteError:\n logging.warning(\"WARNING: Determination of the initial offset failed, skipping initial offset\")\n\n logging.info(\"Running RTC process... fine matching\")\n try:\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 2 {options}\", uselogging=True)\n except ExecuteError:\n if not dead_flag:\n logging.error(\"ERROR: Failed to match images\")\n sys.exit(1)\n else:\n logging.warning(\"WARNING: Coregistration has failed; defaulting to dead reckoning\")\n os.remove(\"{}/{}\".format(geo_dir, \"image.diff_par\"))\n fail = True\n\n if not fail:\n try:\n check_coreg(out_name, res, max_offset=75, max_error=2.0)\n except CoregistrationError:\n if not dead_flag:\n logging.error(\"ERROR: Failed the coregistration check\")\n sys.exit(1)\n else:\n logging.warning(\"WARNING: Coregistration check has failed; defaulting to dead reckoning\")\n os.remove(\"{}/{}\".format(geo_dir, \"image.diff_par\"))\n\n logging.info(\"Running RTC process... 
finalizing\")\n if par:\n shutil.copy(par, \"{}/image.diff_par\".format(geo_dir))\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {geo_dir}/area.dem\"\n f\" {geo_dir}/area.dem_par {geo_dir} image {res} 3 {options}\", uselogging=True)\n\n os.chdir(geo_dir)\n\n # Divide sigma0 by sin(theta) to get beta0\n execute(f\"float_math image_0.inc_map - image_1.sin_theta {width} 7 - - 1 1 - 0\")\n\n execute(f\"float_math image_cal_map.mli image_1.sin_theta image_1.beta {width} 3 - - 1 1 - 0\")\n\n execute(f\"float_math image_1.beta image_0.sim image_1.flat {width} 3 - - 1 1 - 0\")\n\n # Make Geotiff Files\n execute(f\"data2geotiff area.dem_par image_0.ls_map 5 {out_name}.ls_map.tif\", uselogging=True)\n execute(f\"data2geotiff area.dem_par image_0.inc_map 2 {out_name}.inc_map.tif\", uselogging=True)\n execute(f\"data2geotiff area.dem_par image_1.flat 2 {out_name}.flat.tif\", uselogging=True)\n execute(\"data2geotiff area.dem_par area.dem 2 outdem.tif\", uselogging=True)\n\n gdal.Translate(\"{}.dem.tif\".format(out_name), \"outdem.tif\", outputType=gdal.GDT_Int16)\n\n if gamma_flag:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_gamma0'.format(pol)])\n else:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_sigma0'.format(pol)])\n shutil.move(\"tmp.tif\", tif)\n createAmp(tif, nodata=0)\n\n # Make meta files and stats\n execute(f\"asf_import -format geotiff {out_name}.ls_map.tif ls_map\", uselogging=True)\n execute(\"stats -overstat -overmeta ls_map\", uselogging=True)\n execute(f\"asf_import -format geotiff {out_name}.inc_map.tif inc_map\", uselogging=True)\n execute(\"stats -overstat -overmeta -mask 0 inc_map\", uselogging=True)\n execute(f\"asf_import -format geotiff image_cal_map.mli_amp.tif tc_{pol}\", uselogging=True)\n execute(f\"stats -nostat -overmeta -mask 0 tc_{pol}\", uselogging=True)\n\n # Make browse resolution tif file\n if res == browse_res:\n shutil.copy(\"image_cal_map.mli_amp.tif\", \"{}_{}_{}m.tif\".format(out_name, pol, browse_res))\n else:\n gdal.Translate(\"{}_{}_{}m.tif\".format(out_name, pol, browse_res), \"image_cal_map.mli_amp.tif\",\n xRes=browse_res, yRes=browse_res)\n\n # Move files into the product directory\n out_dir = \"../PRODUCT\"\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if pwr_flag:\n shutil.move(tif, \"{}/{}\".format(out_dir, rtc_name))\n else:\n copy_metadata(tif, \"image_cal_map.mli_amp.tif\")\n shutil.move(\"image_cal_map.mli_amp.tif\", \"{}/{}\".format(out_dir, rtc_name))\n\n shutil.move(\"{}.ls_map.tif\".format(out_name), \"{}/{}_ls_map.tif\".format(out_dir, out_name))\n shutil.move(\"{}.inc_map.tif\".format(out_name), \"{}/{}_inc_map.tif\".format(out_dir, out_name))\n shutil.move(\"{}.dem.tif\".format(out_name), \"{}/{}_dem.tif\".format(out_dir, out_name))\n if area:\n shutil.move(\"{}.flat.tif\".format(out_name), \"{}/{}_flat_{}.tif\".format(out_dir, out_name, pol))\n\n os.chdir(\"..\")\n\n\ndef process_2nd_pol(in_file, rtc_name, cpol, res, look_fact, gamma_flag, filter_flag, pwr_flag, browse_res,\n outfile, dem, terms, par=None, area=False, orbit_file=None):\n if cpol == \"VH\":\n mpol = \"VV\"\n else:\n mpol = \"HH\"\n\n mgrd = \"{out}.{pol}.mgrd\".format(out=outfile, pol=cpol)\n tif = \"image_cal_map.mli.tif\"\n\n # Ingest the granule into gamma format\n ingest_S1_granule(in_file, cpol, look_fact, mgrd, orbit_file=orbit_file)\n width = getParameter(\"{}.par\".format(mgrd), \"range_samples\")\n\n # Apply filtering if requested\n if filter_flag:\n el_looks = look_fact * 30\n execute(f\"enh_lee {mgrd} 
temp.mgrd {width} {el_looks} 1 7 7\", uselogging=True)\n shutil.move(\"temp.mgrd\", mgrd)\n\n options = \"-p -n {} -q -c \".format(terms)\n if gamma_flag:\n options += \"-g \"\n\n home_dir = os.getcwd()\n geo_dir = \"geo_{}\".format(cpol)\n mdir = \"geo_{}\".format(mpol)\n if not os.path.isdir(geo_dir):\n os.mkdir(geo_dir)\n\n shutil.copy(\"geo_{}/image.diff_par\".format(mpol), \"{}\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.map_to_rdc\".format(mpol), \"{}/image_0.map_to_rdc\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.ls_map\".format(mpol), \"{}/image_0.ls_map\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.inc_map\".format(mpol), \"{}/image_0.inc_map\".format(geo_dir))\n os.symlink(\"../geo_{}/image_0.sim\".format(mpol), \"{}/image_0.sim\".format(geo_dir))\n os.symlink(\"../geo_{}/area.dem_par\".format(mpol), \"{}/area.dem_par\".format(geo_dir))\n\n if par:\n shutil.copy(par, \"{}/image.diff_par\".format(geo_dir))\n\n execute(f\"mk_geo_radcal {mgrd} {mgrd}.par {dem} {dem}.par {mdir}/area.dem\"\n f\" {mdir}/area.dem_par {geo_dir} image {res} 3 {options}\", uselogging=True)\n\n os.chdir(geo_dir)\n\n # Divide sigma0 by sin(theta) to get beta0\n execute(f\"float_math image_0.inc_map - image_1.sin_theta {width} 7 - - 1 1 - 0\")\n\n execute(f\"float_math image_cal_map.mli image_1.sin_theta image_1.beta {width} 3 - - 1 1 - 0\")\n\n execute(f\"float_math image_1.beta image_0.sim image_1.flat {width} 3 - - 1 1 - 0\")\n\n # Make geotiff file\n if gamma_flag:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_gamma0'.format(cpol)])\n else:\n gdal.Translate(\"tmp.tif\", tif, metadataOptions=['Band1={}_sigma0'.format(cpol)])\n shutil.move(\"tmp.tif\", tif)\n\n # Make browse resolution file\n createAmp(tif, nodata=0)\n if res == browse_res:\n shutil.copy(\"image_cal_map.mli_amp.tif\", \"{}_{}_{}m.tif\".format(outfile, cpol, browse_res))\n else:\n gdal.Translate(\"{}_{}_{}m.tif\".format(outfile, cpol, browse_res), \"image_cal_map.mli_amp.tif\", xRes=browse_res,\n yRes=browse_res)\n\n # Create meta files and stats\n execute(f\"asf_import -format geotiff image_cal_map.mli_amp.tif tc_{cpol}\", uselogging=True)\n execute(f\"stats -nostat -overmeta -mask 0 tc_{cpol}\", uselogging=True)\n\n # Move files to product directory\n out_dir = \"../PRODUCT\"\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n execute(f\"data2geotiff area.dem_par image_1.flat 2 {outfile}.flat.tif\", uselogging=True)\n\n if pwr_flag:\n shutil.move(tif, \"{}/{}\".format(out_dir, rtc_name))\n else:\n copy_metadata(tif, \"image_cal_map.mli_amp.tif\")\n shutil.move(\"image_cal_map.mli_amp.tif\", \"{}/{}\".format(out_dir, rtc_name))\n if area:\n shutil.move(\"{}.flat.tif\".format(outfile), \"{}/{}_flat_{}.tif\".format(out_dir, rtc_name, cpol))\n\n os.chdir(home_dir)\n\n\ndef create_browse_images(out_name, pol, cpol, browse_res):\n ampfile = \"geo_{pol}/{name}_{pol}_{res}m.tif\".format(pol=pol, name=out_name, res=browse_res)\n if cpol:\n ampfile2 = \"geo_{pol}/{name}_{pol}_{res}m.tif\".format(pol=cpol, name=out_name, res=browse_res)\n threshold = -24\n outfile = \"{}_rgb.tif\".format(out_name)\n rtc2color(ampfile, ampfile2, threshold, outfile, amp=True, cleanup=True)\n colorname = \"PRODUCT/{}_rgb\".format(out_name)\n makeAsfBrowse(outfile, colorname)\n\n os.chdir(\"geo_{}\".format(pol))\n outdir = \"../PRODUCT\"\n outfile = \"{}/{}\".format(outdir, out_name)\n ampfile = \"{name}_{pol}_{res}m.tif\".format(pol=pol, name=out_name, res=browse_res)\n sigmafile = ampfile.replace(\".tif\", \"_sigma.tif\")\n 
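# Stretch the floating-point amplitude into byte range so the browse image has usable contrast.\n    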
byteSigmaScale(ampfile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n\n os.chdir(\"../PRODUCT\")\n\n infile = \"{}_inc_map.tif\".format(out_name)\n outfile = \"{}_inc_map\".format(out_name)\n sigmafile = infile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(infile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n os.remove(sigmafile)\n\n infile = \"{}_ls_map.tif\".format(out_name)\n outfile = \"{}_ls_map\".format(out_name)\n makeAsfBrowse(infile, outfile)\n\n infile = \"{}_dem.tif\".format(out_name)\n outfile = \"{}_dem\".format(out_name)\n sigmafile = infile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(infile, sigmafile)\n makeAsfBrowse(sigmafile, outfile)\n os.remove(sigmafile)\n\n raster_boundary2shape(out_name + \"_\" + pol + \".tif\", None, out_name + \"_shape.shp\", use_closing=False,\n pixel_shift=True, fill_holes=True)\n\n os.chdir(\"..\")\n\n\ndef create_consolidated_log(out_name, lo_flag, dead_flag, match_flag, gamma_flag, roi,\n shape, pwr_flag, filter_flag, pol, looks, log_file, smooth, terms,\n no_cross_pol, par):\n out = \"PRODUCT\"\n logname = \"{}/{}.log\".format(out, out_name)\n logging.info(\"Creating log file: {}\".format(logname))\n\n f = open(logname, \"w\")\n f.write(\"Consolidated log for: {}\\n\".format(out_name))\n options = \"\"\n if lo_flag:\n options += \"-l \"\n if not dead_flag:\n options += \"--fail \"\n if match_flag:\n options += \"-n \"\n if not gamma_flag:\n options += \"--sigma \"\n if filter_flag:\n options += \"-f \"\n if not pwr_flag:\n options += \"--amp \"\n if smooth:\n options += \"--smooth \"\n options += \"-k {}\".format(looks)\n options += \"-t {}\".format(terms)\n if par:\n options += \"--par {}\".format(par)\n if no_cross_pol:\n options += \"--nocrosspol\"\n if roi:\n options += \"-a {}\".format(roi)\n if shape:\n options += \"-s {}\".format(shape)\n\n cmd = \"rtc_sentinel.py \" + options\n f.write(\"Command: {}\\n\".format(cmd))\n f.close()\n\n geo_dir = \"geo_{}\".format(pol)\n add_log(log_file, logname)\n add_log(\"{}/mk_geo_radcal_0.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_1.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_2.log\".format(geo_dir), logname)\n add_log(\"{}/mk_geo_radcal_3.log\".format(geo_dir), logname)\n add_log(\"coreg_check.log\", logname)\n\n\ndef add_log(log, full_log):\n g = open(full_log, \"a\")\n g.write(\"==============================================\\n\")\n g.write(\"Log: {}\\n\".format(log))\n g.write(\"==============================================\\n\")\n\n if not os.path.isfile(log):\n g.write(\"(not found)\\n\")\n g.close()\n return ()\n\n f = open(log, \"r\")\n for line in f:\n g.write(\"{}\".format(line))\n f.close()\n\n g.write(\"\\n\")\n g.close()\n\n\ndef create_iso_xml(outfile, out_name, pol, cpol, in_file, dem_type, log, gamma_ver):\n hdf5_name = \"hdf5_list.txt\"\n path = in_file\n etc_dir = os.path.abspath(os.path.dirname(hyp3_rtc_gamma.etc.__file__))\n shutil.copy(\"{}/sentinel_xml.xsl\".format(etc_dir), \"sentinel_xml.xsl\")\n\n out = \"PRODUCT\"\n\n execute(f\"xsltproc --stringparam path {path} --stringparam timestamp timestring\"\n f\" --stringparam file_size 1000 --stringparam server stuff\"\n f\" --output out.xml sentinel_xml.xsl {path}/manifest.safe\", uselogging=True)\n\n m = sentinel2meta(\"out.xml\")\n write_asf_meta(m, \"out.meta\")\n\n ver_file = \"{}/manifest.safe\".format(path)\n ipf_ver = None\n if os.path.exists(ver_file):\n f = open(ver_file, \"r\")\n for line in f:\n if \"IPF\" in line:\n t = line.split('\"')\n ipf_ver = t[3].strip()\n 
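# The IPF processor version sits inside double quotes on the manifest line; splitting on '\"' and taking field 3 extracts it.\n    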
else:\n logging.warning(\"No manifest.safe file found in {}\".format(path))\n\n g = open(hdf5_name, \"w\")\n g.write(\"[GAMMA RTC]\\n\")\n g.write(\"granule = {}\\n\".format(in_file.replace(\".SAFE\", \"\")))\n g.write(\"metadata = out.meta\\n\")\n\n geo_dir = \"geo_{}\".format(pol)\n dem_seg = \"{}/area.dem\".format(geo_dir)\n dem_seg_par = \"{}/area.dem_par\".format(geo_dir)\n\n g.write(\"oversampled dem file = {}\\n\".format(dem_seg))\n g.write(\"oversampled dem metadata = {}\\n\".format(dem_seg_par))\n g.write(\"original dem file = {}/{}_dem.tif\\n\".format(out, out_name))\n g.write(\"layover shadow mask = {}/{}_ls_map.tif\\n\".format(out, out_name))\n g.write(\"layover shadow stats = {}/ls_map.stat\\n\".format(geo_dir))\n g.write(\"incidence angle file = {}/{}_inc_map.tif\\n\".format(out, out_name))\n g.write(\"incidence angle metadata = {}/inc_map.meta\\n\".format(geo_dir))\n\n g.write(\"input {} file = {}\\n\".format(pol, outfile))\n g.write(\"terrain corrected {pol} metadata = {dir}/tc_{pol}.meta\\n\".format(pol=pol, dir=geo_dir))\n g.write(\"terrain corrected {} file = {}/{}\\n\".format(pol, out, outfile))\n\n if cpol:\n outfile2 = outfile.replace(pol, cpol)\n g.write(\"input {} file = {}\\n\".format(pol, outfile))\n geo_dir2 = geo_dir.replace(pol, cpol)\n g.write(\"terrain corrected {pol} metadata = {dir}/tc_{pol}.meta\\n\".format(pol=cpol, dir=geo_dir2))\n g.write(\"terrain corrected {} file = {}/{}\\n\".format(cpol, out, outfile2))\n\n g.write(\"initial processing log = {}\\n\".format(log))\n g.write(\"terrain correction log = {}\\n\".format(log))\n g.write(\"main log = {}\\n\".format(log))\n g.write(\"mk_geo_radcal_0 log = {}/mk_geo_radcal_0.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_1 log = {}/mk_geo_radcal_1.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_2 log = {}/mk_geo_radcal_2.log\\n\".format(geo_dir))\n g.write(\"mk_geo_radcal_3 log = {}/mk_geo_radcal_3.log\\n\".format(geo_dir))\n g.write(\"coreg_check log = coreg_check.log\\n\")\n g.write(\"mli.par file = {}.{}.mgrd.par\\n\".format(out_name, pol))\n g.write(\"gamma version = {}\\n\".format(gamma_ver))\n g.write(\"hyp3_rtc version = {}\\n\".format(hyp3_rtc_gamma.__version__))\n g.write(\"ipf version = {}\\n\".format(ipf_ver))\n g.write(\"dem source = {}\\n\".format(dem_type))\n g.write(\"browse image = {}/{}.png\\n\".format(out, out_name))\n g.write(\"kml overlay = {}/{}.kmz\\n\".format(out, out_name))\n\n g.close()\n\n execute(f\"write_hdf5_xml {hdf5_name} {out_name}.xml\", uselogging=True)\n\n logging.info(\"Generating {}.iso.xml with {}/rtc_iso.xsl\\n\".format(out_name, etc_dir))\n\n execute(f\"xsltproc {etc_dir}/rtc_iso.xsl {out_name}.xml > {out_name}.iso.xml\", uselogging=True)\n\n shutil.copy(\"{}.iso.xml\".format(out_name), \"{}\".format(out))\n\n\ndef clean_prod_dir():\n os.chdir(\"PRODUCT\")\n for myfile in glob.glob(\"*ls_map*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*ls_map*kmz\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*inc_map*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*inc_map*kmz\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*dem*png*\"):\n os.remove(myfile)\n for myfile in glob.glob(\"*dem*kmz\"):\n os.remove(myfile)\n os.chdir(\"..\")\n\n\ndef configure_log_file():\n log_file = f'rtc_sentinel_{os.getpid()}.log'\n log_file_handler = logging.FileHandler(log_file)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')\n log_file_handler.setFormatter(formatter)\n 
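# Attach the file handler to the root logger so messages from every module land in the run log.\n    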
logging.getLogger().addHandler(log_file_handler)\n return log_file\n\n\ndef rtc_sentinel_gamma(in_file,\n out_name=None,\n res=None,\n dem=None,\n roi=None,\n shape=None,\n match_flag=False,\n dead_flag=True,\n gamma_flag=True,\n lo_flag=True,\n pwr_flag=True,\n filter_flag=False,\n looks=None,\n terms=1,\n par=None,\n no_cross_pol=False,\n smooth=False,\n area=False):\n\n log_file = configure_log_file()\n\n logging.info(\"===================================================================\")\n logging.info(\" Sentinel RTC Program - Starting\")\n logging.info(\"===================================================================\")\n\n if res is None:\n res = 10\n if lo_flag:\n res = 30\n\n browse_res = 30\n if res > browse_res:\n browse_res = res\n\n if looks is None:\n if res == 30:\n if \"GRD\" in in_file:\n looks = 6\n else:\n looks = 3\n else:\n looks = int(res / 10 + 0.5)\n\n in_file = in_file.rstrip('/')\n if not os.path.exists(in_file):\n logging.error(\"ERROR: Input file {} does not exist\".format(in_file))\n sys.exit(1)\n if in_file.endswith('.zip'):\n logging.info(f'Unzipping {in_file}')\n with zipfile.ZipFile(in_file, 'r') as z:\n z.extractall()\n in_file = in_file.replace('.zip', '.SAFE')\n\n input_type = in_file[7:10]\n\n orbit_file = fetch_orbit_file(in_file)\n\n if out_name is None:\n out_name = get_product_name(in_file, orbit_file, res, gamma_flag, pwr_flag, filter_flag, match_flag)\n\n report_kwargs(in_file, out_name, res, dem, roi, shape, match_flag, dead_flag, gamma_flag, lo_flag,\n pwr_flag, filter_flag, looks, terms, par, no_cross_pol, smooth, area, orbit_file)\n\n orbit_file = os.path.abspath(orbit_file) # ingest_S1_granule requires absolute path\n\n if dem is None:\n logging.info(\"Getting DEM file covering this SAR image\")\n tifdem = \"tmp_{}_dem.tif\".format(os.getpid())\n if shape is not None:\n min_x, min_y, max_x, max_y = get_bb_from_shape(shape)\n logging.info(f'bounding box: {min_x}, {min_y}, {max_x}, {max_y}')\n roi = [min_x, min_y, max_x, max_y]\n if roi is not None:\n dem_type = get_dem(roi[0], roi[1], roi[2], roi[3], tifdem, post=30)\n else:\n demfile, dem_type = getDemFile(in_file, tifdem, post=30)\n\n if 'REMA' in dem_type and smooth:\n logging.info(\"Preparing to smooth DEM tiles\")\n dem, parfile = smooth_dem_tiles(\"DEM\", build=True)\n else:\n dem = \"area.dem\"\n parfile = \"area.dem.par\"\n if \"GIMP\" in dem_type or \"REMA\" in dem_type:\n ps2dem(tifdem, dem, parfile)\n else:\n utm2dem(tifdem, dem, parfile)\n os.remove(tifdem)\n elif \".tif\" in dem:\n tiff_dem = dem\n dem = \"area.dem\"\n parfile = \"area.dem.par\"\n utm2dem(tiff_dem, dem, parfile)\n dem_type = \"Unknown\"\n elif os.path.isfile(\"{}.par\".format(dem)):\n dem_type = \"Unknown\"\n else:\n logging.error(\"ERROR: Unrecognized DEM: {}\".format(dem))\n sys.exit(1)\n\n vvlist = glob.glob(\"{}/*/*vv*.tiff\".format(in_file))\n vhlist = glob.glob(\"{}/*/*vh*.tiff\".format(in_file))\n hhlist = glob.glob(\"{}/*/*hh*.tiff\".format(in_file))\n hvlist = glob.glob(\"{}/*/*hv*.tiff\".format(in_file))\n\n cpol = None\n pol = None\n if vvlist:\n logging.info(\"Found VV polarization - processing\")\n pol = \"VV\"\n rtc_name = out_name + \"_\" + pol + \".tif\"\n process_pol(in_file, rtc_name, out_name, pol, res, looks,\n match_flag, dead_flag, gamma_flag, filter_flag, pwr_flag,\n browse_res, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if vhlist and not no_cross_pol:\n cpol = \"VH\"\n rtc_name = out_name + \"_\" + cpol + \".tif\"\n logging.info(\"Found VH polarization - processing\")\n 
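# The cross-pol channel reuses the co-pol geocoding solution rather than re-running the matching.\n        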
process_2nd_pol(in_file, rtc_name, cpol, res, looks,\n gamma_flag, filter_flag, pwr_flag, browse_res,\n out_name, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hhlist:\n logging.info(\"Found HH polarization - processing\")\n pol = \"HH\"\n rtc_name = out_name + \"_\" + pol + \".tif\"\n process_pol(in_file, rtc_name, out_name, pol, res, looks,\n match_flag, dead_flag, gamma_flag, filter_flag, pwr_flag,\n browse_res, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hvlist and not no_cross_pol:\n cpol = \"HV\"\n logging.info(\"Found HV polarization - processing\")\n rtc_name = out_name + \"_\" + cpol + \".tif\"\n process_2nd_pol(in_file, rtc_name, cpol, res, looks,\n gamma_flag, filter_flag, pwr_flag, browse_res,\n out_name, dem, terms, par=par, area=area, orbit_file=orbit_file)\n\n if hhlist is None and vvlist is None:\n logging.error(f\"ERROR: Can not find VV or HH polarization in {in_file}\")\n sys.exit(1)\n\n fix_geotiff_locations()\n reproject_dir(dem_type, res, prod_dir=\"PRODUCT\")\n reproject_dir(dem_type, res, prod_dir=\"geo_{}\".format(pol))\n if cpol:\n reproject_dir(dem_type, res, prod_dir=\"geo_{}\".format(cpol))\n create_browse_images(out_name, pol, cpol, browse_res)\n rtc_name = out_name + \"_\" + pol + \".tif\"\n gamma_ver = gamma_version()\n create_iso_xml(rtc_name, out_name, pol, cpol, in_file, dem_type, log_file, gamma_ver)\n create_arc_xml(in_file, out_name, input_type, gamma_flag, pwr_flag, filter_flag, looks, pol, cpol,\n dem_type, res, hyp3_rtc_gamma.__version__, gamma_ver, rtc_name)\n cogify_dir(directory='PRODUCT')\n clean_prod_dir()\n perform_sanity_checks()\n logging.info(\"===================================================================\")\n logging.info(\" Sentinel RTC Program - Completed\")\n logging.info(\"===================================================================\")\n\n create_consolidated_log(out_name, lo_flag, dead_flag, match_flag, gamma_flag, roi,\n shape, pwr_flag, filter_flag, pol, looks, log_file, smooth, terms,\n no_cross_pol, par)\n return 'PRODUCT', out_name\n\n\ndef main():\n \"\"\"Main entrypoint\"\"\"\n parser = argparse.ArgumentParser(\n prog='rtc_sentinel.py',\n description=__doc__,\n )\n parser.add_argument('input', help='Name of input file, either .zip or .SAFE')\n parser.add_argument(\"-o\", \"--outputResolution\", type=float, help=\"Desired output resolution\")\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-e\", \"--externalDEM\", help=\"Specify a DEM file to use - must be in UTM projection\")\n group.add_argument(\"-r\", \"--roi\", type=float, nargs=4, metavar=('LON_MIN', 'LAT_MIN', 'LON_MAX', 'LAT_MAX'),\n help=\"Specify ROI to use\")\n group.add_argument(\"-s\", \"--shape\", help=\"Specify shape file to use\")\n\n parser.add_argument(\"-n\", action=\"store_false\", help=\"Do not perform matching\")\n parser.add_argument(\"--fail\", action=\"store_true\",\n help=\"if matching fails, fail the program. 
Default: use dead reckoning\")\n parser.add_argument(\"--sigma\", action=\"store_true\", help=\"create sigma0 instead of gamma0\")\n parser.add_argument(\"--amp\", action=\"store_true\", help=\"create amplitude images instead of power\")\n parser.add_argument(\"--smooth\", action=\"store_true\", help=\"smooth DEM file before terrain correction\")\n parser.add_argument(\"-l\", action=\"store_true\", help=\"create a lo-res output (30m)\")\n parser.add_argument(\"-f\", action=\"store_true\", help=\"run enhanced lee filter\")\n parser.add_argument(\"-k\", \"--looks\", type=int,\n help=\"set the number of looks to take (def:3 for SLC/6 for GRD)\")\n parser.add_argument(\"-t\", \"--terms\", type=int, default=1,\n help=\"set the number of terms in matching polynomial (default is 1)\")\n parser.add_argument('--output', help='base name of the output files')\n parser.add_argument(\"--par\", help=\"Stack processing - use specified offset file and don't match\")\n parser.add_argument(\"--nocrosspol\", action=\"store_true\", help=\"Do not process the cross pol image\")\n parser.add_argument(\"-a\", \"--area\", action=\"store_true\", help=\"Keep area map\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n\n # FIXME: This function's inputs should be 1:1 (name and value!) with CLI args!\n rtc_sentinel_gamma(args.input,\n out_name=args.output,\n res=args.outputResolution,\n dem=args.externalDEM,\n roi=args.roi,\n shape=args.shape,\n match_flag=args.n,\n dead_flag=not args.fail,\n gamma_flag=not args.sigma,\n lo_flag=args.l,\n pwr_flag=not args.amp,\n filter_flag=args.f,\n looks=args.looks,\n terms=args.terms,\n par=args.par,\n no_cross_pol=args.nocrosspol,\n smooth=args.smooth,\n area=args.area)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hyp3_rtc_gamma/rtc_sentinel.py","file_name":"rtc_sentinel.py","file_ext":"py","file_size_in_byte":34326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617356755","text":"import requests\nimport os\nimport csv\n\ndef get_stock_cache(cache_dir, start, end):\n \"\"\"\n Returns dict of \n { \n start: {\n stock1: price1,\n stock2: price2,\n ...\n },\n date2: {\n ...\n },\n ...\n end: {\n ...\n }\n }\n for all stocks found in .rh_cache/historical_prices\n \"\"\"\n result = {}\n for stockcsv in os.listdir(cache_dir):\n filepath = \"{}/{}\".format(cache_dir, stockcsv)\n with open(filepath, \"r\") as f:\n csv_reader = csv.reader(f, delimiter=\",\")\n header = None\n for row in csv_reader:\n if header is None:\n header = [r for r in row]\n else:\n date, price = row[0], row[1]\n stock = stockcsv.replace(\".csv\", \"\")\n if date >= start and date <= end:\n if date not in result: result[date] = {}\n result[date][stock] = price\n return result\n\nclass IexStock(object):\n def __init__(self, token):\n self.token = token\n self.base_url = \"https://cloud.iexapis.com/v1\"\n\n def hist_data(self, symbol, date_range):\n suffix=\"token={}&chartCloseOnly=true\".format(self.token)\n url = \"{}/stock/{}/chart/{}?{}\".format(self.base_url, symbol, date_range, suffix)\n return requests.get(url).json()\n","sub_path":"Robinhood/IexStock.py","file_name":"IexStock.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317365852","text":"import json\nimport base64\nfrom datetime import datetime, timedelta\nfrom uuid import 
uuid4\nfrom urllib.parse import urlparse\n\nfrom h42auth import app, mongo\n\nclass ForwardAuth:\n    __user = None\n    __new = False\n    server = None\n    host = None\n    method = None\n    protocol = None\n    port = None\n    uri = None\n    url = None\n    token = None\n    user = None\n    souid = None\n    is_authenticated = False\n    expires = None\n    auth_expires = None\n\n    def __init__(self, data=None):\n        if data:\n            self.load(data)\n        else:\n            self.__new = True\n            self.token = str(uuid4())\n            self.expires = datetime.utcnow() + timedelta(hours=2)\n            self.auth_expires = datetime.utcnow() + timedelta(minutes=5)\n\n    def generate_url(self):\n        if (self.protocol == 'https') & (self.port == '443'):\n            self.url = '%s://%s%s' % (str(self.protocol), str(self.host), str(self.uri))\n        elif (self.protocol == 'http') & (self.port == '80'):\n            self.url = '%s://%s%s' % (str(self.protocol), str(self.host), str(self.uri))\n        else:\n            self.url = '%s://%s:%s%s' % (self.protocol, self.host, self.port, self.uri)\n        return self.url\n\n    def set_user(self, user):\n        self.is_authenticated = True\n        self.__user = user\n        self.user = user.username\n        self.souid = user.uid\n        self.auth_expires = datetime.utcnow() + timedelta(hours=2)\n\n    def check_headers(self, headers):\n        app.logger.info(str(headers))\n        print(headers)\n        # dict.has_key() is Python 2 only; this module imports urllib.parse, so use the 'in' operator\n        if 'X-Auth-Service' in headers:\n            self.url = 'https://{}/'.format(headers['X-Auth-Service'])\n            self.host = headers['X-Auth-Service']\n        elif 'Referer' in headers:\n            url = urlparse(headers['Referer'])\n            self.url = headers['Referer']\n            if url.scheme:\n                self.protocol = url.scheme\n            if url.port:\n                self.port = url.port\n            if url.hostname:\n                self.host = url.hostname\n            if url.path:\n                self.uri = url.path\n            else:\n                self.uri = \"/\"\n        else:\n            if 'X-Forwarded-Host' in headers:\n                self.host = headers['X-Forwarded-Host']\n            if 'X-Forwarded-Proto' in headers:\n                self.protocol = headers['X-Forwarded-Proto']\n            if 'X-Forwarded-Port' in headers:\n                self.port = headers['X-Forwarded-Port']\n            if 'X-Forwarded-Uri' in headers:\n                self.uri = headers['X-Forwarded-Uri']\n            else:\n                self.uri = \"/\"\n        self.generate_url()\n\n        self.server = headers['X-Forwarded-Server']\n        if 'X-Forwarded-Method' in headers:\n            self.method = headers['X-Forwarded-Method']\n\n    def save(self):\n        data = dict()\n        if self.__new:\n            data['_id'] = self.token\n        data['token'] = self.token\n        data['server'] = self.server\n        data['host'] = self.host\n        data['method'] = self.method\n        data['protocol'] = self.protocol\n        data['port'] = self.port\n        data['uri'] = self.uri\n        data['url'] = self.url\n        data['user'] = self.user\n        data['is_authenticated'] = self.is_authenticated\n        data['expires'] = self.expires\n        data['auth_expires'] = self.auth_expires\n        data['souid'] = self.souid\n        if self.__new:\n            mongo.cx.h42auth.forward_sessions.insert_one(data)\n            self.__new = False\n        else:\n            mongo.cx.h42auth.forward_sessions.update_one({'_id': self.token},{'$set': data})\n\n    def load(self, data):\n        self.__new = False\n        self.token = data['token']\n        self.server = data['server']\n        self.host = data['host']\n        self.method = data['method']\n        self.protocol = data['protocol']\n        self.port = data['port']\n        self.uri = data['uri']\n        self.url = data['url']\n        self.user = data['user']\n        self.is_authenticated = data['is_authenticated']\n        self.expires = data['expires']\n        if 'auth_expires' in data:\n            self.auth_expires = data['auth_expires']\n        else:\n            self.auth_expires = self.expires\n        self.souid = data['souid']\n\n    def destroy(self):\n        mongo.cx.h42auth.forward_sessions.delete_one({'_id':self.token})\n\n    
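# Collection-level helpers: the classmethods below look up, expire, and enumerate stored sessions.\n    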
@classmethod\n    def find_auth(cls, token):\n        data = mongo.cx.h42auth.forward_sessions.find_one({'_id':token})\n        if data:\n            return ForwardAuth(data)\n        return None\n\n    @classmethod\n    def user_logout(cls, user):\n        mongo.cx.h42auth.forward_sessions.delete_many({'souid':user.uid})\n\n    @classmethod\n    def clean_session(cls):\n        mongo.cx.h42auth.forward_sessions.delete_many({'expires':{'$lt':datetime.utcnow()}})\n        mongo.cx.h42auth.forward_sessions.delete_many({'auth_expires':{'$lt':datetime.utcnow()}})\n\n    @classmethod\n    def terminate_session(cls, token):\n        mongo.cx.h42auth.forward_sessions.delete_one({'_id':token})\n\n    @classmethod\n    def get_user_sessions(cls, user):\n        fasess = mongo.cx.h42auth.forward_sessions.find({'souid':user.uid})\n        if fasess:\n            sessions = list()\n            for sess in fasess:\n                sessions.append(ForwardAuth(sess))\n            return sessions\n        return None\n","sub_path":"h42auth/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"545332373","text":"\n\nfrom django.urls import path\nfrom online_ordering import views\n\nurlpatterns = [\n    path('',views.index,name=\"index\"),\n    path('about/',views.about,name=\"about\"),\n    path('login/',views.login,name=\"login\"),\n    path('signup/',views.signup,name=\"signup\"),\n    \n]\n\n","sub_path":"mainfolder/online_ordering/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"356283529","text":"from scrapy.http import HtmlResponse\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException  # needed by the except clauses in ajax_complete\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.info('JSMiddleware called')\n\n\nclass JSMiddleware(object):\n    def __init__(self):\n        dcaps = dict(DesiredCapabilities.PHANTOMJS)\n        service = ['--ignore-ssl-errors=true',\n                   '--ssl-protocol=any',\n                   '--web-security=false']\n        dcaps = {'handlesAlerts': False,\n                 'javascriptEnabled': True,\n                 'takesScreenshot': False}\n        dcaps[\"phantomjs.page.settings.userAgent\"] = (\n            \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36\")\n        self.driver = webdriver.PhantomJS(\n            desired_capabilities=dcaps, service_args=service)\n        self.driver.set_window_size(1120, 550)\n        self.driver.set_page_load_timeout(15)\n\n    def ajax_complete(self, driver):\n        jquery = False\n        jscomplete = False\n        try:\n            jquery = (0 == driver.execute_script(\"return jQuery.active\"))\n        except WebDriverException:\n            pass\n\n        try:\n            if driver.execute_script(\"return document.readyState\") == \"complete\":\n                jscomplete = True\n        except WebDriverException:\n            pass\n        return jquery & jscomplete\n\n    def process_request(self, request, spider):\n        if 'PhantomJS' not in request.meta:\n            return\n        self.driver.get(request.url)\n        WebDriverWait(self.driver, 20).until(\n            self.ajax_complete, \"Wait till loaded\")\n        body = self.driver.page_source.encode('utf-8')\n        response = HtmlResponse(self.driver.current_url,\n                                body=body, encoding='utf-8', request=request)\n        return response\n","sub_path":"templates/JS.py","file_name":"JS.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"186868339","text":"import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as 
pd\nimport scipy as sp\n\nfrom deprecated import deprecated\n\n@deprecated(version='0.0.2', reason=\"This module is moved to utils.py\")\ndef seq2mat(seq,seq_dict):\n\n    # numpy, not scipy: the scipy top-level zeros() alias has been removed.\n    mat = np.zeros((len(seq_dict),len(seq)),dtype=int)\n    for i,bp in enumerate(seq):\n        mat[seq_dict[bp],i] = 1\n    return mat\n\n@deprecated(version='0.0.2', reason=\"This module is moved to utils.py\")\ndef choose_dict(dicttype,modeltype='MAT'):\n\n    if dicttype == 'dna':\n        seq_dict = {'A':0,'C':1,'G':2,'T':3}\n        inv_dict = {0:'A',1:'C',2:'G',3:'T'}\n    elif dicttype == 'rna':\n        seq_dict = {'A':0,'C':1,'G':2,'U':3}\n        inv_dict = {0:'A',1:'C',2:'G',3:'U'}\n    elif dicttype == 'protein':\n        seq_dict = {\n            '*':0,'A':1,'C':2,'D':3,'E':4,'F':5,'G':6,'H':7,'I':8,'K':9,'L':10,\n            'M':11,'N':12,'P':13,'Q':14,'R':15,'S':16,'T':17,'V':18,'W':19,'Y':20}\n        inv_dict = {v:k for k,v in seq_dict.items()}\n    else:\n        raise ValueError('Unknown dicttype: %s'%dicttype)  # SortSeqError is not defined in this module\n\n    if modeltype == 'NBR' or modeltype == 'PAIR':\n        seq_dict = {\n            ''.join([inv_dict[i],inv_dict[z]]):i*len(seq_dict)+z \n            for i in range(len(seq_dict)) for z in range(len(seq_dict))}\n        inv_dict = {seq_dict[i]:i for i in seq_dict.keys()}\n    return seq_dict,inv_dict\n\n@deprecated(version='0.0.2', reason=\"This module is moved to utils.py\")\ndef sliding_window(y,windowsize=3):\n    out_vec = np.zeros_like(y)\n    for i in range(len(y)-windowsize):\n        out_vec[i] = np.sum(y[i:i+windowsize])/windowsize\n    return out_vec\n\n","sub_path":"regseq/deprecated/peak_utils.py","file_name":"peak_utils.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"92419639","text":"from sopel.formatting import colors, color, bold, underline\nfrom sopel.module import commands, NOLIMIT, example, rule, rate\nfrom threading import Thread\nimport json\nimport os\nimport requests\nimport sched\nimport sys\nimport time\nimport web\n\nchannels = [\n    \"#paolo\",\n    \"#fusesustaining\"\n    ]\n\nprojects = [\n    \"fabric8\",\n    \"camel\",\n    \"hawtio\",\n    \"fuse\",\n    \"cxf\",\n    \"fuseenterprise\",\n    \"karaf\",\n    \"aries\",\n    \"felix\"\n]\n\nbot_instance = None\n\n\nurls = (\n    # the second param here is a Class\n    '/', 'webhook',\n    '/hello', 'index'\n)\n\nclass index:\n    def GET(self):\n        # print bot_instance\n        print (\"Hello, world!\")\n        for channel in channels:\n            bot_instance.say(\"message\", channel)\n        return \"\"\nclass webhook:\n    def POST(self):\n        print (\"web hook invoked\")\n        data = web.data()\n        message = inspect_event(data)\n        for channel in channels:\n            bot_instance.say(message, channel)\n        print (data)\n# listen on port 8080\napp = web.application(urls, globals())\nserver = Thread(target=app.run)\nserver.setDaemon(True)\nserver.start()\n\ndef inspect_event(event_string):\n    message = \"\"\n    event = json.loads(event_string)\n    if \"pull_request\" in event:\n        print (\"it's a pull request\")\n        print (\"Action is: \" + event[\"action\"])\n        if event[\"action\"] in [\"review_requested\", \"opened\", \"reopened\"]:\n            url = event[\"pull_request\"][\"_links\"][\"html\"][\"href\"]\n            user = event[\"sender\"][\"login\"]\n            message = \"NEW Review Request from \" + user + \": \" + url\n\n    else:\n        print (\"it's something else\")\n    return message\n\n# curl -H 'Accept: application/vnd.github.black-cat-preview+json' -H 'Authorization: token xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' https://api.github.com/repos/jboss-fuse/camel/pulls\n# .url\n# .state == open\n# .locked == false\n# \n# curl -H 'Accept: application/vnd.github.black-cat-preview+json' -H 'Authorization: 
token xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' https://api.github.com/repos/jboss-fuse/camel/pulls/142/reviews\n# \n# if( output empty then post)\n\n\ndef setup(bot):\n    print (\"\\ninvoking setup\\n\")\n    global bot_instance\n    bot_instance = bot\n\ndef configure(config):\n    config.core\n\nbase_url = \"https://api.github.com\"\ntoken = os.environ['GH_TOKEN']\npulls_url = base_url + \"/repos/jboss-fuse/:project/pulls\"\nsingle_pull_url = pulls_url + '/:pull/reviews'\n\nheaders = {\n    'Accept': 'application/vnd.github.black-cat-preview+json',\n    'Authorization': 'token ' + token\n}\n\n# jboss_org_rest and jboss_org_case are expected to be defined elsewhere in the bot's config.\ndef query_jira(jira_id):\n    response = requests.get(jboss_org_rest + jira_id, headers=headers)\n    response = response.json()\n    if \"fields\" in response:\n        return \"[{0}] {1} - {2}\".format(jira_id, response[\"fields\"][\"summary\"], color(jboss_org_case + jira_id , colors.GREY))\n    else:\n        return \"Sorry but I couldn't fetch the URL\"\n\n@commands('pr')\n@example('.pr')\n@rate(600)\ndef pr(bot, trigger):\n    #text = trigger.group(2)\n    for project in projects:\n        url = pulls_url.replace(':project', project)\n        # print \"[PR URL] \" + url\n        response = requests.get(url, headers=headers)\n        prs = response.json()\n        for pr in prs:\n            print (pr)\n            if pr[\"state\"] == \"open\" and not pr[\"locked\"]:\n                pr_url = pr[\"url\"]\n                pr_html_url = pr[\"html_url\"]\n                pr_number = pr[\"number\"]\n                url = single_pull_url.replace(':project', project).replace(':pull', str(pr_number))\n                # print \"[PR reviews] \" + url\n                response = requests.get(url, headers=headers)\n                response = response.json()\n                # print response\n                if len(response) == 0:\n                    bot.say(\"[Approval Required] {0} - {1} - {2}\".format(pr_html_url, pr[\"user\"][\"login\"], color(pr[\"title\"], colors.GREY)))\n\n\n","sub_path":"scripts/modules/review_requests.py","file_name":"review_requests.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"370551623","text":"from scrapy.spiders import CrawlSpider\nfrom converter.items import *\nimport time\nimport logging\nfrom w3lib.html import remove_tags, replace_escape_chars\nfrom converter.spiders.lom_base import LomBase\n\n\nclass RSSBase(CrawlSpider, LomBase):\n    start_urls = []\n    commonProperties = {}\n    response = None\n\n    def __init__(self, **kwargs):\n        LomBase.__init__(self, **kwargs)\n\n    def parse(self, response):\n        response.selector.remove_namespaces()\n        # common properties\n        self.commonProperties[\"language\"] = response.xpath(\n            \"//rss/channel/language//text()\"\n        ).get()\n        self.commonProperties[\"source\"] = response.xpath(\n            \"//rss/channel/generator//text()\"\n        ).get()\n        self.commonProperties[\"publisher\"] = response.xpath(\n            \"//rss/channel/author//text()\"\n        ).get()\n        self.commonProperties[\"thumbnail\"] = response.xpath(\n            \"//rss/channel/image/url//text()\"\n        ).get()\n        self.response = response\n        return self.startHandler(response)\n\n    def startHandler(self, response):\n        for item in response.xpath(\"//rss/channel/item\"):\n            responseCopy = response.replace(url=item.xpath(\"link//text()\").get())\n            responseCopy.meta[\"item\"] = item\n            yield LomBase.parse(self, responseCopy)\n\n    def getId(self, response):\n        return response.meta[\"item\"].xpath(\"link//text()\").get()\n\n    def getHash(self, response):\n        return self.version + str(response.meta[\"item\"].xpath(\"pubDate//text()\").get())\n\n    def mapResponse(self, response):\n        r = LomBase.mapResponse(self, response)\n        return r\n\n    def getBase(self, response):\n        base 
= LomBase.getBase(self, response)\n        thumbnail = self.commonProperties[\"thumbnail\"]\n        if thumbnail:\n            base.add_value(\"thumbnail\", thumbnail)\n        return base\n\n    def getLOMGeneral(self, response):\n        general = LomBase.getLOMGeneral(self, response)\n        general.add_value(\n            \"identifier\", response.meta[\"item\"].xpath(\"guid//text()\").get()\n        )\n        general.add_value(\n            \"title\", response.meta[\"item\"].xpath(\"title//text()\").get().strip()\n        )\n        general.add_value(\"language\", self.commonProperties[\"language\"])\n        description = response.meta[\"item\"].xpath(\"description//text()\").get()\n        if not description:\n            description = (\n                response.meta[\"item\"].xpath('//*[name()=\"summary\"]//text()').get()\n            )\n        general.add_value(\"description\", description)\n        return general\n\n    def getLOMTechnical(self, response):\n        technical = LomBase.getLOMTechnical(self, response)\n        # technical.add_value('format', item.xpath('enclosure/@type').get())\n        # technical.add_value('size', item.xpath('enclosure/@length').get())\n        # technical.add_value('location', item.xpath('enclosure/@url').get())\n        technical.add_value(\"format\", \"text/html\")\n        technical.add_value(\n            \"location\", response.meta[\"item\"].xpath(\"link//text()\").get()\n        )\n        return technical\n","sub_path":"converter/spiders/rss_base.py","file_name":"rss_base.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"46486544","text":"import sys                     # import the standard library\n\nif __name__ == \"__main__\":\n\n    param = sys.argv\n\n    f = open(param[1], \"r\", encoding=\"utf-8\")\n    g = open(\"[Grep]\" + param[1], \"w\", encoding=\"utf-8\")\n    for line in f:                          # read the file one line at a time\n        if \" \" + param[2] + \" \" in line:    # look for the requested string\n            print(line, end=\"\\n\")           # echo each matching line\n            g.write(line)                   # and write it to the output file\n    f.close()                               # close the input file\n    g.close()                               # close the output file\n","sub_path":"2.3/grep.py","file_name":"grep.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"259277955","text":"import numpy as np\nfrom scipy import interp\nimport matplotlib.pyplot as plt\nplt.rcParams['legend.fontsize'] = 10\nfrom itertools import cycle\nfrom sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,\n                              GradientBoostingClassifier)\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import StratifiedKFold\nimport carregabase as cbe\nfrom sklearn.model_selection import train_test_split,cross_val_score,cross_val_predict\nfrom imblearn.over_sampling import RandomOverSampler\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import SMOTE\nimport MatrizConfusao as MT\nfrom sklearn.metrics import confusion_matrix\nfrom imblearn.under_sampling import CondensedNearestNeighbour\nfrom imblearn.under_sampling import TomekLinks \nfrom sklearn.decomposition import PCA\n\n\n# Import some data to play with\n\nX_rec,y_rec,data=cbe.carrega_base(\"mimiciii_apache_total.csv\")\n\n#X_reduced = PCA(n_components=15).fit_transform(X_rec)\n\nX, X_test, y, y_test = train_test_split(X_rec, y_rec, test_size=0.10,stratify=y_rec)\n\n#Over_sampling\nsm = SMOTE(random_state=42)\n#sm = RandomOverSampler(random_state=42)\n#Under_sampling\n#sm2 = RandomUnderSampler(random_state=42)\n#sm2 = CondensedNearestNeighbour(random_state = 42, n_jobs=8)\nsm2 = TomekLinks(random_state = 42, n_jobs = 8)\n\n#X_train2, X_test, y_train2, y_test = train_test_split(X, y, test_size=0.10,stratify=y)\n\n#X_res, y_res = 
sm.fit_sample(X_train2, y_train2)\n\nX_train = np.array(X)\ny_train = np.array(y)\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = StratifiedKFold(n_splits=10)\nclassifier = RandomForestClassifier(max_depth=None,\n n_estimators=800,\n min_impurity_decrease=0.01,\n n_jobs=8)\n\ntprs = []\naucs = []\nstd_S = []\nstd_E = []\nmean_fpr = np.linspace(0, 1, 100)\n\ni = 0\nfor train, test in cv.split(X_train, y_train):\n\n #Aplicacao SMOTE em treinamento\n X_res2, y_res2 = sm2.fit_sample(X_train[train], y_train[train])\n X_res3, y_res3 = sm.fit_sample(X_res2, y_res2)\n X_res, y_res = sm2.fit_sample(X_res3, y_res3)\n\n probas_ = classifier.fit(X_res, y_res).predict_proba(X_train[test])#[:, 1])\n # Compute ROC curve and area the curve\n fpr, tpr, thresholds = roc_curve(y_train[test], probas_[:, 1])\n tprs.append(interp(mean_fpr, fpr, tpr))\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n proba = classifier.predict(X_train[test])\n TN, FP, FN, TP = confusion_matrix(y_train[test], proba).ravel()\n #print(TN, FP, FN, TP)\n tn = float(TN)\n tp = float(TP)\n fn = float(FN)\n fp = float(FP)\n Sens = tp / (tp + fn)\n Esp = tn / (tn + fp)\n std_S.append(Sens)\n std_E.append(Esp)\n S = np.mean(Sens)\n E = np.mean(Esp)\n\n plt.plot(fpr, tpr, lw=1, alpha=0.3,\n label='Pasta %d (AUC = %0.4f S = %0.4f E = %0.4f)'\n % (i + 1, roc_auc, Sens, Esp))\n\n i += 1\nplt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Randomico', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nSn = np.mean(std_S)\nEs = np.mean(std_E)\ndps = np.std(std_S)\ndpe = np.std(std_E)\n#print(std_auc, Sn, dps, Es, dpe)\nplt.figure(1)\nplt.plot(mean_fpr, mean_tpr, color='b',\n label=r'Media ROC (AUC = %0.4f $\\pm$ %0.4f S = %0.4f $\\pm$ %0.4f E = %0.4f $\\pm$ %0.4f)'\n % (mean_auc,std_auc, Sn, dps, Es, dpe),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nplt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ std. dev.')\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('Taxa de falsos positivos')\nplt.ylabel('Taxa de verdadeiros positivos')\nplt.title('Curvas ROC validacao cruzada da RF')\nplt.legend(loc=\"lower right\")\n#plt.show()\n\n# Compute confusion matrix\ny_pred_rf_cm = classifier.predict(X_test)\ncnf_matrix = MT.confusion_matrix(y_test, y_pred_rf_cm)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nMT.plot_confusion_matrix(cnf_matrix, classes=('Sobreviventes','Nao Sobreviventes'),\n title='Matriz de confusao')\n\nplt.show()","sub_path":"PGCA/apoio.py","file_name":"apoio.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519490795","text":"\"\"\"Store the game board's current state.\"\"\"\n\n# Third-Party Libraries\nimport numpy as np\n\nfrom . 
import constants\n\n\nclass DorfBoard:\n\n EDGE_ZERO_INDEX = \"left\"\n DIRECTION = \"clockwise\"\n ORIGIN_TILE = 6 * [constants.TileEdge.GRASS]\n\n def __init__(self, from_npz=None):\n\n if from_npz is None:\n self.size = 8\n self.edges = np.zeros([self.size, self.size, 6], dtype=np.uint8)\n self.status = np.zeros([self.size, self.size], dtype=np.uint8)\n x, y = self.get_origin_xy()\n self.edges[x, y] = self.ORIGIN_TILE\n self.status[x, y] = constants.TileStatus.GOOD\n for x_, y_ in self.get_neighboring_tiles(x, y):\n self.update_tile_status(x_, y_)\n else:\n data = np.load(from_npz)\n self.edges = data[\"edges\"]\n self.status = data[\"status\"]\n self.size = len(self.edges)\n\n def get_origin_xy(self):\n return int(self.size / 2 - 1), int(self.size / 2 - 1)\n\n def is_in_grid(self, x, y):\n return x >= 0 and y >= 0 and x < self.size and y < self.size\n\n def is_on_border(self, x, y):\n return x == 0 or y == 0 or x == self.size - 1 or y == self.size - 1\n\n def is_near_border(self, x, y, distance=1):\n if not self.is_in_grid(x, y):\n return constants.DorfBoardResult.ERROR\n return (\n x <= distance\n or y <= distance\n or x >= self.size - 1 - distance\n or y >= self.size - 1 - distance\n )\n\n def enlarge(self, pad_size=2):\n new_size = self.size + 2 * pad_size\n x0 = y0 = pad_size\n x1 = y1 = pad_size + self.size\n new_edges = np.zeros([new_size, new_size, 6], dtype=np.uint8)\n new_status = np.zeros([new_size, new_size], dtype=np.uint8)\n new_edges[x0:x1, y0:y1] = self.edges\n new_status[x0:x1, y0:y1] = self.status\n self.edges = new_edges\n self.status = new_status\n self.size = new_size\n\n def enlarge_and_relocate(self, x, y, pad_size=2):\n self.enlarge(pad_size)\n return x + pad_size, y + pad_size\n\n @staticmethod\n def get_neighboring_tiles(x, y):\n return [\n (x - 1, y),\n (x, y - 1),\n (x + 1, y - 1),\n (x + 1, y),\n (x, y + 1),\n (x - 1, y + 1),\n ]\n\n def get_tile_rotations(self, tile):\n rotations = []\n for i in range(6):\n rotations.append(tuple(tile[i:] + tile[:i]))\n rotations = list(set(rotations))\n rotations = [list(r) for r in rotations]\n return rotations\n\n def is_empty_tile(self, x, y):\n return (self.edges[x, y] == constants.TileEdge.EMPTY).all()\n\n def get_opposite_edge(self, x, y, edge_index):\n if edge_index == 0:\n return x - 1, y, 3\n elif edge_index == 1:\n return x, y - 1, 4\n elif edge_index == 2:\n return x + 1, y - 1, 5\n elif edge_index == 3:\n return x + 1, y, 0\n elif edge_index == 4:\n return x, y + 1, 1\n elif edge_index == 5:\n return x - 1, y + 1, 2\n\n def is_legal_connection(self, x, y, edge_index, tile):\n edge1 = tile[edge_index]\n x_, y_, edge_index_ = self.get_opposite_edge(x, y, edge_index)\n if not self.is_in_grid(x_, y_):\n return True\n edge2 = self.edges[x_, y_, edge_index_]\n if [edge1, edge2] in constants.ILLEGAL_CONNECTIONS:\n return False\n elif [edge2, edge1] in constants.ILLEGAL_CONNECTIONS:\n return False\n else:\n return True\n\n def is_good_connection(self, x, y, edge_index, tile=None):\n if tile is None:\n tile = self.edges[x, y]\n if not self.is_legal_connection(x, y, edge_index, tile):\n return False\n edge1 = tile[edge_index]\n x_, y_, edge_index_ = self.get_opposite_edge(x, y, edge_index)\n edge2 = self.edges[x_, y_, edge_index_]\n if [edge1, edge2] in constants.GOOD_CONNECTIONS:\n return True\n elif [edge2, edge1] in constants.GOOD_CONNECTIONS:\n return True\n else:\n return False\n\n def get_valid_locations(self, tile):\n return zip(*np.where(self.status == constants.TileStatus.VALID))\n\n def 
get_legal_placements(self, tile):\n rotations = self.get_tile_rotations(tile)\n valid_locations = self.get_valid_locations(tile)\n legal_placements = []\n for (x, y) in valid_locations:\n for rotation in rotations:\n is_legal = True\n for edge_index in range(6):\n if not self.is_legal_connection(x, y, edge_index, rotation):\n is_legal = False\n break\n if is_legal:\n legal_placements.append([x, y, rotation])\n return legal_placements\n\n def get_connecting_edges(self, x, y):\n connections = []\n for edge_index in range(6):\n x_, y_, opposite_edge_index = self.get_opposite_edge(x, y, edge_index)\n if self.is_in_grid(x_, y_) and not self.is_empty_tile(x_, y_):\n connections.append(self.edges[x_, y_, opposite_edge_index])\n else:\n connections.append(constants.TileEdge.EMPTY)\n return connections\n\n def get_num_good_and_bad_connections(self, x, y, tile=None):\n if tile is None:\n tile = self.edges[x, y]\n neighbors = self.get_neighboring_tiles(x, y)\n num_good_connections = num_bad_connections = 0\n for edge_index, (x_, y_) in enumerate(neighbors):\n if self.is_in_grid(x_, y_) and not self.is_empty_tile(x_, y_):\n if self.is_good_connection(x, y, edge_index, tile):\n num_good_connections += 1\n else:\n num_bad_connections += 1\n return num_good_connections, num_bad_connections\n\n def get_tile_status_from_connections(self, x, y):\n if self.is_empty_tile(x, y):\n for x_, y_ in self.get_neighboring_tiles(x, y):\n if self.is_in_grid(x_, y_) and not self.is_empty_tile(x_, y_):\n return constants.TileStatus.VALID\n return constants.TileStatus.EMPTY\n else:\n (\n num_good_connections,\n num_bad_connections,\n ) = self.get_num_good_and_bad_connections(x, y)\n if num_good_connections == 6:\n return constants.TileStatus.PERFECT\n elif num_bad_connections > 0:\n return constants.TileStatus.IMPERFECT\n else:\n return constants.TileStatus.GOOD\n\n def update_tile_status(self, x, y):\n self.status[x, y] = self.get_tile_status_from_connections(x, y)\n\n def place_tile(self, x, y, tile):\n if not self.is_in_grid(x, y) or [x, y, tile] not in self.get_legal_placements(\n tile\n ):\n print(\"Illegal placement: ({},{}): \".format(x, y), tile)\n return constants.DorfBoardResult.ERROR\n if self.is_near_border(x, y, distance=1):\n x, y = self.enlarge_and_relocate(x, y)\n result = constants.DorfBoardResult.ENLARGE\n else:\n result = constants.DorfBoardResult.OK\n self.edges[x, y] = tile\n self.status[x, y] = self.get_tile_status_from_connections(x, y)\n for x_, y_ in self.get_neighboring_tiles(x, y):\n self.update_tile_status(x_, y_)\n return result\n\n def remove_tile(self, x, y):\n if not self.is_in_grid(x, y) or self.is_empty_tile(x, y):\n print(\"Illegal removal: ({},{}): \".format(x, y))\n return constants.DorfBoardResult.ERROR\n self.edges[x, y] = 6 * [constants.TileEdge.EMPTY]\n self.status[x, y] = self.get_tile_status_from_connections(x, y)\n for x_, y_ in self.get_neighboring_tiles(x, y):\n self.update_tile_status(x_, y_)\n return constants.DorfBoardResult.OK\n\n def evaluate_placement(self, x, y, tile):\n for edge_index in range(6):\n if not self.is_legal_connection(x, y, edge_index, tile):\n return constants.DorfBoardResult.ILLEGAL\n # Compute the number of adjecent tiles that would become perfects\n num_perfects = 0\n num_meh_connections = 0\n neighbors = self.get_neighboring_tiles(x, y)\n for edge_index, (x_, y_) in enumerate(neighbors):\n if not self.is_empty_tile(x_, y_):\n (\n num_good_connections,\n num_bad_connections,\n ) = self.get_num_good_and_bad_connections(x_, y_)\n is_good = 
self.is_good_connection(x, y, edge_index, tile)\n if num_good_connections == 6 - 1 and is_good:\n num_perfects += 1\n elif num_bad_connections > 0 and not is_good:\n num_meh_connections += 1\n # Compute the number of good and bad connections\n (\n num_good_connections,\n num_bad_connections,\n ) = self.get_num_good_and_bad_connections(x, y, tile)\n if num_good_connections == 6:\n num_perfects += 1\n # Give a proxy score\n score = (\n 0.5 * num_perfects\n + num_good_connections\n - (1.5 * num_bad_connections - num_meh_connections)\n )\n evaluation = {\n \"score\": score,\n \"perfect\": num_perfects,\n \"good\": num_good_connections,\n \"bad\": num_bad_connections,\n \"meh\": num_meh_connections,\n }\n return evaluation\n\n def rank_all_placements(self, tile):\n placements = self.get_legal_placements(tile)\n evaluations = []\n for placement in placements:\n x_, y_, tile_ = placement\n evaluation = self.evaluate_placement(x_, y_, tile_)\n if not evaluation == constants.DorfBoardResult.ILLEGAL:\n evaluations.append((placement, evaluation))\n ranked_evaluations = sorted(\n evaluations, key=lambda x: x[1][\"score\"], reverse=True\n )\n return ranked_evaluations\n\n def get_hint(self, tile, top_k=None, threshold=None):\n ranked_evaluations = self.rank_all_placements(tile)\n num_evals = len(ranked_evaluations)\n if threshold is not None:\n above_threshold = [\n evaluation[\"score\"] >= threshold for _, evaluation in ranked_evaluations\n ]\n num_evals = above_threshold.index(False)\n if top_k is not None:\n num_evals = min(top_k, num_evals)\n return ranked_evaluations[0:num_evals]\n\n def get_num_tiles_with_status(self, status):\n if isinstance(status, list):\n match = np.zeros(shape=self.status.shape, dtype=bool)\n for s in status:\n match |= self.status == s\n else:\n match = self.status == status\n return np.count_nonzero(match)\n\n def save(self, to_npz):\n np.savez(to_npz, edges=self.edges, status=self.status)\n","sub_path":"src/dorfromantik_helper/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":11147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498022090","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\nimport unittest\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom graphql import (\n GraphQLBoolean,\n GraphQLField,\n GraphQLID,\n GraphQLNonNull,\n GraphQLObjectType,\n GraphQLSchema,\n)\nfrom graphql_server.typemap import TypeMap\nfrom graphql_server.types import graphql_field, graphql_object\nfrom tools.pyre.tools.generate_taint_models.get_dynamic_graphql_sources import (\n DynamicGraphQLSourceGenerator,\n)\n\nfrom .test_functions import __name__ as qualifier, all_functions\n\n\ndef function1(foo) -> bool:\n return True\n\n\ndef function2(foo, *bar) -> bool:\n return True\n\n\nclass TestClass(object):\n def method1(self, foo) -> bool:\n return True\n\n def method2(self, foo, *bar) -> bool:\n return True\n\n\n@dataclass\nclass DirectObject:\n id: int\n resolver1: bool\n resolver2: bool\n resolver3: bool\n resolver4: bool\n lambda_resolver: bool\n\n\n@graphql_object()\nclass DirectObjectResult:\n @graphql_field()\n def success(self) -> str:\n return \"True\"\n\n @graphql_field()\n def error_message(self) -> Optional[str]:\n return \"Foo\"\n\n\nDirectObjectType = GraphQLObjectType(\n name=\"DirectObjectType\",\n description=\"GraphQLObject directly created at top level\",\n fields={\n \"no_resolver\": GraphQLField(GraphQLNonNull(GraphQLID)),\n \"resolver1\": GraphQLField(GraphQLBoolean, resolver=function1),\n \"resolver2\": GraphQLField(GraphQLBoolean, resolver=function2),\n \"resolver3\": GraphQLField(GraphQLBoolean, resolver=TestClass.method1),\n \"resolver4\": GraphQLField(GraphQLBoolean, resolver=TestClass.method2),\n \"lambda_resolver\": GraphQLField(GraphQLBoolean, resolver=lambda x: x),\n },\n)\n\n\n@graphql_object()\nclass Query:\n @graphql_field(graphql_core_type=DirectObjectType)\n def get_object(self) -> DirectObject:\n return DirectObject(4, True, True, True, True, True)\n\n\nTYPEMAP = TypeMap([Query])\n# pyre-fixme[6]: Expected\n# `Optional[typing.List[graphql.type.definition.GraphQLNamedType]]` for 2nd param but\n# got `_OrderedDictValuesView[typing.Any]`.\nSCHEMA = GraphQLSchema(query=TYPEMAP[\"Query\"], types=TYPEMAP.values())\n\n\nclass GetDynamicGraphQLSourcesTest(unittest.TestCase):\n def test_gather_functions_to_model(self) -> None:\n functions = DynamicGraphQLSourceGenerator(\n graphql_schema=SCHEMA, graphql_object_type=GraphQLObjectType\n ).gather_functions_to_model()\n\n self.assertTrue(\n {function1, function2, TestClass.method1, TestClass.method2}.issubset(\n set(functions)\n )\n )\n\n def test_compute_models(self) -> None:\n source = \"TaintSource[UserControlled]\"\n sink = \"TaintSink[ReturnedToUser]\"\n self.assertEqual(\n [\n *map(\n str,\n DynamicGraphQLSourceGenerator(\n graphql_schema=SCHEMA, graphql_object_type=GraphQLObjectType\n ).compute_models(all_functions),\n )\n ],\n [\n f\"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...\",\n f\"def {qualifier}.TestClass.methodB(self, *args: {source}) -> {sink}: ...\",\n f\"def {qualifier}.testA() -> {sink}: ...\",\n f\"def {qualifier}.testB(x) -> {sink}: ...\",\n f\"def {qualifier}.testC(x) -> {sink}: ...\",\n f\"def {qualifier}.testD(x, *args: {source}) -> {sink}: ...\",\n f\"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...\",\n ],\n 
)\n","sub_path":"tools/generate_taint_models/tests/get_dynamic_graphql_sources_test.py","file_name":"get_dynamic_graphql_sources_test.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"246645436","text":"\nimport pandas as pd\nimport numpy as np\nimport zipfile\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\nimport time\nimport matplotlib\nimport os\nimport logging\nfrom datetime import datetime\n# os, logging and datetime are all used in the per-category section below.\n# NOTE: this exploratory script assumes a crime DataFrame 'df' (with X, Y,\n# Category and dt columns) has already been loaded into the session.\n# Supplied map bounding box:\n#    ll.lon    ll.lat   ur.lon   ur.lat\n#    -122.52469 37.69862 -122.33663 37.82986\nPATH_MAP = \"/home/batman/git/hack_sfpd/INPUT/sf_map_copyright_openstreetmap_contributors.txt\"\nmapdata = np.loadtxt(PATH_MAP)\nasp = mapdata.shape[0] * 1.0 / mapdata.shape[1]\n\nlon_lat_box = (-122.5247, -122.3366, 37.699, 37.8299)\nclipsize = [[-122.5247, -122.3366],[ 37.699, 37.8299]]\n\n\nPATH_OUT = r\"/home/batman/git/hack_sfpd/Out\"\n\n#%% Cmap mapper\n# http://scipy-cookbook.readthedocs.io/items/Matplotlib_ColormapTransformations.html\n\ndef cmap_map(function, cmap):\n    \"\"\" Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.\n    This routine will break any discontinuous points in a colormap.\n    \"\"\"\n    cdict = cmap._segmentdata\n    step_dict = {}\n    # First get the list of points where the segments start or end\n    for key in ('red', 'green', 'blue'):\n        step_dict[key] = list(map(lambda x: x[0], cdict[key]))\n    step_list = sum(step_dict.values(), [])\n    step_list = np.array(list(set(step_list)))\n    # Then compute the LUT, and apply the function to the LUT\n    reduced_cmap = lambda step : np.array(cmap(step)[0:3])\n    old_LUT = np.array(list(map(reduced_cmap, step_list)))\n    new_LUT = np.array(list(map(function, old_LUT)))\n    # Now try to make a minimal segment definition of the new LUT\n    cdict = {}\n    for i, key in enumerate(['red','green','blue']):\n        this_cdict = {}\n        for j, step in enumerate(step_list):\n            if step in step_dict[key]:\n                this_cdict[step] = new_LUT[j, i]\n            elif new_LUT[j,i] != old_LUT[j, i]:\n                this_cdict[step] = new_LUT[j, i]\n        colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))\n        colorvector.sort()\n        cdict[key] = colorvector\n\n    return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)\n#%%Subset!\ndf_sub = df[1:10000] \n\n#%% Create figure ALL\nstart_time = time.time()\n\n#Seaborn FacetGrid, split by crime Category\ng= sns.FacetGrid(df_sub, col=\"Category\", col_wrap=6, size=5, aspect=1/asp)\n\n#Show the background map\nfor ax in g.axes:\n    ax.imshow(mapdata, cmap=plt.get_cmap('gray'), \n              extent=lon_lat_box, \n              aspect=asp)\n#Kernel Density Estimate plot\ng.map(sns.kdeplot, \"X\", \"Y\", clip=clipsize)\n\nthis_fig = plt.gcf()\n\nelapsed_time = time.time() - start_time\nlogging.debug(\"{}\".format(elapsed_time))\n#%% Modify figure\nthis_fig.axes[0]\n\nthis_fig.show()\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\nfig.savefig('test2png.png', dpi=100)\n\nthis_fig.get_size_inches()  # Figure has no .size attribute\n\nthis_fig = plt.gcf()  # pyplot is imported as plt; 'pl' is undefined in this script\n\n#%% Save figure\nplt.savefig('category_density_plot.png')\nwith open('myplot.pkl','wb') as fid:\n    pickle.dump(this_fig, fid)\n\n#%% Reload\nthis_path = r\"/home/batman/git/hack_sfpd/Out/myplot.pkl\"\nwith open(this_path,'rb') as fid:\n    ax = pickle.load(fid)\nplt.show()\n\n#%% Do a larger plot with 1 category only\n\n\nthis_cat = 'ASSAULT'\n\nfor this_cat in df.Category.unique():\n    start_time = datetime.now()\n    \n    #df_1cat = df[df.Category == this_cat][0:10000]\n    df_1cat = df[df.Category == this_cat]\n    #light_jet = cmap_map(lambda x: x/2 + 0.4, 
matplotlib.cm.gray)\n    light_jet = cmap_map(lambda x: x*1.1, matplotlib.cm.gray)\n    \n    \n    this_cat_fig = plt.figure(figsize=LANDSCAPE_A4)  # LANDSCAPE_A4 (a width/height tuple in inches) is assumed to be defined elsewhere\n    ax = sns.kdeplot(df_1cat.X, df_1cat.Y, clip=clipsize, aspect=1/asp, shade=False, color=\"r\",cmap=\"seismic\")\n    #ax.imshow(mapdata, cmap=plt.get_cmap('Greys'), \n    #           extent=lon_lat_box, \n    #           aspect=asp)\n    ax.imshow(mapdata, cmap=light_jet, \n              extent=lon_lat_box, \n              aspect=asp)\n    \n    min_time = df_1cat.dt.min().strftime(\"%Y-%m-%d\")\n    max_time = df_1cat.dt.max().strftime(\"%Y-%m-%d\")\n    num_recs = len(df_1cat)\n    plt.suptitle(\"KDE plot for {} category\".format(this_cat),y=0.95,fontsize=16)\n    plt.title(\"{} to {}, {} records\".format(min_time,max_time,num_recs))\n    \n    path_this_cat_out = os.path.join(PATH_OUT,this_cat+\".png\")\n    \n    plt.savefig(path_this_cat_out)\n    \n    elapsed_time = datetime.now() - start_time\n    logging.debug(\"Wrote {} category to KDE map over {:0.1f}s\".format(this_cat, elapsed_time.total_seconds()))\n\n#%% Get contours\n\ndef get_contour_verts(cn):\n    contours = []\n    # for each contour line\n    for cc in cn.collections:\n        paths = []\n        # for each separate section of the contour line\n        for pp in cc.get_paths():\n            xy = []\n            # for each segment of that section\n            for vv in pp.iter_segments():\n                xy.append(vv[0])\n            paths.append(np.vstack(xy))\n        contours.append(paths)\n\n    return contours\n\n\ncontours = get_contour_verts(ax)\n\n","sub_path":"03 scripts/00 Superceded/02 Plotting Kernel Density.py","file_name":"02 Plotting Kernel Density.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"84154902","text":"# Reverse digits of an integer.\n#\n# Example1: x = 123, return 321\n# Example2: x = -123, return -321\n\n\nclass Solution:\n    # @param {integer} x\n    # @return {integer}\n    def reverse(self, x):\n        num = abs(x)\n        sign = 1 if x > 0 else -1\n        ret, rem = 0, 0\n        while num != 0:\n            ret, rem = ret*10, num % 10\n            ret += rem\n            num //= 10  # floor division: plain '/' would turn num into a float on Python 3\n        if ret < 2147483648:\n            return sign * ret\n        else:\n            return 0\n","sub_path":"007_Reverse_Integer.py","file_name":"007_Reverse_Integer.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"245768979","text":"import socket\n\nimport sys\n\nHOST, PORT = '78.47.201.69', 4001\ndata = ' '.join(sys.argv[1:])\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n    sock.connect((HOST, PORT))\n    sock.sendall((data + '\\n').encode('utf-8'))\n\n    received = sock.recv(1024)\nfinally:\n    sock.close()\n\nprint('Sent: {}'.format(data))\nprint('Received: {}'.format(received.decode('utf-8')))","sub_path":"Client/client3.py","file_name":"client3.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"470323558","text":"# noinspection PyUnresolvedReferences\nfrom sr.robot import *\n\nimport constants\nimport logger\nfrom engine import Engine\nfrom map import Map\nfrom directional_motion import DirectionalMotion\nfrom vector2d import Vector2D\n\n\ndef main():\n    # noinspection PyUnresolvedReferences\n    engine = Engine(Robot())\n\n    virtual_map = Map(constants.map_size, robot_dimensions={constants.zone: constants.own_robot_dimensions},\n                      robot_coordinates={constants.zone: constants.own_robot_start_coordinates})\n\n    def execute_directional_motion():\n        directional_motion = DirectionalMotion(engine, virtual_map.robots[0], target_coordinates)\n\n        while not 
directional_motion.motion_complete():\n event, success = directional_motion.update()\n virtual_map.apply_movement_event(event)\n logger.log_to_file(event.encode(), \"movements.txt\", timestamp=False)\n\n while True:\n target_coordinates = Vector2D(-200, 200)\n\n execute_directional_motion()\n\n target_coordinates = Vector2D(80, 80)\n\n execute_directional_motion()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Tests/directional_motion_test.py","file_name":"directional_motion_test.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148001959","text":"import numpy as np\nimport os\nfrom scipy import misc\nfrom keras.models import model_from_json\nimport pickle\nimport imutils\nimport cv2\n\n# count = 1\ndef predictModel():\n\tclassifier_f = open(\"int_to_word_out.pickle\", \"rb\")\n\tint_to_word_out = pickle.load(classifier_f)\n\tclassifier_f.close()\n\n\n\n\t# load json and create model\n\tjson_file = open('model_face.json', 'r')\n\tloaded_model_json = json_file.read()\n\tjson_file.close()\n\tloaded_model = model_from_json(loaded_model_json)\n\t# load weights into new model\n\tloaded_model.load_weights(\"model_face.h5\")\n\tprint(\"Model is now loaded in the disk\")\n\n\n\timg=os.listdir(\"predict\")[0]\n\timage=np.array(misc.imread(\"predict/\"+img))\n\timage = misc.imresize(image, (64, 64))\n\tpimage = image.copy()\n\timage=np.array([image])\n\t\n\timage = image.astype('float32')\n\timage = image / 255.0\n\n\tprediction=loaded_model.predict(image)\n\n\tprint(prediction)\n\n\tprint(np.max(prediction))\n\n\tprint(int_to_word_out[np.argmax(prediction)])\n\t\n\toutput = imutils.resize(pimage, width=400)\n\n\n\tcv2.putText(output,str(int_to_word_out[np.argmax(prediction)]), (10,25),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\n\tcv2.putText(output,str(np.max(prediction)), (100,25),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\t\n\n\t# cv2.imshow(\"output\",output)\n\tcv2.imwrite(\"predicted/output.jpg\",output)\n\t# count += 23\n\t# cv2.imwrite(\"predicted/output\" + str(count) +\".jpg\",output)\n\t\t\n# predictModel()","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524368254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom celery.backends.redis import RedisBackend\nfrom kombu.utils import cached_property\nimport json\nimport pickle\nfrom redis import Redis\n\nfrom .redis_sentinel import EnsuredRedisMixin, get_redis_via_sentinel\n\n\nclass RedisSentinelBackend(RedisBackend):\n \"\"\"\n Redis results backend with support for Redis Sentinel\n\n .. 
note::\n In order to correctly configure the sentinel,\n this backend expects an additional backend celery\n configuration to be present - ``CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS``.\n Here is are sample transport options::\n\n CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS = {\n 'sentinels': [('192.168.1.1', 26379),\n ('192.168.1.2', 26379),\n ('192.168.1.3', 26379)],\n 'service_name': 'master',\n 'socket_timeout': 0.1,\n }\n \"\"\"\n\n def __init__(self, transport_options=None, *args, **kwargs):\n super(RedisSentinelBackend, self).__init__(*args, **kwargs)\n\n _get = self.app.conf.get\n self.transport_options = transport_options or _get('CELERY_RESULT_BACKEND_TRANSPORT_OPTIONS') or {}\n self.sentinels = self.transport_options['sentinels']\n self.service_name = self.transport_options['service_name']\n self.socket_timeout = self.transport_options.get('socket_timeout', 0.1)\n\n @cached_property\n def client(self):\n \"\"\"\n Cached property for getting ``Redis`` client to be used to interact with redis.\n\n Returned client also subclasses from :class:`.EnsuredRedisMixin` which\n ensures that all redis commands are executed with retry logic in case\n of sentinel failover.\n\n Returns\n -------\n Redis\n Redis client connected to Sentinel via Sentinel connection pool\n \"\"\"\n params = self.connparams\n params.update({\n 'sentinels': self.sentinels,\n 'service_name': self.service_name,\n 'socket_timeout': self.socket_timeout,\n })\n return get_redis_via_sentinel(\n redis_class=type(str('Redis'), (EnsuredRedisMixin, Redis), {}),\n **params\n )\n\n def prepare_exception(self, exc, serializer=None):\n exc = self._wrap_exc(exc)\n return super(RedisSentinelBackend, self).prepare_exception(exc, serializer)\n\n def exception_to_python(self, exc):\n exc = super(RedisSentinelBackend, self).exception_to_python(exc)\n return self._unwrap_exc(exc)\n\n @staticmethod\n def _wrap_exc(e):\n try:\n return Exception(json.dumps({\n 'type': type(e).__name__,\n 'module': type(e).__module__,\n 'data': e.serialize() if hasattr(e, 'serialize') else pickle.dumps(e),\n }))\n except:\n return e\n\n @staticmethod\n def _unwrap_exc(e):\n try:\n exc = json.loads(str(e))\n cls = getattr(RedisSentinelBackend._get_module(exc['module']), exc['type'])\n if hasattr(cls, 'deserialize'):\n return cls.deserialize(exc['data'])\n return pickle.loads(exc['data'])\n except:\n return e\n\n @staticmethod\n def _get_module(module_name):\n from importlib import import_module\n from sys import modules\n from types import ModuleType\n module = modules.get(module_name)\n if isinstance(module, ModuleType):\n return module\n return import_module(module_name)\n","sub_path":"celery_redis_sentinel/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488642714","text":"import os\n\nimport torch\nimport torchaudio\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom speechbrain.pretrained import EncoderDecoderASR\n\nfrom pdb import set_trace as Tra\n\n\nprint('*'*30)\nprint('torch version ?', torch.__version__)\nprint('torchaudio version ?', torchaudio.__version__)\nprint('*'*30)\nprint('cuda availability ? 
{}'.format(torch.cuda.is_available()))\nprint('total gpu nums : {}'.format(torch.cuda.device_count()))\nprint('cudnn backends version : {}'.format(torch.backends.cudnn.version()))\nprint('cuda version : {}'.format(torch.version.cuda))\nprint('*'*30)\nfor n in range(torch.cuda.device_count()):\n print('{}th GPU name is {}'.format(n,torch.cuda.get_device_name(n)))\n print('\\t capability of this GPU is {}'.format(torch.cuda.get_device_capability(n)))\nprint('*'*30)\n\n\ndef main():\n\n asr_model = EncoderDecoderASR.from_hparams(source=\"speechbrain/asr-transformer-transformerlm-librispeech\", savedir=\"pretrained_models/asr-transformer-transformerlm-librispeech\", run_opts={\"device\":\"cuda\"})\n\n audio_files=[]\n audio_files.append('./LibriSpeech/test-clean/1089/134686/1089-134686-0030.flac')\n audio_files.append('./LibriSpeech/test-clean/1089/134686/1089-134686-0014.flac')\n audio_files.append('./LibriSpeech/test-clean/1089/134686/1089-134686-0007.flac')\n audio_files.append('./LibriSpeech/test-clean/1089/134691/1089-134691-0000.flac')\n audio_files.append('./LibriSpeech/test-clean/1089/134691/1089-134691-0003.flac')\n audio_files.append('./LibriSpeech/test-clean/1188/133604/1188-133604-0030.flac')\n audio_files.append('./LibriSpeech/test-clean/1089/134691/1089-134691-0019.flac')\n audio_files.append('./LibriSpeech/test-clean/1188/133604/1188-133604-0006.flac')\n\n sigs=[]\n lens=[]\n for audio_file in audio_files:\n snt, fs = torchaudio.load(audio_file)\n sigs.append(snt.squeeze())\n lens.append(snt.shape[1])\n\n batch = pad_sequence(sigs, batch_first=True, padding_value=0.0)\n lens = torch.Tensor(lens) / batch.shape[1]\n\n '''\n (Pdb) batch.size(); lens.size(); lens\n torch.Size([8, 68400])\n torch.Size([8])\n tensor([0.6351, 0.5205, 1.0000, 0.4877, 0.5088, 0.4480, 0.7380, 0.5614])\n '''\n\n # Tra()\n\n with torch.no_grad():\n ## CTC / Attention joint beam search with lm \n result = asr_model.transcribe_batch(batch, lens)\n\n ## encoding only\n # encoder_out = asr_model.encode_batch(batch, lens)\n\n # ## step by step\n # batch, lens = batch.to('cuda'), lens.to('cuda')\n # encoder_out = asr_model.mods.encoder(batch, lens)\n # predicted_tokens, scores = asr_model.mods.decoder(encoder_out, lens)\n # predicted_words = [\n # asr_model.tokenizer.decode_ids(token_seq)\n # for token_seq in predicted_tokens\n # ]\n\n # ctc_outputs = asr_model.mods.decoder.ctc_forward_step(encoder_out)\n\n # logits = asr_model.mods.decoder.ctc_fc(encoder_out)\n # ctc_outputs2 = asr_model.mods.decoder.softmax(logits)\n\n # Tra()\n\n '''\n (Pdb) encoder_out.size(); ctc_outputs.size()\n torch.Size([8, 107, 512])\n torch.Size([8, 107, 5000])\n\n (Pdb) len(asr_model.tokenizer)\n 5000\n\n ##### S2SBaseSearcher \n (Pdb) asr_model.mods.decoder.bos_index; asr_model.mods.decoder.eos_index;\n 1\n 2\n (Pdb) asr_model.mods.decoder.min_decode_ratio; asr_model.mods.decoder.max_decode_ratio;\n 0.0\n 1.0\n\n # asr_model.mods.decoder.lm_modules\n # asr_model.mods.decoder.model\n\n ##### S2SBeamSearcher \n (Pdb) asr_model.mods.decoder.ctc_weight; asr_model.mods.decoder.blank_index; asr_model.mods.decoder.att_weight\n 0.4\n 0\n 0.6\n\n (Pdb) asr_model.mods.decoder.ctc_window_size; asr_model.mods.decoder.beam_size;\n 0\n 66\n\n (Pdb) asr_model.mods.decoder.lm_weight;\n (Pdb) asr_model.mods.decoder.lm_modules\n TransformerLM(\n (positional_encoding): PositionalEncoding()\n (encoder): TransformerEncoder(\n (layers): ModuleList(\n (0): TransformerEncoderLayer(\n (self_att): MultiheadAttention(\n (att): MultiheadAttention(\n (out_proj): 
NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n )\n )\n (pos_ffn): PositionalwiseFeedForward(\n (ffn): Sequential(\n (0): Linear(in_features=768, out_features=3072, bias=True)\n (1): GELU()\n (2): Dropout(p=0.0, inplace=False)\n (3): Linear(in_features=3072, out_features=768, bias=True)\n )\n )\n (norm1): LayerNorm(\n (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n )\n (norm2): LayerNorm(\n (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n )\n (dropout1): Dropout(p=0.0, inplace=False)\n (dropout2): Dropout(p=0.0, inplace=False)\n )\n\n\n (Pdb) asr_model.mods.decoder.lm_modules.output_proj\n ModuleList(\n (layers): ModuleList(\n (0): Linear(\n (w): Linear(in_features=768, out_features=768, bias=True)\n )\n (1): LayerNorm(\n (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n )\n (2): Linear(\n (w): Linear(in_features=768, out_features=5000, bias=True)\n )\n )\n )\n\n (Pdb) asr_model.mods.decoder.fc\n Linear(\n (w): Linear(in_features=512, out_features=5000, bias=True)\n )\n (Pdb) asr_model.mods.decoder.ctc_fc\n Linear(\n (w): Linear(in_features=512, out_features=5000, bias=True)\n )\n (Pdb) asr_model.mods.decoder.softmax\n LogSoftmax(dim=-1)\n\n (Pdb) asr_model.mods.decoder.temperature; asr_model.mods.decoder.temperature_lm;\n 1.15\n 1.15\n '''\n\n for i, hypo in enumerate(result[0]):\n print('{} : {}'.format(i+1,hypo))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"inference_experiment/sb_inference.py","file_name":"sb_inference.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"549942439","text":"import collections\nimport numpy as np\n\n\ndef tokenize_string(sample):\n return tuple(sample.lower().split(' '))\n\n\nclass KmerLanguageModel(object):\n def __init__(self, n, samples, tokenize=False):\n if tokenize:\n tokenized_samples = []\n for sample in samples:\n tokenized_samples.append(tokenize_string(sample))\n samples = tokenized_samples\n print(samples)\n\n self._n = n\n self._samples = samples\n self._kmer_counts = collections.defaultdict(int)\n self._total_kmers = 0\n for kmer in self.kmers():\n self._kmer_counts[kmer] += 1\n self._total_kmers += 1\n\n def kmers(self):\n n = self._n\n for i in range(len(self._samples)-n+1):\n yield self._samples[i:i+n]\n\n def unique_kmers(self):\n return set(self._kmer_counts.keys())\n\n def log_likelihood(self, kmer):\n if kmer not in self._kmer_counts:\n return -np.inf\n else:\n return np.log(self._kmer_counts[kmer]) - np.log(self._total_kmers)\n\n def kl_to(self, p):\n # p is another KmerLanguageModel\n log_likelihood_ratios = []\n for kmer in p.kmers():\n log_likelihood_ratios.append(p.log_likelihood(kmer) - self.log_likelihood(kmer))\n return np.mean(log_likelihood_ratios)\n\n def cosine_sim_with(self, p):\n # p is another KmerLanguageModel\n p_dot_q = 0.\n p_norm = 0.\n q_norm = 0.\n for kmer in p.unique_kmers():\n p_i = np.exp(p.log_likelihood(kmer))\n q_i = np.exp(self.log_likelihood(kmer))\n p_dot_q += p_i * q_i\n p_norm += p_i**2\n for kmer in self.unique_kmers():\n q_i = np.exp(self.log_likelihood(kmer))\n q_norm += q_i**2\n return p_dot_q / (np.sqrt(p_norm) * np.sqrt(q_norm))\n\n def precision_wrt(self, p):\n # p is another KmerLanguageModel\n num = 0.\n denom = 0\n p_kmers = p.unique_kmers()\n for kmer in self.unique_kmers():\n if kmer in p_kmers:\n num += self._kmer_counts[kmer]\n denom += self._kmer_counts[kmer]\n return float(num) / denom\n\n def 
recall_wrt(self, p):\n return p.precision_wrt(self)\n\n def js_with(self, p):\n log_p = np.array([p.log_likelihood(kmer) for kmer in p.unique_kmers()])\n log_q = np.array([self.log_likelihood(kmer) for kmer in p.unique_kmers()])\n log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))\n kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))\n\n log_p = np.array([p.log_likelihood(kmer) for kmer in self.unique_kmers()])\n log_q = np.array([self.log_likelihood(kmer) for kmer in self.unique_kmers()])\n log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))\n kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))\n\n return 0.5*(kl_p_m + kl_q_m) / np.log(2)\n\n","sub_path":"utils/language_utils.py","file_name":"language_utils.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370268191","text":"include_rules = [\n \"+bindings\",\n \"+core\",\n # We do not want any new dependencies on core/exported or\n # core/frames/Web(Local|Remote)FrameImpl.h until we resolve the control\n # layer.\n \"!core/exported\",\n \"!core/frame/WebLocalFrameImpl.h\",\n \"!core/frame/WebRemoteFrameImpl.h\",\n \"+modules\",\n \"+platform\",\n \"+public/platform\",\n \"+public/web\",\n \"-web\",\n]\n","sub_path":"third_party/WebKit/Source/modules/DEPS","file_name":"DEPS","file_ext":"","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463491994","text":"s=\"1a3b45Js1hi255\"\n#print(ord(\"9\"))\nl=[]\nsum=0\nstr=\"\"\nfor i in range(len(s)):\n ascii= ord(s[i])\n if (ascii>=48 and ascii<=57) or ascii==ord(\"-\"):\n str+=s[i]\n else:\n if str==\"\":\n continue\n l.append(int(str))\n sum+=int(str)\n str=\"\"\n if i==len(s)-1 and ord(s[i])>=48 and ord(s[i])<=57:\n l.append(int(str))\n sum += int(str)\n\nprint(sum)\nprint(l)","sub_path":"Problems/que6.py","file_name":"que6.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491689206","text":"from numpy import *\nimport time\nimport sqlite3\nimport sys\nimport scipy\nimport Gnuplot, Gnuplot.funcutils, time\n\nclass SizeGraph(object):\n def graph(self,surprisal_database):\n plot = Gnuplot.Gnuplot(persist=1)\n plot.ylabel(\"No. 
of usernames\")\n plot.xlabel(\"timestep\")\n x = []\n while True:\n surprisal_list = self.readGraph(surprisal_database)\n x.append(len(surprisal_list))\n dat = Gnuplot.Data(x)\n plot.plot(dat)\n\n def readGraph(self, surprisal_database):\n # Create database\n surprisal_list = []\n\n # Get the number of changes\n conn = sqlite3.connect(surprisal_database)\n c = conn.cursor()\n c.execute(\"SELECT * FROM usernames\")\n for row in c:\n surprisal_list.append(row[1])\n conn.close()\n return surprisal_list \n\nif __name__ == '__main__':\n d = SizeGraph()\n d.graph(sys.argv[1])\n","sub_path":"inforeaper_client/graphing_utilities/graph_types/SizeGraph.py","file_name":"SizeGraph.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532604044","text":"import pytest\nfrom marshmallow import Schema, fields\nfrom sqlalchemy import Column, Integer, String\n\nfrom flask_resty import Api, GenericModelView\nfrom flask_resty.testing import assert_response\n\n# -----------------------------------------------------------------------------\n\n\n@pytest.fixture\ndef models(db):\n class Widget(db.Model):\n __tablename__ = \"widgets\"\n\n id_1 = Column(Integer, primary_key=True)\n id_2 = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n\n db.create_all()\n\n yield {\"widget\": Widget}\n\n db.drop_all()\n\n\n@pytest.fixture\ndef schemas():\n class WidgetSchema(Schema):\n id_1 = fields.Integer(as_string=True)\n id_2 = fields.Integer(as_string=True)\n name = fields.String(required=True)\n\n return {\"widget\": WidgetSchema()}\n\n\n@pytest.fixture(autouse=True)\ndef routes(app, models, schemas):\n class WidgetViewBase(GenericModelView):\n model = models[\"widget\"]\n schema = schemas[\"widget\"]\n id_fields = (\"id_1\", \"id_2\")\n\n class WidgetListView(WidgetViewBase):\n def get(self):\n return self.list()\n\n def post(self):\n return self.create(allow_client_id=True)\n\n class WidgetView(WidgetViewBase):\n def get(self, id_1, id_2):\n return self.retrieve((id_1, id_2))\n\n def patch(self, id_1, id_2):\n return self.update((id_1, id_2), partial=True)\n\n def delete(self, id_1, id_2):\n return self.destroy((id_1, id_2))\n\n api = Api(app)\n api.add_resource(\n \"/widgets\", WidgetListView, WidgetView, id_rule=\"/\"\n )\n\n\n@pytest.fixture(autouse=True)\ndef data(db, models):\n db.session.add_all(\n (\n models[\"widget\"](id_1=1, id_2=2, name=\"Foo\"),\n models[\"widget\"](id_1=1, id_2=3, name=\"Bar\"),\n models[\"widget\"](id_1=4, id_2=5, name=\"Baz\"),\n )\n )\n db.session.commit()\n\n\n# -----------------------------------------------------------------------------\n\n\ndef test_list(client):\n response = client.get(\"/widgets\")\n assert_response(\n response,\n 200,\n [\n {\"id_1\": \"1\", \"id_2\": \"2\", \"name\": \"Foo\"},\n {\"id_1\": \"1\", \"id_2\": \"3\", \"name\": \"Bar\"},\n {\"id_1\": \"4\", \"id_2\": \"5\", \"name\": \"Baz\"},\n ],\n )\n\n\ndef test_retrieve(client):\n response = client.get(\"/widgets/1/2\")\n assert_response(response, 200, {\"id_1\": \"1\", \"id_2\": \"2\", \"name\": \"Foo\"})\n\n\ndef test_create(client):\n response = client.post(\n \"/widgets\", data={\"id_1\": \"4\", \"id_2\": \"6\", \"name\": \"Qux\"}\n )\n assert response.headers[\"Location\"] == \"/widgets/4/6\"\n\n assert_response(response, 201, {\"id_1\": \"4\", \"id_2\": \"6\", \"name\": \"Qux\"})\n\n\ndef test_update(client):\n response = client.patch(\n \"/widgets/1/2\", data={\"id_1\": \"1\", \"id_2\": 
\"2\", \"name\": \"Qux\"}\n )\n assert_response(response, 200, {\"id_1\": \"1\", \"id_2\": \"2\", \"name\": \"Qux\"})\n\n\ndef test_destroy(client):\n destroy_response = client.delete(\"/widgets/1/2\")\n assert_response(destroy_response, 204)\n\n retrieve_response = client.get(\"/widgets/1/2\")\n assert_response(retrieve_response, 404)\n","sub_path":"tests/test_composite_id.py","file_name":"test_composite_id.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"336790658","text":"import torch\n\n\ndef get_optimizer(model, config):\n if config.optimizer == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(),\n lr=config.learning_rate,\n momentum=config.momentum,\n weight_decay=config.weight_decay)\n elif config.optimizer == 'adam':\n optimizer = torch.optim.Adam(model.parameters(),\n lr=config.learning_rate,\n weight_decay=config.weight_decay)\n elif config.optimizer == 'adamW':\n # print(model)\n # cal_params = list(map(id, model.regression_head.cal.parameters()))\n # rest_params = filter(lambda x: id(x) not in cal_params, model.parameters())\n param_dicts = [\n {\"params\": [p for n, p in model.named_parameters() if\n \"backbone\" not in n and \"regression\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": 1e-4,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if \"regression\" in n and p.requires_grad],\n \"lr\": 2e-4,\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts,\n lr=config.learning_rate,\n weight_decay=config.weight_decay)\n # optimizer = torch.optim.AdamW(model.parameters(),\n # lr=config.learning_rate,\n # weight_decay=config.weight_decay)\n else:\n raise NotImplementedError(\"Optimizer {} not supported\".format(config.optimizer))\n\n return optimizer\n\n","sub_path":"utils/helper_optimizer.py","file_name":"helper_optimizer.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4325136","text":"class LL:\r\n def __init__(self):\r\n self.head=None\r\n\r\n def printll(self):\r\n temp=self.head\r\n while(temp!=None):\r\n print(temp.data,end=' ')\r\n temp=temp.next\r\n print()\r\n\r\n def append(self,val):\r\n n_node=Node(val)\r\n temp=self.head\r\n while(temp.next!=None):\r\n temp=temp.next\r\n temp.next=n_node\r\n\r\n def reverse(self):\r\n prev=None\r\n cur=self.head\r\n\r\n while cur!=None:\r\n suf = cur.next\r\n cur.next=prev\r\n prev=cur\r\n cur=suf\r\n self.head=prev\r\n\r\nclass Node:\r\n def __init__(self,val):\r\n self.data=val\r\n self.next=None\r\n\r\nif __name__ == '__main__':\r\n ll1=LL()\r\n n1=Node(5)\r\n ll1.head=n1\r\n ll1.append(25)\r\n ll1.append(55)\r\n ll1.append(75)\r\n print(\"Actual Linked list:\",end=\"\")\r\n ll1.printll()\r\n ll1.reverse()\r\n print(\"Reversed Linked list:\", end=\"\")\r\n ll1.printll()\r\n","sub_path":"Linked_List/reversell.py","file_name":"reversell.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"406364936","text":"# Copyright (c) Emil Madsen 2020. All rights reserved.\n# Licensed under the MIT license. 
See LICENSE file in the project root for full license information.\nimport os\n\nimport uvicorn\nfrom starlette.applications import Starlette\nfrom starlette.responses import Response\nfrom starlette.routing import Route\n\nhas_async_handler = False\ntry:\n    from function import async_handler\n    has_async_handler = True\nexcept ImportError:\n    from function import handler\n\n# distutils.util.strtobool() can throw an exception\ndef is_true(val):\n    return len(val) > 0 and val.lower() == \"true\" or val == \"1\"\n\n\nasync def main_route(request):\n    raw_body = os.getenv(\"RAW_BODY\", \"false\")\n\n    as_text = True\n\n    if is_true(raw_body):\n        as_text = False\n\n    body = await request.body()\n    if as_text:\n        body = body.decode(\"utf-8\")\n\n    if has_async_handler:\n        ret = await async_handler.handle(body)\n    else:\n        ret = handler.handle(body)\n    return Response(ret)\n\n\nif __name__ == \"__main__\":\n    routes = [\n        Route(\"/\", endpoint=main_route, methods=[\"GET\", \"POST\"]),\n        Route(\"/{full_path:path}\", endpoint=main_route, methods=[\"GET\", \"POST\"])\n    ]\n    app = Starlette(routes=routes)\n    uvicorn.run(app, host=\"0.0.0.0\", port=5000, log_level=\"info\")\n","sub_path":"template/python3-starlette/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"234332256","text":"import re\n\nimport open_file as opFile\nimport random_word as getWord\nimport guessed_word as guess\nimport choose_difficulty as cDifficulty\nimport character_input as charInput\nimport recursive_get_char as recursChar\n\ndef play_game():\n    \"\"\"\n    This method is used to run the game.\n\n    :returns: the win_game boolean which is either true or false.\n    :rtype: a boolean.\n    \"\"\"\n    filename = \"dictionary.txt\"\n    dictionary = \"\"\n    rand_word = \"\"\n    guessed_word = \"\"\n    number_of_tries = 0\n    count_old = 0\n    win_game = True\n\n    dictionary = opFile.open_file(filename)\n    print(\"\\n||--------------------------------- Hangman ---------------------------------||\\n\")\n    rand_word, number_of_tries = cDifficulty.choose_difficulty(dictionary)\n    print(\"The random word for this game is: %s\" % (str(rand_word)))\n    guessed_word = guess.create_guessed_word(rand_word)\n    guess.print_guessed_word(guessed_word)\n    print(\"Number of tries left: %d\\n\" % (number_of_tries))\n    win_game = recursChar.recursive_get_char(number_of_tries, rand_word, guessed_word, count_old)\n    if(win_game):\n        print(\"You Win!\\n\")\n        play_again(win_game)\n    else:\n        print(\"You Lose!\\n\")\n    return win_game\n\ndef play_again(win_game):\n    \"\"\"\n    This method is for playing the game again.\n\n    :param win_game: This variable is used to say whether the player has won the game or not.\n    \"\"\"\n    yes = r\"^(y|Y)(es)?$\"\n    no = r\"^(n|N)(o)?$\"\n    if(win_game):\n        # input() replaces Python 2's raw_input(); the rest of this module already uses Python 3 print().\n        get_user_input = input(\"Would you like to play again? 
\")\n try:\n user_choice = str(get_user_input)\n except TypeError:\n print(\"Please enter a valid response!\")\n play_game(win_game)\n if(re.match(yes, user_choice)):\n play_game()\n elif(re.match(no, user_choice)):\n print(\"Thanks for playing!\")\n else:\n print(\"Please enter yes or no, or Y or N.\")\n play_again(win_game)\n return\n return\n","sub_path":"Python-Projects/Hangman/Hangman_2.0/src/components/play_game.py","file_name":"play_game.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433006769","text":"from flask import render_template, json\nfrom app import app\nfrom app.models import Feature, Layer\nimport pprint\n\n@app.route('/')\n@app.route('/index')\ndef index():\n user = {'username': 'Miguel'}\n posts = [\n {\n 'author': {'username': 'John'},\n 'body': 'Beautiful day in Portland!'\n },\n {\n 'author': {'username': 'Susan'},\n 'body': 'The Avengers movie was so cool!'\n }\n ]\n return render_template('index.html', title='Home', user=user, posts=posts)\n\n\n@app.route('/layers')\n@app.route('/layers/')\ndef layers():\n layers = Layer.query.order_by(Layer.created_at).all()\n return render_template(\"layers/index.html\", title='Explore', layers=layers)\n\n\n\n\n@app.route('/layers/')\ndef layer(id):\n layer = Layer.query.filter_by(id=id).first_or_404()\n\n packages = layer.get_feature_installed()\n result = map(lambda x: x['blobSum'], layer.manifest_v1['fsLayers'])\n\n #history_raw = json.loads(layer.manifest_v1['history'])\n #history = json.dumps(history_raw, sort_keys = True, indent = 4, separators = (',', ': '))\n\n return render_template(\"layers/layer.html\", title='Explore', packages=packages) #history=history_raw['container_config']['Cmd'])\n\n\n\n@app.route('/user/')\ndef user(username):\n user = User.query.filter_by(username=username).first_or_404()\n posts = [\n {'author': user, 'body': 'Test post #1'},\n {'author': user, 'body': 'Test post #2'}\n ]\n return render_template('user.html', user=user, posts=posts)\n\n\n@app.route('/features')\ndef explore():\n features = Feature.query.order_by(Feature.name.asc())\n return render_template(\"features/index.html\", title='Explore', features=features)","sub_path":"flask/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431836157","text":"import json\nimport shutil\nimport types\n\nfrom substratools import algo, exceptions\n\nimport pytest\n\n\n@pytest.fixture(autouse=True)\ndef setup(valid_opener):\n pass\n\n\nclass DummyAlgo(algo.Algo):\n\n def train(self, X, y, models, rank):\n new_model = {'value': 0}\n for m in models:\n assert isinstance(m, dict)\n assert 'value' in m\n new_model['value'] += m['value']\n return new_model\n\n def predict(self, X, model):\n pred = model['value']\n return X * pred\n\n def load_model(self, path):\n with open(path, 'r') as f:\n return json.load(f)\n\n def save_model(self, model, path):\n with open(path, 'w') as f:\n json.dump(model, f)\n\n\nclass NoSavedModelAlgo(DummyAlgo):\n def save_model(self, model, path):\n # do not save model at all\n pass\n\n\nclass WrongSavedModelAlgo(DummyAlgo):\n def save_model(self, model, path):\n # simulate numpy.save behavior\n with open(path + '.npy', 'w') as f:\n json.dump(model, f)\n\n\n@pytest.fixture\ndef create_models(workdir):\n model_a = {'value': 1}\n model_b = {'value': 2}\n\n model_dir = workdir / \"model\"\n 
model_dir.mkdir()\n\n def _create_model(model_data):\n model_name = model_data['value']\n filename = \"{}.json\".format(model_name)\n path = model_dir / filename\n path.write_text(json.dumps(model_data))\n return filename\n\n model_datas = [model_a, model_b]\n model_filenames = [_create_model(d) for d in model_datas]\n\n return model_datas, model_filenames\n\n\ndef test_create():\n # check we can instantiate a dummy algo class\n DummyAlgo()\n\n\ndef test_train_no_model():\n a = DummyAlgo()\n wp = algo.AlgoWrapper(a)\n model = wp.train([])\n assert model['value'] == 0\n\n\ndef test_train_multiple_models(workdir, create_models):\n _, model_filenames = create_models\n\n a = DummyAlgo()\n wp = algo.AlgoWrapper(a)\n\n model = wp.train(model_filenames)\n assert model['value'] == 3\n\n\ndef test_train_fake_data():\n a = DummyAlgo()\n wp = algo.AlgoWrapper(a)\n model = wp.train([], fake_data=True, n_fake_samples=2)\n assert model['value'] == 0\n\n\n@pytest.mark.parametrize(\"fake_data,expected_pred,n_fake_samples\", [\n (False, 'X', None),\n (True, ['Xfake'], 1),\n])\ndef test_predict(fake_data, expected_pred, n_fake_samples, workdir, create_models):\n _, model_filenames = create_models\n\n a = DummyAlgo()\n wp = algo.AlgoWrapper(a)\n pred = wp.predict(model_filenames[0], fake_data=fake_data, n_fake_samples=n_fake_samples)\n assert pred == expected_pred\n\n\ndef test_execute_train(workdir):\n\n output_model_path = workdir / 'model' / 'model'\n assert not output_model_path.exists()\n\n algo.execute(DummyAlgo(), sysargs=['train'])\n assert output_model_path.exists()\n\n algo.execute(DummyAlgo(), sysargs=['train', '--fake-data', '--n-fake-samples', '1'])\n assert output_model_path.exists()\n\n algo.execute(DummyAlgo(), sysargs=['train', '--debug'])\n assert output_model_path.exists()\n\n\ndef test_execute_train_multiple_models(workdir, create_models):\n _, model_filenames = create_models\n\n output_model_path = workdir / 'model' / 'model'\n assert not output_model_path.exists()\n pred_path = workdir / 'pred' / 'pred'\n assert not pred_path.exists()\n\n command = ['train']\n command.extend(model_filenames)\n\n algo.execute(DummyAlgo(), sysargs=command)\n assert output_model_path.exists()\n with open(output_model_path, 'r') as f:\n model = json.load(f)\n assert model['value'] == 3\n\n assert not pred_path.exists()\n\n\ndef test_execute_predict(workdir, create_models):\n _, model_filenames = create_models\n model_name = 'model'\n output_model_path = workdir / 'model' / model_name\n pred_path = workdir / 'pred' / 'pred'\n\n # first train models\n assert not pred_path.exists()\n command = ['train']\n command.extend(model_filenames)\n algo.execute(DummyAlgo(), sysargs=command)\n assert output_model_path.exists()\n\n # do predict on output model\n assert not pred_path.exists()\n algo.execute(DummyAlgo(), sysargs=['predict', model_name])\n assert pred_path.exists()\n with open(pred_path, 'r') as f:\n pred = json.load(f)\n assert pred == 'XXX'\n pred_path.unlink()\n\n # do predict with different model paths\n input_models_dir = workdir / 'other_models'\n input_models_dir.mkdir()\n input_model_path = input_models_dir / 'supermodel'\n shutil.move(output_model_path, input_model_path)\n assert not pred_path.exists()\n algo.execute(DummyAlgo(), sysargs=[\n 'predict', 'supermodel', '--models-path', str(input_models_dir)])\n assert pred_path.exists()\n with open(pred_path, 'r') as f:\n pred = json.load(f)\n assert pred == 'XXX'\n\n\n@pytest.mark.parametrize('algo_class', (NoSavedModelAlgo, WrongSavedModelAlgo))\ndef 
test_model_check(algo_class):\n a = algo_class()\n wp = algo.AlgoWrapper(a)\n\n with pytest.raises(exceptions.MissingFileError):\n wp.train([])\n\n\n@pytest.mark.parametrize('use_models_generator,models_type', (\n (True, types.GeneratorType),\n (False, list),\n))\ndef test_models_generator(mocker, workdir, create_models, use_models_generator, models_type):\n _, model_filenames = create_models\n\n command = ['train']\n command.extend(model_filenames)\n\n a = DummyAlgo()\n a.use_models_generator = use_models_generator\n mocker.patch.object(a, 'train', autospec=True, return_value={})\n\n algo.execute(a, sysargs=command)\n models = a.train.call_args[0][2]\n assert isinstance(models, models_type)\n","sub_path":"tests/test_algo.py","file_name":"test_algo.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83747460","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nPurpose:\n Application Logic associated with the Clearing and blocking Signals for QT Widgets\n\n\"\"\"\n\nimport logging\nimport re\nfrom datetime import datetime\n\nfrom PySide.QtCore import Qt\nfrom PySide.QtGui import (QTextEdit, QLineEdit, QComboBox, QSpinBox, QDateEdit, QDateTimeEdit, QCheckBox,\n QTableView, QTreeView, QToolButton, QPushButton)\nfrom pytz import timezone\n\n__pgmname__ = \"clear\"\n__author__ = \"AJ Reynolds\"\n__email__ = \"ar380v@att.com\"\n\n__maintainer__ = __author__\n\nlog = logging.getLogger(__pgmname__)\n\n_no_reset = re.compile('^.*_nrs')\n_only_reset = re.compile('^.*_nro')\n_readonly = re.compile('^.*_ro')\n_filter = re.compile('^led.*Filter.*')\n\n\ndef blockSignals_(Status=False, panel=None):\n _objTypes = [QTextEdit, QLineEdit, QComboBox, QCheckBox, QDateEdit, QDateTimeEdit, QSpinBox]\n for _objType in _objTypes:\n _list = panel.findChildren(_objType)\n for _wid in _list:\n if (_wid.objectName() == u'' or\n (_objType == QLineEdit and not _wid.objectName()[:3] == 'led') or\n (Status and _filter.search(_wid.objectName()))):\n continue\n _wid.blockSignals(Status)\n return\n\n\ndef clear_(retainBlock=False, panel=None, skipview=False):\n blockSignals_(True, panel)\n _objTypes = [QTableView, QTreeView, QLineEdit, QSpinBox, QTextEdit, QComboBox, QCheckBox, QDateEdit, QDateTimeEdit,\n QToolButton, QPushButton]\n for _objType in _objTypes:\n _list = panel.findChildren(_objType)\n for _wid in _list:\n if (_no_reset.search(_wid.objectName()) or\n _wid.objectName() == u'' or\n (_objType == QLineEdit and not _wid.objectName()[:3] == 'led')):\n continue\n\n if _objType in [QTableView, QTreeView]:\n if skipview and _wid.objectName()[:6] != 'trvOrg':\n continue\n try:\n _model = _wid.model().sourceModel()\n except:\n _model = _wid.model()\n if _model is None:\n continue\n _model.beginResetModel()\n try:\n _model.updateAll(None)\n except:\n pass\n _model.endResetModel()\n continue\n\n if _objType in [QLineEdit, QSpinBox, QTextEdit]:\n _wid.clear()\n if _objType == QCheckBox:\n _wid.setCheckState(Qt.Unchecked)\n elif _objType == QComboBox:\n _wid.setCurrentIndex(-1)\n elif _objType in [QDateEdit, QDateTimeEdit]:\n _wid.setDate(datetime.now(timezone('UTC')))\n elif _objType in [QToolButton, QPushButton]:\n try:\n _wid.setChecked(False)\n except:\n pass\n try:\n if _only_reset.search(_wid.objectName()):\n _wid.setEnabled(True)\n _wid.setReadOnly(False)\n continue\n if _readonly.search(_wid.objectName()):\n _wid.setEnabled(True)\n _wid.setReadOnly(True)\n continue\n _wid.setEnabled(False)\n _wid.setReadOnly(True)\n except:\n 
pass\n\n if not retainBlock:\n blockSignals_(False, panel)\n return\n\n\ndef set_ready(panel=None, check=None):\n _objTypes = [QLineEdit, QSpinBox, QTextEdit, QComboBox, QCheckBox, QDateEdit, QDateTimeEdit,\n QToolButton, QPushButton]\n _chkbxs = {x.objectName(): x for x in panel.findChildren(QCheckBox)}\n for _objType in _objTypes:\n _list = panel.findChildren(_objType)\n for _wid in _list:\n if _wid.objectName() == u'' or (_objType == QLineEdit and not _wid.objectName()[:3] == 'led'):\n continue\n if _readonly.search(_wid.objectName()):\n _wid.setEnabled(True)\n _wid.setReadOnly(True)\n continue\n\n if _objType in [QLineEdit, QSpinBox, QTextEdit]:\n _wid.setEnabled(True)\n _wid.setReadOnly(False)\n elif _objType in [QComboBox, QCheckBox, QToolButton, QPushButton]:\n _wid.setEnabled(True)\n elif _objType in [QDateEdit, QDateTimeEdit]:\n _chkbx = 'ckb{}'.format(_wid.objectName()[3:])\n if _chkbx in _chkbxs:\n if getattr(check, _chkbx).isChecked():\n _wid.setEnabled(True)\n _wid.setReadOnly(False)\n else:\n _wid.setEnabled(False)\n _wid.setReadOnly(False)\n else:\n _wid.setEnabled(True)\n _wid.setReadOnly(False)\n return\n\n\n__all__ = ['clear_', 'blockSignals_', 'set_ready']\n","sub_path":"jetCore/clear.py","file_name":"clear.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562128165","text":"from turtle import*\ncolor(\"pink\")\nbgcolor(\"green\")\npensize(4)\n\ndef square(length,quantity):\n for i in range(4):\n forward(length*quantity)\n left(90)\n \nquantity=5\nlength=40\n\nfor i in range(quantity):\n square(length,i+1)\n n=length/2*(i+1)\n up()\n setposition(-n,-n)\n down()\n","sub_path":"Season 4/Session4-Assignment 4.2.py","file_name":"Session4-Assignment 4.2.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317547811","text":"import tensorflow as tf \nimport numpy as np \nimport pandas as pd\nfrom prepare_data import Dataset\nfrom dbn import DeepBeliefNetwork\nimport os\n\nfrom tensorflow import logging\n\n\nclass Trainer:\n def __init__(self, path, size,\n pretrain_iterations,rbm_layers,rbm_activations,freeze_rbms,\n dense_layers,dense_activations,batch_normalization,output_activation,\n batch_size,learning_rate,beta1,keep_chance=0.5):\n\n self.dataset = Dataset(path, size)\n self.dbn = DeepBeliefNetwork(pretrain_iterations,rbm_layers,rbm_activations,freeze_rbms,\n dense_layers,dense_activations,batch_normalization,output_activation,\n batch_size,learning_rate,beta1,keep_chance=keep_chance)\n self.dbn.build_graph(self.dataset.rbn_data, self.dataset.input_size, self.dataset.output_size)\n\n def train(self, save_dir):\n \n epochs = 0\n since_improved = 0\n best_loss = 999999.0\n best_accuracy = 0\n \n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n with self.dbn.graph.as_default():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n train_writer = tf.summary.FileWriter(save_dir + '/train', sess.graph)\n while since_improved < 15:\n start= 0 \n train_features, train_labels = self.unison_shuffle(self.dataset.train_features, self.dataset.train_labels)\n while(start + self.dbn.batch_size < train_features.shape[0]):\n batch_data, batch_labels = train_features[start:start+self.dbn.batch_size], train_labels[start:start+self.dbn.batch_size]\n start += self.dbn.batch_size\n _ = 
sess.run([self.dbn.train_op],feed_dict={self.dbn.features: batch_data,self.dbn.labels: batch_labels, self.dbn.dropout: self.dbn.keep_chance})\n correct_prediction = 0 \n loss_, accuracy_ = sess.run([self.dbn.loss, self.dbn.accuracy],feed_dict={self.dbn.features: self.dataset.validation_features,\n self.dbn.labels: self.dataset.validation_labels, \n self.dbn.dropout: 1.0 })\n acc_ = accuracy_ / self.dataset.validation_features.shape[0]\n \n if loss_ < best_loss:\n best_loss = loss_\n best_accuracy = acc_\n since_improved = 0\n self.saver.save(sess,os.path.join(save_dir, 'train.ckpt'),global_step=self.dbn.global_step)\n else:\n since_improved += 1\n\n epochs += 1\n logging.info(\" Epoch: \" + str(epochs)+ \" Best Loss: \" + (\"%.4f\" % best_loss) + \" Validation Accuracy: \" + (\"%.4f\" % acc_))\n \n logging.info(\"Finised Training. Best Validation Accuracy is . \" + str(\"%.4f\" % best_accuracy))\n\n def unison_shuffle(self, a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\n ","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96643043","text":"N=int(input())\nif N>1 and N<=1000:\n A=[]\n for i in range(2,N+1):\n a=0\n for j in range(2,i):\n if i%j==0:\n a+=1\n if a==0:\n A.append(str(i))\n print(' '.join(A))\nelif N==1:\n print(0)\n","sub_path":"python/basic of IO/IO/prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620002493","text":"# -*- encoding: utf-8 -*-\r\n\"\"\"\r\n@File : train.py\r\n@Time : 2019/11/28 11:45\r\n@Author : zwt\r\n@git : \r\n@Software: PyCharm\r\n\"\"\"\r\nimport os\r\nimport numpy as np\r\nimport json\r\nimport tensorflow as tf\r\nfrom .model import NerCore\r\n\r\n\r\nclass NerTrainner:\r\n def __init__(self, vocab_file=\"vocab.json\"):\r\n self.model_dir = \"ner\"\r\n self.vocab_file = vocab_file\r\n self.char_index = {' ': 0}\r\n self.load_dict()\r\n self.unknow_char_id = len(self.char_index)\r\n self.io_sequence_size = 70\r\n vocab_size = len(self.char_index) + 1\r\n self.classnames = {'O': 0, 'B-BRD': 1, 'I-BRD': 2, 'B-KWD': 3, 'I-KWD': 4}\r\n class_size = len(self.classnames)\r\n keep_prob = 0.5\r\n learning_rate = 0.0005\r\n trainable = True\r\n self.batch_size = 1\r\n\r\n with tf.variable_scope('ner_query'):\r\n self.model = NerCore(self.io_sequence_size, vocab_size, class_size, keep_prob, learning_rate,\r\n trainable)\r\n\r\n def load_dict(self):\r\n i = 0\r\n with open(self.vocab_file, \"r+\", encoding=\"utf-8\") as reader:\r\n items = json.load(reader)\r\n for charvalue in items:\r\n self.char_index[charvalue.strip()] = i + 1\r\n i += 1\r\n\r\n def train(self, epochs):\r\n records = self.load_samples()\r\n batch_count = int(len(records) / self.batch_size)\r\n initer = tf.global_variables_initializer()\r\n with tf.Session() as session:\r\n session.run(initer)\r\n ckpt = tf.train.get_checkpoint_state(self.model_dir)\r\n saver = tf.train.Saver()\r\n if ckpt is not None and ckpt.model_checkpoint_path:\r\n saver.restore(session, ckpt.model_checkpoint_path)\r\n for epoch in range(epochs):\r\n train_loss_value = 0.\r\n for i in range(batch_count):\r\n batch_records = records[i * self.batch_size:(i + 1) * self.batch_size]\r\n xrows, xlens, yrows = self.convert_batch(batch_records)\r\n feed_dict = {self.model.inputs: xrows, self.model.targets: 
yrows,\r\n self.model.sequence_lengths: xlens}\r\n batch_loss_value, _ = session.run([self.model.cost_func, self.model.optimizer], feed_dict)\r\n train_loss_value += batch_loss_value / batch_count\r\n if i % 100 == 0:\r\n batch_buffer = \"Progress {0}/{1} , cost : {2}\".format(i + 1, batch_count, batch_loss_value)\r\n print(batch_buffer)\r\n print(\"Epoch: %d/%d , train cost=%f \" % ((epoch + 1), epochs, train_loss_value))\r\n saver.save(session, os.path.join(self.model_dir, \"ner.dat\"))\r\n\r\n def convert_batch(self, records):\r\n xrows = np.zeros((self.batch_size, self.io_sequence_size), dtype=np.float32)\r\n xlens = np.zeros((self.batch_size), dtype=np.int32)\r\n yrows = np.zeros((self.batch_size, self.io_sequence_size), dtype=np.int32)\r\n count = len(records)\r\n for i in range(count):\r\n sent_text = records[i][\"text\"]\r\n tags = records[i][\"label\"].split(\" \")\r\n xlen = len(records[i][\"text\"])\r\n if xlen > self.io_sequence_size:\r\n print(xlen)\r\n xlens[i] = xlen\r\n xrows[i] = self.convert_xrow(sent_text)\r\n yrows[i] = self.convert_classids(tags)\r\n return xrows, xlens, yrows\r\n\r\n def convert_classids(self, tags):\r\n yrow = np.zeros(self.io_sequence_size, dtype=np.int32)\r\n for i in range(len(tags)):\r\n yrow[i] = self.classnames[tags[i]]\r\n return yrow\r\n\r\n def convert_xrow(self, input_text):\r\n char_vector = np.zeros((self.io_sequence_size), dtype=np.int32)\r\n for i in range(len(input_text)):\r\n char_value = input_text[i]\r\n if char_value in self.char_index.keys():\r\n char_vector[i] = self.char_index[char_value]\r\n return char_vector\r\n\r\n def load_samples(self, dstfile=\"train.json\"):\r\n data_items = []\r\n with open(dstfile, \"r\", encoding=\"utf-8\") as reader:\r\n for line in reader:\r\n record = json.loads(line.strip(), encoding=\"utf-8\")\r\n data_items.append(record)\r\n return data_items\r\n\r\n\r\nif __name__ == \"__main__\":\r\n trainner = NerTrainner()\r\n trainner.train(10)","sub_path":"BILSTM_CNN_CRF/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466843622","text":"import sys\nimport psutil\nimport time\nimport commands\n\ndef memory_stat():\n '''\n return the memory info\n '''\n mem = {}\n stat = {}\n f = open('/proc/meminfo')\n lines = f.readlines()\n for line in lines:\n if len(line) < 2 : continue\n name = line.split(':')[0]\n var = line.split(':')[1].split()[0]\n mem[name] = long(var) * 1024.0\n stat['MemUsed'] = mem['MemTotal'] - mem['MemFree'] - mem['Buffers'] - mem['Cached']\n stat['MemTotal'] = mem['MemTotal']\n stat['MemFree'] = mem['MemFree']\n stat['Buffers'] = mem['Buffers']\n stat['Cached'] = mem['Cached']\n return stat\n\ndef proc_stat(procid):\n \n ps_stat = None\n try:\n ps_stat = commands.getoutput('ps -fp %s -u' % procid).split('\\n')[1].split()\n except:\n return {}\n\n procInfo = []\n procInfo.append(ps_stat[0]) #User\n procInfo.append(str(procid)) #PID\n procInfo.append(ps_stat[3]) #%MEM\n procInfo.append(ps_stat[2]) #%CPU\n #procInfo['MemoryUsed'] = memory_stat()['MemTotal'] * float(ps_stat[3]) / 100\n procInfo.append(ps_stat[8]) #StartTime\n return procInfo\n\ndef getInfo():\n res = []\n all_processes = list(psutil.process_iter()) \n for proc in all_processes:\n t = []\n t.append(proc.name()) #Process_name\n t.extend(proc_stat(proc.pid))\n res.append(' '.join(t))\n return res\n\nstr = 
\"ab\"\nstr.ljust(20)","sub_path":"SystemInfo.py","file_name":"SystemInfo.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284025861","text":"from django.shortcuts import get_object_or_404\n\nfrom oscar.apps.checkout.mixins import (\n OrderPlacementMixin as BaseOrderPlacementMixin\n)\nfrom oscar.core.loading import get_model\n\n\nSource = get_model('payment', 'Source')\nSourceType = get_model('payment', 'SourceType')\n\n\nclass OrderPlacementMixin(BaseOrderPlacementMixin):\n\n def create_shipping_address(self, user, shipping_address):\n \"\"\"\n Create and return the shipping address for the current order.\n Compared to self.get_shipping_address(), ShippingAddress is saved and\n makes sure that appropriate UserAddress exists.\n \"\"\"\n # For an order that only contains items that don't require shipping we\n # won't have a shipping address, so we have to check for it.\n if not shipping_address:\n return None\n shipping_address.save()\n\n shipping_method = self.get_shipping_method(basket=self.request.basket)\n\n if user.is_authenticated and shipping_method.code != 'self-pickup':\n self.update_address_book(user, shipping_address)\n return shipping_address\n\n def handle_payment(self, order_number, total, **kwargs):\n payment_method = self.checkout_session.payment_method()\n source = Source(\n source_type=get_object_or_404(SourceType, code=payment_method),\n amount_allocated=total.excl_tax,\n )\n self.add_payment_source(source)\n\n def get_payment_method_name(self):\n payment_method = self.checkout_session.payment_method()\n source_type = get_object_or_404(SourceType, code=payment_method)\n return source_type.name\n","sub_path":"checkout/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206827892","text":"from flask import Flask, render_template, make_response\nfrom werkzeug.contrib.cache import SimpleCache\nfrom decorators import timing\nfrom utils import find_primes\n\napp = Flask(__name__)\n\nCACHE_TIMEOUT = 300\n\ncache = SimpleCache()\n\n\n@app.route(\"/\")\n@timing\ndef hello():\n return render_template(\"index.html\", primes=None)\n\n\n@app.route(\"/heavy\")\n@timing\ndef heavy():\n return render_template(\"heavy.html\", primes=find_primes(100000))\n\n\n@app.route(\"/cached\")\n@timing\ndef cached():\n primes = cache.get('primes')\n if not primes:\n primes = find_primes(100000)\n cache.set('primes', list(primes))\n return render_template(\"heavy.html\", primes=primes)\n\n\n@app.route(\"/cached-with-headers\")\n@timing\ndef cached_with_headers():\n primes = cache.get('primes')\n if not primes:\n primes = find_primes(100000)\n cache.set('primes', list(primes))\n\n response = make_response(render_template(\"heavy.html\", primes=primes))\n\n response.headers['Cache-Control'] = 'public, max_age=300'\n\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"example-2/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117676811","text":"import pytest\n\nimport idom\nfrom idom.tools import html_to_vdom\n\n\ndef test_var_equivalence():\n assert idom.Var([1, 2, 3]) == idom.Var([1, 2, 3])\n assert idom.Var([1, 2, 3]) != idom.Var([1, 2])\n assert idom.Var([1, 2, 3]) != [1, 2, 3]\n\n\ndef test_var_repr():\n 
assert repr(idom.Var([1, 2, 3])) == \"Var([1, 2, 3])\"\n\n\ndef test_var_set():\n    v = idom.Var(None)\n    old_1 = v.set(\"new_1\")\n    assert old_1 is None\n    old_2 = v.set(\"new_2\")\n    assert old_2 == \"new_1\"\n\n\ndef test_var_get():\n    v = idom.Var(None)\n    assert v.get() is None\n    v.set(1)\n    assert v.get() == 1\n\n\n@pytest.mark.parametrize(\n    \"case\",\n    [\n        {\"source\": \"<div/>\", \"model\": {\"tagName\": \"div\"}},\n        {\n            \"source\": \"<div style='background-color: blue'/>\",\n            \"model\": {\n                \"tagName\": \"div\",\n                \"attributes\": {\"style\": {\"backgroundColor\": \"blue\"}},\n            },\n        },\n        {\n            \"source\": \"<div>Hello!</div>\",\n            \"model\": {\"tagName\": \"div\", \"children\": [\"Hello!\"]},\n        },\n        {\n            \"source\": \"<div>Hello!<p>World!</p></div>\",\n            \"model\": {\n                \"tagName\": \"div\",\n                \"children\": [\"Hello!\", {\"tagName\": \"p\", \"children\": [\"World!\"]}],\n            },\n        },\n    ],\n)\ndef test_html_to_vdom(case):\n    assert html_to_vdom(case[\"source\"]) == {\n        \"tagName\": \"div\",\n        \"children\": [case[\"model\"]],\n    }\n\n\ndef test_html_to_vdom_transform():\n    source = \"<p>hello <a>world</a> and <a>universe</a></p>
\"\n\n def make_links_blue(node):\n if node[\"tagName\"] == \"a\":\n node[\"attributes\"][\"style\"] = {\"color\": \"blue\"}\n return node\n\n expected = {\n \"tagName\": \"p\",\n \"children\": [\n \"hello \",\n {\n \"tagName\": \"a\",\n \"children\": [\"world\"],\n \"attributes\": {\"style\": {\"color\": \"blue\"}},\n },\n \" and \",\n {\n \"tagName\": \"a\",\n \"children\": [\"universe\"],\n \"attributes\": {\"style\": {\"color\": \"blue\"}},\n },\n ],\n }\n\n assert html_to_vdom(source, make_links_blue) == {\n \"tagName\": \"div\",\n \"children\": [expected],\n }\n","sub_path":"tests/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"894866","text":"#Recursion General Example\n\n# def recursive_function(parameters):\n# if base_case_condition(parameters):\n# return base_case_value\n# recursive_function(modified_parameters)\n\n\n#Example 1 Sum of previus numbers\ndef sum_positive_numbers(n):\n # The base case is n being smaller than 1\n if n < 1:\n return 0\n\n # The recursive case is adding this number to \n # the sum of the numbers smaller than this one.\n return n + sum_positive_numbers(n-1)\n\nprint(sum_positive_numbers(3)) # Should be 6\nprint(sum_positive_numbers(900)) # Should be 15\n\n#Example 2 Factorial\n\ndef factorial(n):\n print(\"Factorial called with \" + str(n))\n if n<2:\n print(\"Returning 1\")\n return 1\n result=n*factorial(n-1)\n print(\"Returning \" + str(result) + \" for factorial of \" + str(n))\n return result\n\nfactorial(4)\n\n","sub_path":"5_Recursion.py","file_name":"5_Recursion.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328406459","text":"import climate\nimport numpy as np\n\nimport audio\n\n@climate.annotate(\n source='dataset to read',\n target=('wave file output', 'option'),\n pca=('pca transform', 'option'),\n start=('start at the Nth audio window', 'option', None, int),\n seconds=('play N seconds of audio', 'option', None, float),\n)\ndef main(source, target='', pca='', start=0, seconds=10.):\n b = audio.Builder()\n mags = b.read(source, pca, start, seconds)\n if b.log:\n mags = np.exp(mags)\n b.reconstruct(mags)\n b.write(target or source.replace('.npy', '.wav'))\n\n\nif __name__ == '__main__':\n climate.call(main)\n","sub_path":"audio/play-spec-windows.py","file_name":"play-spec-windows.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130236514","text":"import json\nfrom datetime import timedelta\nfrom typing import Any\n\nfrom flask import Blueprint, jsonify, request, abort\nfrom flask_jwt_extended import create_access_token, create_refresh_token\n\nimport config\nimport crud\nfrom utils import security\nfrom utils.decorator import get_db\n\nlogin_api = Blueprint('login', 'login')\n\n\n@login_api.route(\"/access-token\", methods=['POST'])\n@get_db\ndef login_access_token(db) -> Any:\n \"\"\"\n OAuth2 compatible token login, get an access token for future requests\n \"\"\"\n data = request.get_data(as_text=True)\n json_data = json.loads(data)\n user = crud.user_crud.authenticate(\n db, username=json_data.get('username'), password=json_data.get('password')\n )\n if not user:\n abort(400)\n elif not crud.user_crud.is_active(user):\n abort(400)\n access_token = create_access_token(identity=user.id, fresh=True, 
expires_delta=timedelta(\n minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES))\n refresh_token = create_refresh_token(user.id, expires_delta=timedelta(\n minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES))\n return {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n }\n","sub_path":"backend/api/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578024236","text":"\r\n# 改进方案初步设想:\r\n# 4.OIR进行价差的调整,改变原au.py的第16行\r\n\r\nimport pandas as pd\r\nfrom zzy_lib import *\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\n\r\n\r\ndf = pd.read_csv(r'C:\\Users\\60530\\Desktop\\量化交易\\RGF\\高频因子研究测试题\\au1912_20190924.csv', engine='python')\r\ndf = df.iloc[1:, :]\r\n\r\ndatetime = df.apply(lambda x: str(x['Date']) + x['Time'], axis=1)\r\ndatetime = pd.to_datetime(datetime, format='%Y%m%d%H:%M:%S.%f')\r\n\r\n# FairPrice = (BidPrice * AskSize + AskPrice * BidSize) / (BidSize + AskSize)\r\nFairPrice = df.apply(lambda x: ((x['BidPrice1'] * x['AskVolume1'] + (x['AskPrice1'] * x['BidVolume1'])) /\r\n (x['BidVolume1'] + x['AskVolume1'])), axis=1)\r\n\r\n# OIR(Order Imbalance Ratio) = (BidSize - AskSize) / (BidSize+AskSize)\r\nOIR = df.apply(lambda x: ((x['BidVolume1'] - x['AskVolume1']) / (x['BidVolume1'] + x['AskVolume1'])) /\r\n (x['AskPrice1'] - x['BidPrice1']), axis=1)\r\n\r\nlength = df.shape[0]\r\n# 标记买卖的止盈止损结果\r\nbuy, sell = label(FairPrice, length)\r\n\r\nret = FairPrice.pct_change()\r\n# 查看异常值\r\nret1 = ret[1:].reindex(ret[1:].abs().sort_values(ascending=False).index)*1000\r\n# 去除两个异常值\r\nret2 = ret.drop(index=ret1.index[:2])[1:]\r\nOIR2 = OIR.drop(index=ret1.index[:2])[1:]\r\n# datetime2 = datetime.drop(index=ret1.index[:2])[1:]\r\n\r\n# OLS拟合\r\nX = sm.add_constant(OIR2)\r\nresult = (sm.OLS(ret2.astype(float), X.astype(float))).fit()\r\n# result.summary()显示R^2小,拟合效果差\r\nb = result.params['const']\r\na = result.params[0]\r\n\r\n# 作图\r\n# y_fitted = result.fittedvalues\r\n# plt.figure(figsize=(10, 5))\r\n# plt.plot(datetime2, ret2)\r\n# plt.plot(datetime2, y_fitted)\r\n# plt.show()\r\n\r\n# 因子生成\r\n# OIR = OIR.values.astype(None)\r\nfactor = a*OIR + b\r\nbuy_factor = factor.apply(lambda x: 1 if x >= 0 else 0)\r\nsell_factor = factor.apply(lambda x: 1 if x <= 0 else 0)\r\n\r\n# 收益计算\r\nbuy_factor1 = buy_factor.values.astype(None)\r\nbuy_profit = buy_factor1 * buy.flatten()\r\n\r\nsell_factor1 = sell_factor.values.astype(None)\r\nsell_profit = sell_factor1 * sell.flatten()\r\n\r\nprofit = buy_profit + sell_profit\r\n# 胜率计算\r\ntp_times = sum(profit == 1)\r\nsl_times = sum(profit == -3)\r\nwin_ratio = tp_times / (tp_times + sl_times)\r\n\r\n# sumprofit = np.sum(profit)\r\nsumprofit = np.cumsum(profit)\r\n\r\nsumprofit.shape = (length, 1)\r\n\r\n# 计算最大回撤\r\nmb = np.full((length, 1), np.nan)\r\nhighestprofit = 0\r\nmaxback = 0\r\nfor i in range(length):\r\n highestprofit = np.max([highestprofit, sumprofit[i, 0]])\r\n maxback = np.max([maxback, highestprofit - sumprofit[i, 0]])\r\n mb[i, 0] = maxback\r\n\r\n# 计算夏普比率\r\nsharpe_ratio = np.sqrt(length) * profit.mean() / profit.std()\r\n\r\nplt.figure(figsize=(15, 5))\r\nplt.plot(sumprofit)\r\nplt.title('au_strategy')\r\n\r\n\r\n","sub_path":"回测框架/au/au_spread_adjustment.py","file_name":"au_spread_adjustment.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"554033016","text":"class Board :\n def __init__(self,):\n 
self.array = [[' ',' ',' '], \n [' ', ' ', ' '], \n [' ', ' ', ' ']]\n self.game_stillplay = True\n self.winner = None\n self.player = \"X\"\n self.turn = 0\n\n\n def startgame(self):\n textinput = TextInput() #declare obj var.\n printer = Printer()\n while self.game_stillplay: #game loop\n printer.show(self)\n if(self.win_check()):\n if self.player == 'O':\n self.player = 'X'\n print(\"THE WINNER IS PLAYER \" + self.player + \" !!\")\n self.player = 'O'\n else:\n self.player = 'O'\n print(\"THE WINNER IS PLAYER \" + self.player + \" !!\")\n self.player = 'X'\n self.clearBoard()\n print(\"\\nNEW GAME BEGIN!!!!\\n\")\n printer.show(self)\n if (self.tie_check()):\n print(\"Game TIE..\")\n self.clearBoard()\n printer.show(self)\n print(\" Player \" + self.player + ' TURN',end='')\n textinput.getInput(self)\n\n def win_check(self,):\n state = 0\n for row in range(3):\n if self.array[row][0] == self.array[row][1] == self.array[row][2] != \" \":\n state = 1\n # check column \n for column in range(3):\n if self.array[0][column] == self.array[1][column] == self.array[2][column] != \" \":\n state = 1\n # check cross \n if self.array[0][0] == self.array[1][1] == self.array[2][2] != \" \":\n state = 1\n elif self.array[0][2] == self.array[1][1] == self.array[2][0] != \" \":\n state = 1\n if(state == 1):\n return True\n else:\n return False\n\n def tie_check(self):\n for k in self.array:\n if '-' in k:\n return True\n else:\n return False\n\n def error_check(self,position):\n if(position > 9):\n return False\n tmp = self.getChar(position)\n if(tmp == 'O'):\n return False\n elif(tmp == 'X'):\n return False\n else:\n return True\n\n def getChar(self, position):\n list1 = [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2],[2,0],[2,1],[2,2]]\n return self.array[list1[position-1][0]][list1[position-1][1]]\n\n def setChar(self, position): \n list1 = [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2],[2,0],[2,1],[2,2]]\n if self.error_check(position) is True :\n if(self.player == 'O'):\n self.array[list1[position-1][0]][list1[position-1][1]] = self.player\n self.player = 'X'\n elif(self.player == 'X'):\n self.array[list1[position-1][0]][list1[position-1][1]] = self.player\n self.player = 'O'\n def clearBoard(self):\n self.array = [[' ',' ',' '],\n [' ',' ',' '],\n [' ',' ',' ']]\n#===========================================================================================\nclass Printer:\n def show(self,obj):\n\n print(\"|\",end='') \n for position in range(1,10):\n tmp = obj.getChar(position) #temp variable\n print(tmp,end = '')\n print(\"|\",end='') \n\n if(position%3 == 0 and position != 1):\n print(\"\\n-------\\n|\",end='')\n#===========================================================================================\nclass TextInput() :\n def getInput(self,obj):\n position = int(input(\"\\nwhich box you desire: \"))\n obj.setChar(position)\n \ngame = Board()\ngame.startgame()\n\n","sub_path":"ox_game.py","file_name":"ox_game.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"164596186","text":"\n\nimport tensorflow as tf\nimport sonnet as snt\n\ntf.flags.DEFINE_integer( \"num_training_steps\" , 10000 , '' )\n\ntf.flags.DEFINE_integer( \"report_interval\" , 100 , '' )\n\ntf.flags.DEFINE_integer( \"reduce_learning_interval\" , 200 ,'' )\n\ntf.flags.DEFINE_integer( \"lstm_depht\" , 2 ,'' )\ntf.flags.DEFINE_integer( \"lstm_units\" , 50 , '' )\n\ntf.flags.DEFINE_integer( \"lenght_features\" , 100 , '' )\n\ntf.flags.DEFINE_float( 
\"learning_rate\" , 0.001 , '' )\ntf.flags.DEFINE_float( \"learning_reduce_multi\" , 0.1 , '' )\ntf.flags.DEFINE_float( \"opti_epsilon\" , 0.001 , '' )\n\n\ntf.flags.DEFINE_string( \"checkpoint_dir\" , \"./train/dir\" , '' )\ntf.flags.DEFINE_integer( \"checkpoint_interval\" , 100 , '' )\n\n\n\nclass RnnInstacart(snt.AbstractModule ):\n\n def __init__( self , num_hidden , depth , output_size , use_skip_connections = True , use_dynamic = True ,name = \"rnn_instacart\" ):\n self.num_hidden = num_hidden \n self.depth = depth\n self.use_skip_connections = use_skip_connections\n self.use_dynamic = use_dynamic\n \n self._output_size = output_size \n super(RnnInstacart, self).__init__(name=name)\n print(\"wtf\")\n print(num_hidden)\n print(depth)\n with self._enter_variable_scope():\n # layer of lstm units\n self._output_module = snt.Linear( self._output_size , name = \"output\" )\n self._lstms = [ snt.LSTM( num_hidden , name=\"lstm_{}\".format(i) ) for i in range(depth ) ]\n\n self._core = snt.DeepRNN( self._lstms , skip_connections = self.use_skip_connections , name = \"deep_lstm\"\n )\n\n \n # create some layers here\n \n return \n\n \n \n def _build(self , inputs_sequence , prev_state ):\n\n # input_sequence [ LEN , batch_size , output_size ]\n input_shape = inputs_sequence.get_shape()\n \n batch_size = input_shape[0]\n \n #initial_state = self._core.initial_state( batch_size )\n print( \"controller shape \")\n print( inputs_sequence.shape )\n output_seq , final_state = self._core(inputs_sequence , prev_state )\n\n \n \"\"\"\n if self.use_dynamic :\n output_seq , final_state = tf.nn.dynamic_rnn(\n cell = self._core ,\n inputs = inputs_sequence ,\n time_major = True ,\n initial_state = initial_state \n )\n else :\n rnn_input_seq = tf.unstack( input_sequence )\n output , final_state = tf.contrib.rnn.static_rnn(\n cell = self._core ,\n inputs = inputs_rnn_input_seq ,\n initial_state = initial_state\n )\n output_seq = tf.stack( output )\n\n # return the output_seq, and final_sate\n \n batch_output_seq_m = snt.BatchApply(self._output_module )\n\n output_seq = batch_output_seq_m( output_seq )\n \"\"\"\n print(output_seq.shape )\n \n \n #output_seq = snt.Linear( self._output_size , name=\"output_linear\" )( output_seq )\n \n \n return output_seq , final_state\n \n \n def initial_state(self , batch_size , dtype ):\n\n return self._core.initial_state(batch_size , dtype )\n \n @property\n def state_size(self):\n return self._core.state_size\n\n @property\n def output_size(self):\n return self._core._output_size\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112226384","text":"from django.conf.urls import url\n\nfrom . 
import views\n\n\napp_name = 'performance'\nurlpatterns = [\n url(r'^create/$', views.PerformanceCreateView.as_view(), name='performance-create'),\n url(r'^list/$', views.PerformanceListView.as_view(), name='performance-list'),\n url(r'^(?P[0-9]+)/$', views.PerformanceView.as_view(), name='performance'),\n url(r'^measurement/list/$', views.MeasurementListView.as_view(), name='measurement-list'),\n url(r'^player/(?P[0-9]+)/latest/$', views.PerformancePlayerView.as_view(), name='performances-latest'),\n url(r'^benchmark/latest/$', views.BenchmarkListView.as_view(), name='benchmark-latest-list'),\n]\n","sub_path":"apps/performance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417434146","text":"import fileinput\nimport glob\nimport os\n\n\ndef get_style(direct):\n lst_files = []\n for filename in glob.glob(os.path.join(direct, '*.css')):\n lst_files.append(filename)\n css_lines = []\n for line in fileinput.input(lst_files):\n css_lines.append(line.strip())\n return \"\\n\".join(css_lines)","sub_path":"math_mind/lib/add_css.py","file_name":"add_css.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571815120","text":"import requests\nimport re\nfrom optparse import OptionParser\nimport sys\nfrom appdirs import unicode\nfrom termcolor import colored\nif sys.version_info[0] >= 3:\n from urllib.parse import urljoin\nelse:\n import urlparse\nclass Crawler:\n def __init__(self):\n self.script_desc()\n self.target_links = []\n def arguman_al(self):\n parse = OptionParser(description=self.description,epilog=self.a,prog=self.program)\n parse.add_option(\"-u\", \"--url\", dest=\"url\", help=\"Hedef url\")\n (options, arguments) = parse.parse_args()\n if not options.url:\n parse.error(\"error parsing\")\n return options\n def get_links(self,url):\n try:\n if \"http://\" in url or \"https://\" in url:\n response=requests.get(url)\n return re.findall('(?:href=\")(.*?)\"', str(response.content))\n else:\n response=requests.get(\"http://\"+url)\n return re.findall('(?:href=\")(.*?)\"',str(response.content))\n except requests.exceptions.ConnectionError:\n pass\n except requests.exceptions.InvalidURL:\n pass\n except UnicodeError:\n pass\n\n\n def crawl(self,url):\n href_links=self.get_links(url)\n if href_links:\n for link in href_links:\n link=urljoin(url,link)\n if \"#\" in link:\n link=link.split(\"#\")[0]\n if options.url in link and link not in self.target_links:\n self.target_links.append(link)\n print(link)\n self.crawl(link)\n\n\n def script_desc(self):\n self.program=\"spider\"\n self.a=\"\"\n if sys.version_info[0] >= 3:\n self.description = \"\"\n else:\n self.description = unicode(\"\", \"utf8\")\n self.a = unicode(self.a,\"utf8\")\n\n def keyboardinterrupt_message(self):\n print(\"you pressed ctrl+c\")\ntry:\n crawl=Crawler()\n options=crawl.arguman_al()\n crawl.crawl(options.url)\nexcept KeyboardInterrupt:\n crawl.keyboardinterrupt_message()\n\n\n\n","sub_path":"link_crawler.py","file_name":"link_crawler.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368757922","text":"from django.conf.urls import *\n\nfrom . 
import views\n\nurlpatterns = patterns(\"\",\n    # ex: /polls/\n    url(r'^$', views.index, name='index'),\n    # ex: /polls/5/\n    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),\n    # ex: /polls/5/results/\n    url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),\n    # ex: /polls/5/vote/\n    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n    url(r'^jquery/$',views.jquery,name='jquery'),\n    url(r'^search-form/$',views.search_form, name='search_form'),\n    url(r'^search/$',views.search, name='search'),\n)","sub_path":"mysite/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"455696515","text":"# -*- coding: utf-8 -*-\n#!/bin/env python\n\nimport sys\nfrom errors import E\n\ndef read_table(fname):\n\n    f = open(fname)\n    \n    lines = [ l.strip() for l in f.readlines() ]\n    \n    categories = []\n    yields = {}\n    for line in lines:\n        if \"Bin#\" in line:\n            line = \"\".join([\"\t\"] + line.split()[3:])\n            line = line.replace(\"|\", \"\t\t\t\")\n            categories = line.split()\n            for category in categories:\n                yields[category] = []\n        if \"Bin\" in line:\n            line = \"\".join([\"\t\"] + line.split()[3:])\n            line = line.replace(\"|\", \"\t\")\n            line = line.replace(u\"\\u00B1\".encode(\"utf-8\"), \",\")\n            for category, item in zip(categories, line.split()):\n                val, err = item.split(\",\")\n                yields[category].append(E(float(val), float(err)))\n\n    return yields\n","sub_path":"read_table.py","file_name":"read_table.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"446118426","text":"#how to do recursion with for loop\n\ndef topla(liste):\n    toplam =0\n\n    for i in liste:\n        toplam += i\n    return toplam\n\nprint(topla([1, 2, 3, 4]))\n\n\n#recursion...\n\n# 1. return 1 + add([2, 3, 4, 5, 6])\n# 2. return 2 + add([3, 4, 5, 6])\n# 3. return 3 + add([4, 5, 6])\n# 4. return 4 + add([5, 6])\n# 5. return 5 + add([6])\n# 6. return 6 + add([])\n# 7. 
return 0\n\n#sonuç:\n# 1+ (2+ (3+ (4+ (5+ (6+ 0)))))\n\ndef add(liste):\n if (len(liste)) ==0:\n return 0\n else:\n return liste[0] + (add(liste[1:]))\n\nprint(add([1, 2, 3, 4, 5, 6]))\n\n","sub_path":"python/recursive_2.py","file_name":"recursive_2.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281520100","text":"#ling huchong, use bfs, find all subsets level by level\n#Your submission beats 99.60% Submissions!\n\nclass Solution:\n\n '''\n * @param nums: A set of numbers\n * @return: A list of lists\n '''\n def subsets(self, nums):\n results = []\n\n if nums == None:\n return results # empty list\n\n nums.sort()\n\n # BFS\n queue = [[]]\n\n while queue:\n subset = queue.pop(0)\n results.append(subset)\n\n for i in range(len(nums)):\n if (len(subset) == 0 or subset[-1] < nums[i]):\n nextSubset = subset.copy()\n nextSubset.append(nums[i])\n queue.append(nextSubset)\n return results\n\n","sub_path":"subsets/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411029470","text":"from flask import Flask, jsonify, request\r\napp = Flask(__name__)\r\nfrom flask_cors import CORS, cross_origin\r\ncors = CORS(app)\r\napp.config['CORS_HEADERS'] = 'Content-Type'\r\n\r\nimport numpy as np\r\nimport pickle\r\n\r\nfilename1 = 'MLModels/respiratory_infection.pkl'\r\nloaded_model1 = pickle.load(open(filename1, 'rb'))\r\nfilename2 = 'MLModels/artery_infection.pkl'\r\nloaded_model2 = pickle.load(open(filename2, 'rb'))\r\nfilename3 = 'MLModels/liver_infection.pkl'\r\nloaded_model3 = pickle.load(open(filename3, 'rb'))\r\nfilename4 = 'MLModels/malignancy_infection.pkl'\r\nloaded_model4 = pickle.load(open(filename4, 'rb'))\r\nfilename5 = 'MLModels/pulmonary_infection.pkl'\r\nloaded_model5 = pickle.load(open(filename5, 'rb'))\r\nfilename6 = 'MLModels/renal_infection.pkl'\r\nloaded_model6 = pickle.load(open(filename6, 'rb'))\r\nfilename7 = 'MLModels/stroke_infection.pkl'\r\nloaded_model7 = pickle.load(open(filename7, 'rb'))\r\n\r\n\r\n@app.route('/', methods = ['GET'])\r\n@cross_origin()\r\ndef check():\r\n return jsonify({'message':'It works!'})\r\n\r\n@app.route('/predict', methods = ['POST'])\r\n@cross_origin()\r\ndef predict():\r\n age=float(request.json[\"age\"])\r\n gender_num=float(request.json[\"gender_num\"])\r\n bmi=float(request.json[\"bmi\"])\r\n map=float(request.json[\"map\"])\r\n hr=float(request.json[\"hr\"])\r\n temp=float(request.json[\"temp\"])\r\n data_input=[age,gender_num,bmi,map,hr,temp]\r\n\r\n result1=loaded_model1.predict([data_input])[0]*100\r\n result2=loaded_model2.predict([data_input])[0]*100\r\n result3=loaded_model3.predict([data_input])[0]*100\r\n result4=loaded_model4.predict([data_input])[0]*100\r\n result5=loaded_model5.predict([data_input])[0]*100\r\n result6=loaded_model6.predict([data_input])[0]*100\r\n result7=loaded_model7.predict([data_input])[0]*100\r\n\r\n\r\n\r\n return jsonify({'resp':result1, 'artery':result2, 'liver':result3, 'malignancy':result4, 'pulmonary':result5, 'renal':result6, 'stroke':result7})\r\n \r\nif __name__=='__main__':\r\n app.run(debug=True, port=8080)","sub_path":"backend+ml/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144168540","text":"from django.conf.urls import url\nfrom django.conf import 
settings\nfrom django.conf.urls.static import static\nfrom . import views\nfrom django.urls import path\n\nurlpatterns = [\n url(r'^$', views.apas_home, name='apas_home'),\n url(r'^upload/$', views.simple_upload, name='upload'),\n url(r'^formupload/$', views.model_form_upload, name='formupload'),\n url(r'^run_spark/', views.run_spark, name='run_spark'),\n url(r'^analysis_home/$', views.analysis_home, name='analysis_home'),\n url(r'^analysis_target/$', views.analysis_target, name='analysis_target'),\n url(r'^analysis/$', views.analysis, name='analysis'),\n url(r'^summary/', views.summary, name='summary'),\n url(r'^info/', views.info, name='info'),\n url(r'^login/', views.login, name='login'),\n url(r'^list/', views.portfolio_analysisLV.as_view(), name='list'),\n url(r'^jstest/(?P\\w+)/$',views.jstest),\n url(r'^json_cash/(?P\\w+)/(?P\\w+)/$', views.json_cash),\n url(r'^json_cash_quar/(?P\\w+)/(?P\\w+)/$', views.json_cash_quar),\n url(r'^json_cash_year/(?P\\w+)/(?P\\w+)/$', views.json_cash_year)\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"src/web-src/apas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"604634160","text":"\"\"\" \n Use a Decision Tree to identify emails from the Enron corpus by author: \n Sara has label 0\n Chris has label 1\n\"\"\"\nimport os\nimport sys\nfrom time import time\n\nsys.path.append(os.getcwd() + r\"/tools/\")\nfrom email_preprocess import preprocess\n\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n# percentage of sample used in training the model\nsample_data_percentage = 1\n\n# minimum number of samples before the decision tree stops splitting\nmin_samples_split = 40\n\n# use sample percentage to determine the ranfe of the training samples\nfeatures_train = features_train[: int(sample_data_percentage * len(features_train))]\nlabels_train = labels_train[: int(sample_data_percentage * len(labels_train))]\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n# create the classifier with the defined training criteria\nclassifier = DecisionTreeClassifier(\n criterion=\"entropy\", random_state=0, min_samples_split=min_samples_split\n)\n\nt = time()\n# train the classifier with the training data\nclassifier.fit(features_train, labels_train)\n\n# print accuracy of training sample\nprint(\n \"\\naccuracy of training dataset: \"\n + str(round(classifier.score(features_train, labels_train), 3))\n)\n# print training time\nprint(\"\\ntraining time:\" + str(round(time() - t, 3)) + \"s\")\n\n\nt = time()\n# obtain predictions using test data\npredictions = classifier.predict(features_test)\n\n# print accuracy of testing dataset\nprint(\n \"\\naccuracy of testing dataset: \"\n + str(round(classifier.score(features_test, labels_test), 3))\n)\n# print time taken to predict testing sample\nprint(\"\\ntesting time:\" + str(round(time() - t, 3)) + \"s\")\n","sub_path":"decision_tree/dt_author_id.py","file_name":"dt_author_id.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284620504","text":"n, m = map(int, input().split())\r\nl = [int(i) for i in input().split()]\r\nfd = 
{}\r\nod = {}\r\nc = 0\r\nfor i in l:\r\n\ttry:\r\n\t\tb = od[i]\r\n\texcept:\r\n\t\tod[i] = c\r\n\t\tc += 1\r\n\ttry:\r\n\t\tfd[i] += 1\r\n\texcept:\r\n\t\tfd[i] = 1\r\ns = [(-fd[i], od[i], i) for i in l]\r\nprint(*[i[2] for i in sorted(s, key=lambda x:(x[0], x[1], x[2]))])\r\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"327650678","text":"import argparse\nimport json\nimport logging\nimport sys\nimport time\n\nfrom tf_pose import common\nimport cv2\nimport numpy as np\nfrom tf_pose.estimator import TfPoseEstimator\nfrom tf_pose.networks import get_graph_path, model_wh\nfrom tf_pose.eval import write_coco_json\n\nlogger = logging.getLogger('TfPoseEstimator')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='tf-pose-estimation run')\n    parser.add_argument('--image', type=str, default='./images/p1.jpg')\n    parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')\n\n    parser.add_argument('--resize', type=str, default='0x0',\n                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\n    parser.add_argument('--resize-out-ratio', type=float, default=4.0,\n                        help='if provided, resize heatmaps before they are post-processed. default=1.0')\n\n    args = parser.parse_args()\n\n    w, h = model_wh(args.resize)\n    if w == 0 or h == 0:\n        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))\n    else:\n        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))\n\n    # estimate human poses from a single image !\n    image = common.read_imgfile(args.image, None, None)\n    if image is None:\n        logger.error('Image can not be read, path=%s' % args.image)\n        sys.exit(-1)\n    t = time.time()\n    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)\n    elapsed = time.time() - t\n    logger.info('inference image: %s in %.4f seconds.'
% (args.image, elapsed))\n image_h, image_w = image.shape[:2]\n result = []\n for human in humans:\n item = {\n 'keypoints': write_coco_json(human,image_w,image_h)\n }\n result.append(item)\n fp = open(\"Ta_pose.txt\", 'w')\n json.dump(result, fp)\n fp.close()","sub_path":"old/openpose_json_old.py","file_name":"openpose_json_old.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"402969782","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport os\nimport hashlib\nimport datetime\nimport json\nimport socket\n\nfrom django.conf import settings\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views.generic import TemplateView, DetailView, ListView, CreateView, UpdateView\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom .models import TempRange, ModData, System\nfrom .forms import TempRangeForm\n\n\ndef c2f(ctemp):\n if not ctemp:\n return None\n return 9.0/5.0 * float(ctemp) + 32\n\n\ndef get_interface_ip(ifname):\n import fcntl\n import struct\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',\n ifname[:15]))[20:24])\n\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n try:\n s = System.objects.last()\n if s.state in settings.STATE[1:]:\n self.template_name = 'panic.html'\n kwargs.update({\n 'sys_state': s.state,\n })\n except:\n pass\n\n try:\n q = ModData.objects.last()\n kwargs.update({\n 'temp': q.temp,\n 'relays': q.relay.split(';'),\n 'ifip': get_interface_ip('eth0'),\n })\n if int(float(q.temp)) >= 999 or '-1' in q.relay:\n kwargs.update({\n 'test': True,\n })\n except:\n pass\n return super(IndexView, self).get_context_data(**kwargs)\n\n\nclass SettingsView(TemplateView):\n template_name = 'settings.html'\n form = TempRangeForm\n\n @csrf_exempt\n def dispatch(self, *args, **kwargs):\n return super(SettingsView, self).dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n data['tmin'] = abs(int(request.POST.get('tmin')))\n data['tmax'] = abs(int(request.POST.get('tmax')))\n except:\n return HttpResponseRedirect(reverse('main-settings'))\n\n if data.get('tmin') >= 0 and data.get('tmax') > 0:\n try:\n q = TempRange.objects.last()\n q.tmin = data['tmin']\n q.tmax = data['tmax']\n except:\n q = TempRange(**data)\n finally:\n q.save()\n return HttpResponseRedirect(reverse('index'))\n return super(SettingsView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n try:\n q = TempRange.objects.last()\n kwargs.update({\n 'tmin': q.tmin,\n 'tmax': q.tmax,\n })\n except:\n pass\n\n kwargs.update({\n 'form': self.form,\n })\n return super(SettingsView, self).get_context_data(**kwargs)\n","sub_path":"sauna/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96886481","text":"#!/usr/bin/python\n# coding: UTF-8\n\n'''\npython busrt_detect_all.py\n'''\n\nimport collections\nimport pprint\nimport re\nimport sys\nimport time\nimport numpy as np\nimport pybursts\nimport math\nfrom concurrent import futures\nfrom itertools import chain\nimport pickle\nimport datetime\nimport subprocess\n\n'''\nレベルの重複削除\nBefore\n[[0 7079 65511]\n 
class Node():\n    def __init__(self, parent, st, en, lv, cnt, depth=0):\n        self.parent = parent # parent node\n        self.st = st # data fields\n        self.en = en\n        self.lv = lv\n        self.cnt = cnt\n        self.children = [] # child nodes\n        self.depth = depth\n\n    def add_node(self, added_node): # add a child node\n        self.children.append(added_node)\n        added_node.parent = self\n        added_node.depth = self.depth + 1\n\n    def dens(self): # return the number of events per minute\n        if self.en - self.st == 0:\n            return 0\n        else:\n            return round(self.cnt / (self.en - self.st) * 60, 2)\n\n    def value(self):\n        return [self.lv, self.st, self.en, self.cnt, self.dens()]\n\n\n# Burst detection with p_num processes. time_lists is sorted by data size and\n# dealt out into p_num chunks before being handed to the workers.\ndef m_burst_detect(time_lists, p_num):\n    if p_num > len(time_lists):\n        p_num = len(time_lists)\n\n    row_lists = sorted(time_lists.items(),\n                       key=lambda x: len(x[1]),\n                       reverse=True)\n\n    arg_lists = []\n    for i in range(p_num):\n        arg_lists.append({k: v for e, (k, v) in enumerate(row_lists)\n                          if e % p_num == i})\n\n    pool = futures.ProcessPoolExecutor(max_workers=p_num)\n    return(list(chain.from_iterable(pool.map(burst_detect, arg_lists))))\n\n\ndef burst_detect(time_lists):\n    burst_result = []\n    for ind, v in time_lists.items():\n        time_list = list(v) # a copy, not a reference\n        if len(time_list) > 30: # filter by volume\n\n            # Temporarily add 0/86400 if the list does not start/end with them\n            # if time_list[-1] < 86400:\n            #     time_list.append(86400)\n            # if time_list[0] != 0:\n            #     time_list.insert(0, 0)\n\n            # Burst detection\n            burst_list = pybursts.kleinberg(sorted(set(time_list)),\n                                            s=2, gamma=1.0)\n\n            # Remove duplicate levels here\n            for j in range(len(burst_list)-1):\n                if not any([x-y for x, y in zip(burst_list[j][1:],\n                                                burst_list[j+1][1:])]): # if start and end coincide\n                    burst_list[j] = [0, 0, 0]\n            burst_list = np.delete(burst_list, np.where(burst_list == 0)[0], 0)\n\n            # Drop ranges whose interval exceeds 1 min (currently disabled)\n            # burst_list = check_interval(burst_list, time_list)\n\n            # Start building the burst tree\n            root_node = Node(None, 0, 0, 0, 0) # root node\n            for lv, st, en in burst_list:\n                # initialise\n                parent_node = root_node\n                isadded = 0\n                burst_cnt = len([z for z in time_list if st <= z <= en])\n                new_node = Node(None, st, en, lv, burst_cnt)\n\n                while isadded == 0:\n                    for child_node in parent_node.children: # compare the children one by one\n                        if child_node.st <= new_node.st \\\n                           and child_node.en >= new_node.en: # containment check\n                            # Contained, and the child has no children of its\n                            # own: just add the node here and stop\n                            if child_node.children == []:\n                                child_node.add_node(new_node)\n                                isadded = 1\n                                break\n                            else:\n                                # Contained, but the child has children of its\n                                # own: descend and compare against them\n                                parent_node = child_node\n                                break\n                        else: # not contained: compare with the next child\n                            pass\n                    else: # contained by no child: add here and stop\n                        parent_node.add_node(new_node)\n                        isadded = 1\n            # Burst tree built; stored under root_node.\n\n            # Print the burst tree\n            # print(ind, 'result')\n            # show_burst_tree(root_node)\n\n            # Traverse the burst tree\n            # parent_node = root_node\n            # result_node = []\n            # while True:\n            #     for cur_node in parent_node.children:\n            #         if cur_node.children == [] :\n            #             result_node.append(cur_node)\n            #\n            #         # when cur_node's density is at least double every child's\n            #         elif any(cur_node.dens > x.dens * 2\n            #                  for x in cur_node.children) :\n            #             result_node.append(cur_node)\n            #         else : # when some child is not below half the density\n\n            # If a provisional list remains, add it to the result\n            if len(burst_list) != 0:\n                # Include the results of all first-level children\n                burst_result.append((ind,\n                                     [z.value() for z in root_node.children]))\n    return burst_result\n\n\n
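# Illustrative call shape (hypothetical data): burst_detect({'g1': [...]})\n# yields [('g1', [[level, start, end, count, events_per_min], ...])] for every\n# group with more than 30 timestamps; smaller groups are skipped entirely.\n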
# Print the burst tree\ndef show_burst_tree(parent_node):\n    for i in range(parent_node.depth):\n        print('\\t', end='')\n    print('[',\n          parent_node.lv,\n          parent_node.st,\n          parent_node.en,\n          parent_node.cnt,\n          parent_node.dens(),\n          ']')\n    for child in parent_node.children:\n        show_burst_tree(child)\n\n\n# Receives the time list of a single group.\ndef check_interval(burst_range, group_time_list):\n    if burst_range == []:\n        return burst_range\n    burst_range_result = []\n    sub_list = []\n\n    for lv, s, e in burst_range:\n        sub_list = [y - x for x, y\n                    in zip(group_time_list[:-1],\n                           group_time_list[1:])\n                    if s <= x <= e and s <= y <= e]\n        if max(sub_list) <= 60 * 2: # the largest interval must stay within 2 minutes\n            sub_list_count = collections.Counter(sub_list)\n            over_1min_interval_rate = sum([x for k, x in sub_list_count.items()\n                                           if k > 60]) / len(sub_list)\n            if over_1min_interval_rate < 0.5:\n                burst_range_result.append([lv, s, e])\n            sub_list = []\n        else:\n            print('interval check hit', lv, s, e)\n\n    return burst_range_result\n\ndef get_dumpname(day):\n    evs = subprocess.check_output(['ls','dumps/{0}'.format(day)]).decode('utf-8')[:-1].split(\"\\n\")\n    return evs\n\nif __name__ == '__main__':\n\n    days = subprocess.check_output(['ls','dumps']).decode('utf-8')[:-1].split('\\n')\n\n    for day in days:\n        print(day)\n        for DUMP_NAME in get_dumpname(day):\n            with open('dumps/'+day+'/'+DUMP_NAME, \"rb\") as f:\n                obj = pickle.load(f, encoding=\"bytes\")\n\n            if len(obj) == 0:\n                print(day,DUMP_NAME,'\\tno data')\n                continue\n\n            dt_day = datetime.datetime.strptime(day,\"%Y%m%d\")\n            time_list = sorted([x.hour*3600 + x.minute*60 + x.second for x in obj if x.date() == dt_day.date()])\n\n            cur_t = -1\n            for i, t in enumerate(time_list):\n                if cur_t == t:\n                    time_list[i] = round(time_list[i-1]+0.01, 3)\n                else:\n                    cur_t = t\n\n            time_lists = {day:time_list}\n\n            burst_result = m_burst_detect(time_lists, 4)\n\n            with open('burst_result/'+day+'/'+day+'_'+DUMP_NAME,'wb') as g:\n                if burst_result != []:\n                    pickle.dump((DUMP_NAME,burst_result[0][0],burst_result[0][1]), g)\n                else:\n                    pickle.dump((DUMP_NAME,day),g)\n","sub_path":"burst_detect_all.py","file_name":"burst_detect_all.py","file_ext":"py","file_size_in_byte":8480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"111750430","text":"from manimlib.imports import *\n\nclass Chapter4(Scene): # animation for the second solution\n    def construct(self):\n        circle = Circle().scale(2).set_color(BLUE_D)\n        triangle = Polygon(\n            circle.points[16],\n            circle.points[5],\n            circle.points[26]\n        ).scale(0.96).shift(LEFT * 0.02).set_color(GREEN_D)\n        triangle.set_fill(color = GREEN_D , opacity = 0.2)\n\n        diameter = Line(circle.points[16] , circle.points[31] , color = RED)\n\n        dashedline1 = DashedLine(np.array([0,2,0]) , np.array([0,-3,0]) , color = RED).move_to(circle.points[5]).shift(LEFT * 0.08 +\n        DOWN * 1.8)\n        dashedline2 = DashedLine(np.array([0,2,0]) , np.array([0,-3,0]) , color = RED).move_to(circle.points[10]).shift(RIGHT * 0.05+ DOWN * 1.8)\n\n        dot1 = Dot(circle.points[0])\n        dot2 = Dot(circle.points[0]).shift(LEFT * 1.04)\n        dot3 = Dot(circle.points[15])\n        dot4 = Dot(circle.points[15]).shift(RIGHT * 1)\n        dot5 = Dot(circle.points[15]).shift(RIGHT * 2)\n\n        letter = [TexMobject(e).set_color(ORANGE) for e in [\"A\",\"B\",\"O\",\"C\",\"D\"]]\n        letter[0].next_to(dot3 , UP , buff = 0.05).shift(LEFT*0.3 + DOWN*0.2)\n        letter[1].next_to(dot4, UP, buff=0.05).shift(RIGHT*0.2).scale(0.8)\n        letter[2].next_to(dot5, UP, buff=0.05).scale(0.8)\n        letter[3].next_to(dot2, UP, buff=0.05).shift(LEFT*0.2).scale(0.8)\n        letter[4].next_to(dot1, UP, buff= 0.05).shift(RIGHT*0.3 + DOWN*0.2)\n\n\n        lineA = Line(circle.points[6] , circle.points[25]).shift(LEFT * 
0.25)\n        lineB = Line(circle.points[8] , circle.points[23])\n\n        lineC = Line(circle.points[14] , circle.points[17]).shift(RIGHT * 0.1)\n        lineD = Line(circle.points[12], circle.points[20]).shift(RIGHT * 0.01)\n\n        lineE = Line(circle.points[4], circle.points[27])\n\n        linegroup = VGroup(lineA,lineB,lineC,lineD,lineE)\n\n\n        diameter_middle = Line(dot4 , dot2)\n        brace_middle = Brace(diameter_middle , DOWN).shift(DOWN * 2)\n\n        diameter_left = Line(dot3 , dot4)\n        brace_left = Brace(diameter_left , DOWN).shift(DOWN * 2)\n\n        diameter_right = Line(dot2 , dot1)\n        brace_right = Brace(diameter_right , DOWN).shift(DOWN * 2)\n\n        text1 = TextMobject(\"弦长\",\" > \",\" L\").next_to(brace_middle , DOWN , buff = 0.1).scale(0.7)\n        text1[1].set_color(RED)\n\n        text2 = TextMobject(\"弦长\",\" < \",\" L\").next_to(brace_left , DOWN , buff = 0.1).scale(0.7).shift(LEFT * 0.4)\n        text2[1].set_color(RED)\n\n        text3 = TextMobject(\"弦长\",\" < \",\" L\").next_to(brace_right , DOWN , buff = 0.1).scale(0.7).shift(RIGHT * 0.4)\n        text3[1].set_color(RED)\n\n        self.play(ShowCreation(circle) , ShowCreation( triangle))\n        self.wait(2)\n\n        self.play(ShowCreation(diameter),*[ShowCreation(e) for e in [dot1 , dot2 , dot3 ,dot4 ,dot5]])\n        self.wait(2)\n\n        self.play(*[ShowCreation(i) for i in letter[0:5]])\n        self.wait(2)\n\n        self.play(ShowCreation(dashedline1),ShowCreation(dashedline2))\n        self.wait(2)\n\n\n        self.play(ShowCreation(linegroup))\n        self.wait(2)\n\n\n\n        self.play(WiggleOutThenIn(lineA),WiggleOutThenIn(lineB))\n        self.wait(2)\n\n        self.play(ShowCreation(brace_middle) , ShowCreation(text1))\n        self.wait(2)\n\n\n        self.play(WiggleOutThenIn(lineC),WiggleOutThenIn(lineD),WiggleOutThenIn(lineE))\n        self.wait(2)\n\n\n        self.play(ShowCreation(brace_left),ShowCreation(text2), ShowCreation(text3),ShowCreation(brace_right))\n        self.wait(2)\n","sub_path":"Bertrand3.py","file_name":"Bertrand3.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"251218316","text":"\"\"\"\nIterative Solution\n\nO(n+m) time complexity\nO(1) space complexity\n\nKey Properties:\n    - s is a subsequence of t\n    - every single character in s must be found in t, in order\n\"\"\"\n\nclass Solution(object):\n    def isSubsequence(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        if not s:\n            return True\n\n        if not t:\n            return False\n\n        j = 0\n        for i in range(len(s)):\n            c = s[i] # current target character\n\n            while j < len(t) and t[j] != c: # move index until the same character is found in t\n                j += 1\n\n            if j == len(t):\n                return False\n\n            j += 1 # move index to the next character in t\n\n        return True","sub_path":"medium/392_is_subsequence.py","file_name":"392_is_subsequence.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"581643180","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\ndef get_positive_y_max(b_bar, y):\n    assert len(b_bar) == len(y)\n\n    j_min_ = None\n    theta = float('inf')\n\n    for i, (b_, y_) in enumerate(zip(b_bar, y)):\n        if y_ <= 0.0:\n            continue\n        tmp = b_/y_\n        if theta > tmp:\n            theta = tmp\n            j_min_ = i\n    return j_min_, theta\n\n
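# Ratio test in words (illustrative note): theta = min over {i : y_i > 0} of\n# b_bar_i / y_i is how far the entering variable can grow before the first\n# basic variable hits zero, and j_min_ is that blocking row. For example,\n# b_bar = [12, 8] and y = [3, 1] give theta = min(12/3, 8/1) = 4 at row 0.\n\n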
if __name__ == '__main__':\n    A = np.asarray([[3.0, 2.0, 1.0, 0.0],\n                    [1.0, 2.0, 0.0, 1.0]])\n    b = np.asarray([12.0, 8.0])\n    c = np.asarray([-1.0, -1.0, 0.0, 0.0]) # the slack variables need zero cost entries\n\n    # initial basis (must be feasible)\n    B_index = np.asarray([2, 3])\n    N_index = np.asarray([0, 1])\n\n    # initial feasible solution\n    B = A[:, B_index]\n    B_inverse = np.linalg.inv(B)\n    b_bar = np.dot(B_inverse, b)\n    x = np.zeros(len(c), dtype = float)\n    for x_, idx in zip(b_bar, B_index):\n        x[idx] = x_\n\n    # simplex loop\n    while(True):\n        c_B = c[B_index]\n        c_N = c[N_index]\n        B = A[:, B_index]\n        N = A[:, N_index]\n        B_inverse = np.linalg.inv(B)\n\n        # pricing\n        tmp = np.dot(B_inverse.transpose(), c_B)\n        rho = c_N - np.dot(N.transpose(), tmp)\n        k_ = np.argmin(rho)\n        k = N_index[k_]\n\n        if rho[k_] >= 0.0:\n            break\n\n        # ratio test\n        b_bar = np.dot(B_inverse, b)\n        y = np.dot(B_inverse, A[:, k])\n        j_min_, theta = get_positive_y_max(b_bar, y)\n        j_min = B_index[j_min_]\n\n        # pivot\n        x[k] = theta\n        for tmp, idx in zip(y, B_index):\n            x[idx] -= tmp * theta\n        B_index = np.insert(B_index[B_index != j_min], 0, k)\n        N_index = np.insert(N_index[N_index != k], 0, j_min)\n\n    print(x)\n    print('obj = {}'.format(np.dot(x, c)))\n","sub_path":"sample_program/sample_simplex.py","file_name":"sample_simplex.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"431202889","text":"import time\n\nimport tensorflow as tf\nimport numpy as np\nfrom keras.layers import Input, LSTM, Dense, Dropout\nfrom keras.models import Model as KerasModel  # aliased: this module defines its own Model class below\nfrom sklearn.model_selection import train_test_split\nfrom scipy.spatial import distance  # scipy.spatial is not pulled in by a bare 'import scipy'\n\n\nclass Model:\n    EMBEDDING_DIMENSION = 300\n    TEST_SIZE = 0.1\n\n    def __init__(self, latent_dim, kc_dimension):\n        self.learning_rate = 0.001\n        self.dropout_rate = 0.3\n        self.loss_function = 'categorical_crossentropy'\n        self.latent_dim = latent_dim\n        self.kc_dimension = kc_dimension\n        self.encoder_inputs = Input(shape=(None, self.EMBEDDING_DIMENSION), dtype=tf.float32)\n        self.decoder_inputs = Input(shape=(None, self.kc_dimension))\n        self.model = None\n        self.encoder_states = []\n        self.encoder_model = None\n        self.decoder_model = None\n\n    def create_model(self):\n        encoder_lstm = LSTM(self.latent_dim, return_state=True)\n        encoder_output, state_h, state_c = encoder_lstm(self.encoder_inputs)\n        self.encoder_states = [state_h, state_c]\n\n        decoder_lstm = LSTM(self.latent_dim, return_sequences=True, return_state=True)\n        decoder_outputs, _, _ = decoder_lstm(self.decoder_inputs, initial_state=self.encoder_states)\n\n        dropout_layer = Dropout(self.dropout_rate)\n        decoder_outputs = dropout_layer(decoder_outputs)\n\n        decoder_dense = Dense(self.kc_dimension, activation='softmax')\n        decoder_outputs = decoder_dense(decoder_outputs)\n\n        self.model = KerasModel([self.encoder_inputs, self.decoder_inputs], decoder_outputs)\n        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)\n\n        self.model.compile(optimizer=optimizer, loss=self.loss_function, metrics=['acc', 'mse'])\n\n
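    # Wiring recap (illustrative note): the encoder LSTM compresses the\n    # embedded source sequence into [state_h, state_c]; the decoder LSTM is\n    # initialised from those states and trained with teacher forcing, i.e. it\n    # sees the target sequence shifted by one step (see generate_batch below).\n\n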
\", self.dropout_rate)\n print(\"Latent Dimension = \", self.latent_dim)\n print(\"*****************************************************************************\")\n\n history = self.model.fit_generator(\n generator=self.generate_batch(x_train, y_train, batch_size=batch_size,\n input_dimension=self.EMBEDDING_DIMENSION,\n output_dimension=self.kc_dimension, max_length_src=max_length_src,\n max_length_tar=max_length_tar),\n steps_per_epoch=num_train_samples // batch_size, epochs=num_epochs,\n validation_data=self.generate_batch(x_test, y_test, batch_size=batch_size,\n input_dimension=self.EMBEDDING_DIMENSION,\n output_dimension=self.kc_dimension, max_length_src=max_length_src,\n max_length_tar=max_length_tar),\n validation_steps=num_val_samples // batch_size, verbose=1)\n\n return history\n\n def generate_batch(self, x, y, batch_size, input_dimension, output_dimension, max_length_src, max_length_tar):\n while True:\n for j in range(0, len(x), batch_size):\n encoder_input_data = np.zeros((batch_size, max_length_src, input_dimension), dtype='float32')\n decoder_input_data = np.zeros((batch_size, max_length_tar, output_dimension), dtype='float32')\n decoder_target_data = np.zeros((batch_size, max_length_tar, output_dimension), dtype='float32')\n\n masking_layer = tf.keras.layers.Masking(mask_value=np.zeros(self.kc_dimension))\n\n for i, (input_seq, target_seq) in enumerate(zip(x[j:j + batch_size], y[j:j + batch_size])):\n for t, input_vec in enumerate(input_seq):\n encoder_input_data[i, t] = input_vec\n for t, output_vec in enumerate(target_seq):\n if t < len(target_seq) - 1:\n decoder_input_data[i, t] = output_vec\n if t > 0:\n decoder_target_data[i, t - 1] = output_vec\n masked_decoder_target_data = masking_layer(decoder_target_data)\n masked_decoder_input_data = masking_layer(decoder_input_data)\n yield [encoder_input_data, masked_decoder_input_data], masked_decoder_target_data\n\n def setup_inference_model(self):\n self.encoder_model = Model(self.encoder_inputs, self.encoder_states)\n decoder_state_input_h = Input(shape=(self.latent_dim,))\n decoder_state_input_c = Input(shape=(self.latent_dim,))\n decoder_state_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n decoder_lstm = LSTM(self.latent_dim, return_sequences=True, return_state=True)\n decoder_outputs2, state_h2, state_c2 = decoder_lstm(self.decoder_inputs, initial_state=decoder_state_inputs)\n decoder_states2 = [state_h2, state_c2]\n\n decoder_dense = Dense(self.kc_dimension, activation='softmax')\n decoder_outputs2 = decoder_dense(decoder_outputs2)\n\n self.decoder_model = Model([self.decoder_inputs] + decoder_state_inputs, [decoder_outputs2] + decoder_states2)\n\n def evaluate_model(self, x, y, max_length_src, max_length_tar, kcOneHotEncoder):\n train_gen = self.generate_batch(x, y, batch_size=1, input_dimension=self.EMBEDDING_DIMENSION,\n output_dimension=self.kc_dimension, max_length_src=max_length_src,\n max_length_tar=max_length_tar)\n similarity_list = []\n num_correct = 0\n num_incorrect = 0\n start_time = time.time()\n for k in range(len(x)):\n (input_seq, actual_output), _ = next(train_gen)\n\n actual_output_words = []\n for ind in range(1, len(actual_output[0]) - 1):\n vector_np = actual_output[0][ind].numpy()\n is_zero_vector = (vector_np == np.zeros(self.kc_dimension)).all()\n is_one_vector = (vector_np == np.ones(self.kc_dimension)).all()\n\n if not is_zero_vector and not is_one_vector:\n actual_output_words.append(kcOneHotEncoder.inverse_transform([vector_np])[0][0])\n # print(\"Actual Words -> \", 
    def evaluate_model(self, x, y, max_length_src, max_length_tar, kcOneHotEncoder):\n        train_gen = self.generate_batch(x, y, batch_size=1, input_dimension=self.EMBEDDING_DIMENSION,\n                                        output_dimension=self.kc_dimension, max_length_src=max_length_src,\n                                        max_length_tar=max_length_tar)\n        similarity_list = []\n        num_correct = 0\n        num_incorrect = 0\n        start_time = time.time()\n        for k in range(len(x)):\n            (input_seq, actual_output), _ = next(train_gen)\n\n            actual_output_words = []\n            for ind in range(1, len(actual_output[0]) - 1):\n                vector_np = actual_output[0][ind].numpy()\n                is_zero_vector = (vector_np == np.zeros(self.kc_dimension)).all()\n                is_one_vector = (vector_np == np.ones(self.kc_dimension)).all()\n\n                if not is_zero_vector and not is_one_vector:\n                    actual_output_words.append(kcOneHotEncoder.inverse_transform([vector_np])[0][0])\n            # print(\"Actual Words -> \", actual_output_words)\n            decoded_sen = self.decode_sequence(input_seq, kcOneHotEncoder)\n            decoded_word_sentence = kcOneHotEncoder.inverse_transform(decoded_sen).reshape(-1)\n            # print(\"Predicted Sequence -> \", decoded_word_sentence)\n\n            res = len(set(actual_output_words) & set(decoded_word_sentence)) / float(\n                len(set(actual_output_words) | set(decoded_word_sentence)))\n            if res == 1:\n                num_correct += 1\n            else:\n                num_incorrect += 1\n            similarity_list.append(res)\n\n        end_time = time.time()\n        np_similarity_array = np.array(similarity_list)\n        print(\"Latent Dimension = \", self.latent_dim)\n        print(\"Total Correct = \", num_correct)\n        print(\"Total Incorrect = \", num_incorrect)\n        print(\"Similarity Mean on Training Data = \", np_similarity_array.mean())\n        print(\"Total Time taken for evaluation = \", end_time - start_time, \" secs\")\n\n    def decode_sequence(self, input_seq, kcOneHotEncoder):\n        start_token = np.ones(self.kc_dimension)\n        end_token = np.ones(self.kc_dimension)\n\n        state_values = self.encoder_model.predict(input_seq)\n        target_seq = np.zeros((1, 1, self.kc_dimension))\n        target_seq[0, 0] = start_token\n\n        stop_condition = False\n        decoded_sentence = []\n\n        while not stop_condition:\n            output_tokens, h, c = self.decoder_model.predict([target_seq] + state_values)\n            decoded_word = kcOneHotEncoder.inverse_transform([output_tokens[0][0]])\n\n            if cosine_distance(output_tokens[0][0], end_token) < 0.1 or len(decoded_sentence) > 100:\n                stop_condition = True\n            else:\n                decoded_sentence.append(output_tokens[0][0])\n\n                target_seq = np.zeros((1, 1, self.kc_dimension))\n                corrected_one_hot = kcOneHotEncoder.transform(decoded_word)[0]\n                target_seq[0][0] = corrected_one_hot\n                state_values = [h, c]\n        return decoded_sentence\n\n\ndef cosine_distance(wv1, wv2):\n    # scipy's cosine() returns a distance (1 - cosine similarity), so small\n    # values mean the two vectors are nearly identical\n    return distance.cosine(wv1, wv2)\n\n\nif __name__ == \"__main__\":\n    myModel = Model(200, 933)\n    myModel.create_model()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"379776472","text":"# -*- coding: utf-8 -*-\r\n# author:yufeixu\r\n# datetime:2020/6/2 17:37\r\n# software: PyCharm\r\n\"\"\"\r\nPath Sum II\r\nGiven a binary tree and a target sum, find all root-to-leaf paths where the\r\nsum of the node values equals the given target.\r\n\r\nNote: a leaf is a node with no children.\r\n\r\nExample:\r\nGiven the binary tree below and target sum = 22,\r\n\r\n              5\r\n             / \\\r\n            4   8\r\n           /   / \\\r\n          11  13  4\r\n         /  \\    / \\\r\n        7    2  5   1\r\n\r\nreturn:\r\n\r\n[\r\n   [5,4,11,2],\r\n   [5,8,4,5]\r\n]\r\n\"\"\"\r\n\r\n\"\"\"\r\n    Algorithm involved: backtracking\r\n\"\"\"\r\nfrom base.tree_node import TreeNode\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\r\n        def back_track(root, sum, path):\r\n            # base case\r\n            if not root:\r\n                return\r\n            path.append(root.val)\r\n            sum -= root.val\r\n            if not root.left and not root.right and sum == 0:\r\n                res.append(path[:])\r\n            back_track(root.left, sum, path)\r\n            back_track(root.right, sum, path)\r\n            # backtrack (undo the choice)\r\n            path.pop()\r\n\r\n        res = []\r\n        back_track(root, sum, [])\r\n        return res\r\n
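\r\n# Quick illustrative check (assuming base.tree_node.TreeNode exposes\r\n# val/left/right attributes):\r\n#   root = TreeNode(5); root.left = TreeNode(4); root.left.left = TreeNode(11)\r\n#   root.left.left.left, root.left.left.right = TreeNode(7), TreeNode(2)\r\n#   Solution().pathSum(root, 22) would return [[5, 4, 11, 2]]\r\n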
","sub_path":"src/leetcode/binarytree/pathsum/no113_pathSum.py","file_name":"no113_pathSum.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"451246739","text":"from __future__ import with_statement\nfrom fabric.api import sudo, cd, run, settings, require, env, put, local, prefix, task\nfrom fabric.contrib.files import exists\n\nenv.hosts = ['0.0.0.0']\nenv.user = ''\nenv.django_app = 'webapp'\n# tasks\n@task\ndef new():\n    env.process_name = 'distributed_jmeter'\n    env.celery_process_name = 'celery-worker1'\n    env.user = ''\n    env.project_name = 'webapp'\n    env.postfix=''\n    env.path = '/home/%(user)s/%(project_name)s' % env\n    env.virtualhost_path = '%s%s' % (env.path, '/env')\n\n@task\ndef deploy(install=False, migrate_db=False, branch=None):\n    \"\"\"\n    Deploy the latest version (from env.project_branch or the specified branch) of the site\n    to the servers, install any required third party modules,\n    install the virtual host (if forced with 'install=True'), collect the static files,\n    symlink the media files and then restart the webserver.\n    \"\"\"\n    import time\n    with settings(release = time.strftime('%Y%m%d%H%M%S')):\n        upload_tar_from_git(branch)\n        install_requirements()\n        symlink_current_release()\n        sudo('touch %(path)s/releases/current/scraper_app.log' % env)\n        sudo('chmod 777 %(path)s/releases/current/scraper_app.log' % env)\n\n        with cd('%(path)s/releases/current/%(project_name)s/conf/' % env):\n            run('cp config.ini.production config.ini')\n\n        if install:\n            install_site()\n            migrate(install)\n        if migrate_db:\n            migrate(True)\n        #collect_static()\n        graceful_restart()\n        if install:\n            restart_webserver()\n\n@task\ndef rollback():\n    \"\"\"\n    Limited rollback capability. Simply loads the previously current\n    version of the code. Rolling back again will swap between the two.\n    \"\"\"\n    require('path')\n    with cd('%(path)s/releases' % env):\n        run('mv current _previous;')\n        run('mv previous current;')\n        run('mv _previous previous;')\n    collect_static()\n    #symlink_media()\n    restart_webserver()\n\n\n
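# Typical invocations from a shell (illustrative; Fabric 1.x task syntax,\n# host and user values are whatever the tasks above configure):\n#   fab new deploy:install=True,migrate_db=True\n#   fab new rollback\n\n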
# Helpers. These are called by other functions rather than directly\ndef upload_tar_from_git(branch=None):\n    \"Create an archive from the specified Git branch and upload it\"\n    require('release')\n    require('path')\n    require('project_name')\n\n    with settings(archive_name = '%(release)s.tar.gz' % env):\n        local('/usr/bin/tar czf ../%(archive_name)s .' % env)\n\n    run('mkdir -p %(path)s/releases/%(release)s/%(project_name)s' % env)\n    put(('../%(archive_name)s' % env), ('%(path)s/packages/' % env))\n    with cd('%(path)s/releases/%(release)s/%(project_name)s' % env):\n        run('tar zxf ../../../packages/%(archive_name)s' % env)\n    with cd('%(path)s/releases/%(release)s/%(project_name)s/%(django_app)s' % env):\n        run('cp local_settings.py.production%(postfix)s local_settings.py' % env)\n    local('/bin/rm ../%(archive_name)s' % env)\n\ndef install_site():\n    \"Add the virtualhost file to nginx\"\n    require('release')\n    require('project_name')\n    with cd('%(path)s/releases/%(release)s/%(project_name)s' % env):\n        sudo('cp conf/nginx.conf%(postfix)s /etc/nginx/sites-available/%(project_name)s' % env)\n        #sudo('cp conf/nginx.static.conf /etc/nginx/sites-available/%(project_name)s.static' % env)\n    if not exists('/etc/nginx/sites-enabled/%(project_name)s' % env):\n        with cd('/etc/nginx/sites-enabled'):\n            sudo('ln -s ../sites-available/%(project_name)s ./' % env)\n            #sudo('ln -s ../sites-available/%(user)s.static ./' % env)\n    with cd('%(path)s/releases/%(release)s/%(project_name)s/conf' % env):\n        sudo('cp supervisor.conf%(postfix)s /etc/supervisor/conf.d/%(project_name)s.conf' % env)\n        sudo('cp celeryd.conf%(postfix)s /etc/supervisor/conf.d/%(project_name)s.celeryd.conf' % env)\n\n\ndef install_requirements():\n    \"Install the required packages from the requirements file using pip\"\n    require('release')\n    require('project_name')\n    with cd('%(virtualhost_path)s' % env):\n        with prefix('source %(virtualhost_path)s/bin/activate' % env):\n            run('%(virtualhost_path)s/bin/pip install -r %(path)s/releases/%(release)s/%(project_name)s/requirements.txt' % env)\n\ndef symlink_current_release():\n    \"Symlink our current release\"\n    require('release')\n    with cd('%(path)s/releases' % env):\n        if exists('previous'):\n            run('rm previous')\n        if exists('current'):\n            run('mv current previous')\n        run('ln -s %(release)s current' % env)\n\ndef collect_static():\n    \"Collect static files in its folder\"\n    require('project_name')\n    with cd('%(path)s/releases/current/%(project_name)s' % env):\n        with prefix('source %(virtualhost_path)s/bin/activate' % env):\n            run('%(virtualhost_path)s/bin/python manage.py collectstatic' % env)\n\n@task\ndef migrate(install=False):\n    \"Update the database\"\n    require('project_name')\n    with cd('%(path)s/releases/current/%(project_name)s' % env):\n        with prefix('source %(virtualhost_path)s/bin/activate' % env):\n            if install:\n                run('%(virtualhost_path)s/bin/python manage.py syncdb' % env)\n                #run('%(virtualhost_path)s/bin/python manage.py migrate --fake' % env)\n            else:\n                run('%(virtualhost_path)s/bin/python manage.py syncdb --all' % env)\n                run('%(virtualhost_path)s/bin/python manage.py migrate' % env)\n\n@task\ndef restart_webserver():\n    \"Restart the web server\"\n    sudo('/etc/init.d/nginx reload')\n\n@task\ndef graceful_restart():\n    \"Restart the gunicorn processes\"\n    sudo('supervisorctl restart %(process_name)s' % env)\n    sudo('supervisorctl restart %(celery_process_name)s' % env)\n","sub_path":"distributed-jmeter/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"297687502","text":"import trajoptpy.math_utils as mu\nimport numpy as np\n\n\ndef traj_collisions(traj, robot, n=100):\n    \"\"\"\n    Return the indices along the n-point upsampled trajectory at which the\n    robot collides with the environment or with itself.\n    \"\"\"\n    traj_up = mu.interp2d(np.linspace(0, 1, n),\n                          np.linspace(0, 1, len(traj)), traj)\n    env = robot.GetEnv()\n    col_times = []\n\n    with robot:\n        for (i, row) in enumerate(traj_up):\n            robot.SetActiveDOFValues(row)\n            col_env = env.CheckCollision(robot)\n            col_self = robot.CheckSelfCollision()\n            if col_env or col_self:\n                col_times.append(i)\n    return col_times\n\n\n
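# Note (illustrative): n fixes how densely the waypoints are re-interpolated\n# before collision checking, so even a two-point trajectory is sampled at\n# n poses; traj_is_safe below simply tests that no collision index was found.\n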
def traj_is_safe(traj, robot, n=100):\n    return len(traj_collisions(traj, robot, n)) == 0\n","sub_path":"trajoptpy/check_traj.py","file_name":"check_traj.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"385274452","text":"import tkinter\n\nwin=tkinter.Tk()\nwin.title(\"Listbox1\")\nwin.geometry(\"400x400+200+50\")\n\n# Listbox widget: can hold one or more text entries\nlb=tkinter.Listbox(win,selectmode=tkinter.BROWSE)\nlb.pack()\n\nfor item in [\"good\",\"nice\",\"handsome\",\"aaa\",\"bbb\",\"ccc\",\"ddd\"]:\n    # append in order\n    lb.insert(tkinter.END,item)\n\n# insert at the active (beginning) position\nlb.insert(tkinter.ACTIVE,\"cool\")\n\n# curselection() returns the selected indices, not the items themselves\n# print(lb.curselection())\n\n# get the number of items in the list\nprint(lb.size())\n\nwin.mainloop()\n","sub_path":"Test/Tkinter_test/Listbox1.py","file_name":"Listbox1.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"35099578","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'account'\nurlpatterns = [\n\n    path('singup/', views.UserCreate.as_view(), name='registration'),\n    path('login/', views.UserLogin.as_view(), name='login'),\n    path('user_show/', views.UserShow.as_view(), name='user-show'),\n    path('save_signals/', views.SaveSignalsShow.as_view(), name='save-signals'),\n    path('profile/<int:pk>/', views.ChangeProfile.as_view(), name='my-profile'),\n\n]\n","sub_path":"src/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"353848233","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom scrapy import Spider, Request, Selector\nfrom ..utils import query_params, gen_request_uuid\nimport json, time, scrapy, logging\n\n\nclass HKETLList(Spider):\n    \"\"\"\n    Hong Kong ETF list: 'http://hk.morningstar.com/ap/etf/Explore.aspx?Type=4'\n    \"\"\"\n    name = \"hk-etf-list\"\n    ISOTIMEFORMAT = '%Y-%m-%d %X'\n    allowed_domains = [\"hk.morningstar.com\", \"gllt.morningstar.com\"]\n    start_urls = [\n        'https://gllt.morningstar.com/roq2rh8blz/etfquickrank/default.aspx?Universe=ETALL%24%24ALL&tab=Performance&sortby=ReturnM0&LanguageId=en-GB']\n\n    def start_requests(self):\n        for url in self.start_urls:\n            yield Request(url, meta={'page': 1})\n\n    def parse(self, response):\n        selector = Selector(response)\n\n        xp = '//*[@id=\"ctl00_ContentPlaceHolder1_aFundQuickrankControl_gridResult\"]/tr[contains(@class,\"gridItem\")]'\n        items_tr = selector.xpath(xp)\n        request_id = gen_request_uuid()\n        results = []\n        for item in items_tr:\n            name = item.xpath('td[2]/a/text()').extract_first()\n            detail_url = item.xpath('td[2]/a/@href').extract_first()\n            detail_url = 'https://gllt.morningstar.com/roq2rh8blz' + detail_url[2:]\n            on_click = item.xpath('td[1]/input/@onclick').extract_first()\n            prop_start = on_click.index('{')\n            prop_end = on_click.index('}') + 1\n            prop_json_str = on_click[prop_start:prop_end]\n            prop = json.loads(prop_json_str.replace(\"\\\\\", \"\"))\n\n            param_prop = query_params(detail_url)\n\n            item_result = {\n                'request_id': request_id,\n                'fund_name': name,\n                'fund_detail_url': detail_url,\n                'isin': prop['isin'],\n                'pid': prop['performanceid'],\n                'symbol': prop['symbol'],\n                'security_token': prop['securitytoken'],\n                'ms_fund_id': prop['fundid'],\n                'currency_id': param_prop['CurrencyId'],\n                'base_currency_id': param_prop['BaseCurrencyId'],\n                'from_url': response.url,\n                'crawl_time': time.strftime(self.ISOTIMEFORMAT, time.localtime())\n            }\n            results.append(item_result)\n\n        yield {\n            'item_type': 'hk_etl_list',\n            'item_list': results\n        }\n\n        if response.meta['page'] == 1:\n            page_info_str = selector.xpath(\n                '//*[@id=\"ctl00_ContentPlaceHolder1_aFundQuickrankControl_AspNetPager\"]/table/tr/td[1]/span/text()').extract_first()\n            total_page = self.extract_total_page(page_info_str)\n            logging.info(\"total %s pages\", total_page)\n\n            next_page = response.meta['page'] + 1\n            if next_page <= total_page:\n                form_data = {'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$aFundQuickrankControl$AspNetPager',\n                             '__EVENTARGUMENT': str(next_page)}\n                next_req = scrapy.FormRequest.from_response(response, formnumber=0, formdata=form_data, dont_click=True)\n                yield next_req.replace(meta={'page': next_page, 'total_page': total_page})\n        else:\n            next_page = response.meta['page'] + 1\n            total_page = response.meta['total_page']\n            if next_page <= total_page:\n                form_data = {'__EVENTTARGET': 'ctl00$ContentPlaceHolder1$aFundQuickrankControl$AspNetPager',\n                             '__EVENTARGUMENT': str(next_page)}\n                next_req = scrapy.FormRequest.from_response(response, formnumber=0, formdata=form_data, dont_click=True)\n                yield next_req.replace(meta={'page': next_page, 'total_page': total_page})\n        logging.info(\"finished page [%s]\", response.meta['page'])\n\n
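    # Worked example of the pagination maths (illustrative): a header such as\n    # 1 - 20 of 95 gives page_size = 20 and 95 // 20 = 4 with remainder 15,\n    # so extract_total_page below reports 5 pages.\n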
    @staticmethod\n    def extract_total_page(page_info_str):\n        total_num = int(page_info_str.split('of')[1].strip())\n        page_begin, page_end = (int(p) for p in page_info_str.split('of')[0].split('-'))\n        page_size = page_end - page_begin + 1\n        basic_page_num = total_num // page_size  # integer division; '/' would yield a float page count\n        remain = total_num % page_size\n        return basic_page_num if remain == 0 else basic_page_num + 1\n","sub_path":"etf/etf/spiders/hk_etl_list.py","file_name":"hk_etl_list.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"466205416","text":"# test types serialization\n\nimport logging\nimport pytest\n\nfrom hyperapp.common.htypes import (\n    TPrimitive,\n    tNone,\n    tString,\n    tBinary,\n    tInt,\n    tBool,\n    tDateTime,\n    TOptional,\n    TRecord,\n    TException,\n    TList,\n    Request,\n    Notification,\n    Interface,\n    builtin_mt,\n    name_wrapped_mt,\n    optional_mt,\n    list_mt,\n    field_mt,\n    record_mt,\n    exception_mt,\n    request_mt,\n    notification_mt,\n    interface_mt,\n    )\nfrom hyperapp.common import cdr_coders  # register codec\n\nlog = logging.getLogger(__name__)\n\n\npytest_plugins = ['hyperapp.common.htypes.test.fixtures']\n\n\ndef test_optional(types, mosaic):\n    base_ref = mosaic.put(builtin_mt('string'))\n    piece = optional_mt(base_ref)\n    t = types.resolve(mosaic.put(piece))\n    assert t == TOptional(tString)\n    assert t.base_t is tString\n\n\ndef test_list(types, mosaic):\n    element_ref = mosaic.put(builtin_mt('int'))\n    piece = list_mt(element_ref)\n    t = types.resolve(mosaic.put(piece))\n    assert t == TList(tInt)\n\n\ndef test_list_opt(types, mosaic):\n    base_ref = mosaic.put(builtin_mt('datetime'))\n    element_ref = mosaic.put(optional_mt(base_ref))\n    piece = list_mt(element_ref)\n    t = types.resolve(mosaic.put(piece))\n    assert t == TList(TOptional(tDateTime))\n\n\ndef test_record(types, mosaic):\n    string_list_mt = list_mt(mosaic.put(builtin_mt('string')))\n    bool_opt_mt = 
optional_mt(mosaic.put(builtin_mt('bool')))\n piece = record_mt(None, [\n field_mt('int_field', mosaic.put(builtin_mt('int'))),\n field_mt('string_list_field', mosaic.put(string_list_mt)),\n field_mt('bool_optional_field', mosaic.put(bool_opt_mt)),\n ])\n name = 'some_test_record'\n named_piece = name_wrapped_mt(name, mosaic.put(piece))\n t = types.resolve(mosaic.put(named_piece))\n assert t == TRecord(name, {\n 'int_field': tInt,\n 'string_list_field': TList(tString),\n 'bool_optional_field': TOptional(tBool),\n })\n\n\ndef test_based_record(types, mosaic):\n base_piece = record_mt(None, [\n field_mt('int_field', mosaic.put(builtin_mt('int'))),\n ])\n named_base_piece = name_wrapped_mt('some_base_record', mosaic.put(base_piece))\n named_base_ref = mosaic.put(named_base_piece)\n piece = record_mt(named_base_ref, [\n field_mt('string_field', mosaic.put(builtin_mt('string'))),\n ])\n name = 'some_test_record'\n named_piece = name_wrapped_mt(name, mosaic.put(piece))\n t = types.resolve(mosaic.put(named_piece))\n assert t == TRecord(name, {\n 'int_field': tInt,\n 'string_field': tString,\n })\n\n\ndef test_empty_record(types, mosaic):\n piece = record_mt(None, [])\n\n name_1 = 'record_1'\n named_piece_1 = name_wrapped_mt(name_1, mosaic.put(piece))\n t_1 = types.resolve(mosaic.put(named_piece_1))\n assert t_1 == TRecord(name_1, {})\n\n name_2 = 'record_2'\n named_piece_2 = name_wrapped_mt(name_2, mosaic.put(piece))\n t_2 = types.resolve(mosaic.put(named_piece_2))\n assert t_2 == TRecord(name_2, {})\n\n\ndef test_exception(types, mosaic):\n string_list_mt = list_mt(mosaic.put(builtin_mt('string')))\n bool_opt_mt = optional_mt(mosaic.put(builtin_mt('bool')))\n piece = exception_mt(None, [\n field_mt('int_field', mosaic.put(builtin_mt('int'))),\n field_mt('string_list_field', mosaic.put(string_list_mt)),\n field_mt('bool_optional_field', mosaic.put(bool_opt_mt)),\n ])\n name = 'some_test_exception'\n named_piece = name_wrapped_mt(name, mosaic.put(piece))\n t = types.resolve(mosaic.put(named_piece))\n assert t == TException(name, {\n 'int_field': tInt,\n 'string_list_field': TList(tString),\n 'bool_optional_field': TOptional(tBool),\n })\n\n\ndef test_based_exception(types, mosaic):\n base_piece = exception_mt(None, [\n field_mt('int_field', mosaic.put(builtin_mt('int'))),\n ])\n named_base_piece = name_wrapped_mt('some_base_exception', mosaic.put(base_piece))\n named_base_ref = mosaic.put(named_base_piece)\n piece = exception_mt(named_base_ref, [\n field_mt('string_field', mosaic.put(builtin_mt('string'))),\n ])\n name = 'some_test_exception'\n named_piece = name_wrapped_mt(name, mosaic.put(piece))\n t = types.resolve(mosaic.put(named_piece))\n assert t == TException(name, {\n 'int_field': tInt,\n 'string_field': tString,\n })\n\n\ndef test_empty_exception(types, mosaic):\n piece = exception_mt(None, [])\n\n name_1 = 'exception_1'\n named_piece_1 = name_wrapped_mt(name_1, mosaic.put(piece))\n t_1 = types.resolve(mosaic.put(named_piece_1))\n assert t_1 == TException(name_1, {})\n\n name_2 = 'exception_2'\n named_piece_2 = name_wrapped_mt(name_2, mosaic.put(piece))\n t_2 = types.resolve(mosaic.put(named_piece_2))\n assert t_2 == TException(name_2, {})\n\n\ndef test_interface(types, mosaic):\n int_list_mt = list_mt(mosaic.put(builtin_mt('int')))\n bool_opt_mt = optional_mt(mosaic.put(builtin_mt('bool')))\n request_1 = request_mt(\n method_name='request_1',\n param_fields=[\n field_mt('request_1_str_param', mosaic.put(builtin_mt('string'))),\n field_mt('request_1_int_list_param', 
mosaic.put(int_list_mt)),\n ],\n response_fields=[\n field_mt('request_1_int_response', mosaic.put(builtin_mt('int'))),\n field_mt('request_1_bool_opt_response', mosaic.put(bool_opt_mt)),\n ],\n )\n notification_1 = notification_mt(\n method_name='notification_1',\n param_fields=[\n field_mt('notification_1_datetime_param', mosaic.put(builtin_mt('datetime'))),\n field_mt('notification_1_bool_opt_param', mosaic.put(bool_opt_mt)),\n ],\n )\n request_2 = request_mt(\n method_name='request_2',\n param_fields=[\n field_mt('request_2_datetime_param', mosaic.put(builtin_mt('datetime'))),\n ],\n response_fields=[\n field_mt('request_2_str_response', mosaic.put(builtin_mt('string'))),\n ],\n )\n notification_2 = notification_mt(\n method_name='notification_2',\n param_fields=[\n field_mt('notification_2_int_list_param', mosaic.put(int_list_mt)),\n ],\n )\n request_3 = request_mt('request_3', param_fields=[], response_fields=[])\n notification_3 = notification_mt('notification_3', param_fields=[])\n\n piece = interface_mt(\n base=None,\n method_list=[\n mosaic.put(request_1),\n mosaic.put(notification_1),\n mosaic.put(request_2),\n mosaic.put(notification_2),\n mosaic.put(request_3),\n mosaic.put(notification_3),\n ],\n )\n\n name = 'test_interface'\n named_piece = name_wrapped_mt(name, mosaic.put(piece))\n t = types.resolve(mosaic.put(named_piece))\n\n assert t == Interface(name,\n method_list=[\n Request(\n method_name='request_1',\n params_record_t=TRecord(f'{name}_request_1_params', {\n 'request_1_str_param': tString,\n 'request_1_int_list_param': TList(tInt),\n }),\n response_record_t=TRecord(f'{name}_request_1_response', {\n 'request_1_int_response': tInt,\n 'request_1_bool_opt_response': TOptional(tBool),\n }),\n ),\n Notification(\n method_name='notification_1',\n params_record_t=TRecord(f'{name}_notification_1_params', {\n 'notification_1_datetime_param': tDateTime,\n 'notification_1_bool_opt_param': TOptional(tBool),\n }),\n ),\n Request(\n method_name='request_2',\n params_record_t=TRecord(f'{name}_request_2_params', {\n 'request_2_datetime_param': tDateTime,\n }),\n response_record_t=TRecord(f'{name}_request_2_response', {\n 'request_2_str_response': tString,\n }),\n ),\n Notification(\n method_name='notification_2',\n params_record_t=TRecord(f'{name}_notification_2_params', {\n 'notification_2_int_list_param': TList(tInt),\n }),\n ),\n Request(\n method_name='request_3',\n params_record_t=TRecord(f'{name}_request_3_params'),\n response_record_t=TRecord(f'{name}_request_3_response'),\n ),\n Notification(\n method_name='notification_3',\n params_record_t=TRecord(f'{name}_notification_3_params'),\n ),\n ])\n\n\ndef test_based_interface(types, mosaic):\n int_list_mt = list_mt(mosaic.put(builtin_mt('int')))\n bool_opt_mt = optional_mt(mosaic.put(builtin_mt('bool')))\n request_1 = request_mt(\n method_name='request_1',\n param_fields=[\n field_mt('request_1_datetime_param', mosaic.put(builtin_mt('datetime'))),\n ],\n response_fields=[\n field_mt('request_1_str_response', mosaic.put(builtin_mt('string'))),\n ],\n )\n notification_1 = notification_mt(\n method_name='notification_1',\n param_fields=[\n field_mt('notification_1_int_list_param', mosaic.put(int_list_mt)),\n ],\n )\n\n base_interface_mt = interface_mt(\n base=None,\n method_list=[\n mosaic.put(request_1),\n ],\n )\n base_name = 'test_base_interface'\n named_base_interface_mt = name_wrapped_mt(base_name, mosaic.put(base_interface_mt))\n\n piece = interface_mt(\n base=mosaic.put(named_base_interface_mt),\n method_list=[\n 
mosaic.put(notification_1),\n        ],\n    )\n\n    name = 'test_interface'\n    named_piece = name_wrapped_mt(name, mosaic.put(piece))\n    t = types.resolve(mosaic.put(named_piece))\n\n    base_interface_t = Interface(base_name,\n        method_list=[\n            Request(\n                method_name='request_1',\n                params_record_t=TRecord(f'{base_name}_request_1_params', {\n                    'request_1_datetime_param': tDateTime,\n                    }),\n                response_record_t=TRecord(f'{base_name}_request_1_response', {\n                    'request_1_str_response': tString,\n                    }),\n                ),\n            ])\n\n    assert t == Interface(\n        name=name,\n        base=base_interface_t,\n        method_list=[\n            Notification(\n                method_name='notification_1',\n                params_record_t=TRecord(f'{name}_notification_1_params', {\n                    'notification_1_int_list_param': TList(tInt),\n                    }),\n                ),\n            ])\n","sub_path":"hyperapp/common/htypes/test/test_meta.py","file_name":"test_meta.py","file_ext":"py","file_size_in_byte":10867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"362886905","text":"import pandas as pd\nimport tensorflow as tf\nfrom bert import bert_tokenization as tokenization\nimport os\nimport random\nimport numpy as np\nimport tensorflow_hub as hub\n#import matplotlib.pyplot as plt\nfrom tensorflow.keras.models import Model\nimport re\n\ndf = pd.read_csv('data.csv')\norigin = df.iloc[:1000]['origin'].tolist()\nmodern = df.iloc[:1000]['modern'].tolist()\n\npretrained_path = 'uncased_L-12_H-768_A-12'\nconfig_path = os.path.join(pretrained_path, 'bert_config.json')\ncheckpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')\nvocab_path = os.path.join(pretrained_path, 'vocab.txt')\n\n# TF_KERAS must be added to environment variables in order to use TPU\n#os.environ['TF_KERAS'] = '1'\n\nmax_seq_length = 128\n# Load Pre-Trained BERT Model via TF 2.0\ninput_word_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,\n                                       name=\"input_word_ids\")\ninput_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,\n                                   name=\"input_mask\")\nsegment_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,\n                                    name=\"segment_ids\")\nbert_layer = hub.KerasLayer(\"https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1\",\n                            trainable=True)\npooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])\nmodel = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=[pooled_output, sequence_output])\n\nvocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()\ndo_lower_case = bert_layer.resolved_object.do_lower_case.numpy()\ntokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)\n\ndef get_masks(tokens, max_seq_length):\n    \"\"\"Mask for padding\"\"\"\n    if len(tokens) > max_seq_length:\n        tokens = tokens[:max_seq_length]\n        #raise IndexError(\"Token length more than max seq length!\")\n    return [1] * len(tokens) + [0] * (max_seq_length - len(tokens))\n\n\ndef get_segments(tokens, max_seq_length):\n    \"\"\"Segments: 0 for the first sequence, 1 for the second\"\"\"\n    if len(tokens) > max_seq_length:\n        tokens = tokens[:max_seq_length]\n        # raise IndexError(\"Token length more than max seq length!\")\n    segments = []\n    current_segment_id = 0\n    for token in tokens:\n        segments.append(current_segment_id)\n        if token == \"[SEP]\":\n            current_segment_id = 1\n    return segments + [0] * (max_seq_length - len(tokens))\n\n
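# BERT-style input recap (illustrative comment): each example becomes three\n# aligned vectors of length max_seq_length - the token ids, a 0/1 attention\n# mask over real tokens, and segment ids that flip to 1 after the first [SEP].\n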
def get_ids(tokens, tokenizer, max_seq_length):\n    \"\"\"Token ids from Tokenizer vocab\"\"\"\n    if len(tokens) > max_seq_length:\n        tokens = tokens[:max_seq_length]\n    token_ids = tokenizer.convert_tokens_to_ids(tokens)\n    input_ids = token_ids + [0] * (max_seq_length - len(token_ids))\n    return input_ids\n\ndef batch_iter(id, mask, seg, label, batch_size, max_seq_length):\n    \"\"\"\n    A mini-batch iterator to generate mini-batches for training the neural network\n    param id: token-id vectors, one per example\n    param mask: attention-mask vectors aligned with id\n    param seg: segment-id vectors aligned with id\n    param label: a list of labels\n    param batch_size: the size of each mini-batch\n    param max_seq_length: maximum sequence length (kept for the call signature)\n    return: a mini-batch iterator\n    \"\"\"\n    assert len(id) == len(mask) == len(seg) == len(label)\n    data_size = len(id)\n    iter_num = data_size // batch_size  # Avoid dimension disagreement\n\n    for i in range(iter_num):\n        start_index = i * batch_size\n        end_index = start_index + batch_size\n\n        ids = id[start_index: end_index]\n        masks = mask[start_index: end_index]\n        segs = seg[start_index: end_index]\n        labels = label[start_index: end_index]\n        # print(len(labels))\n\n        permutation = np.random.permutation(labels.shape[0])  # computed but never applied below\n        yield ids, masks, segs, labels\n\ntrain_data_seq = []\ntrain_data_word = []\ntrain_label = []\n\ndef data_filter(origin, modern, total_size, tokenizer, max_seq_length):\n    size = total_size // 2\n    size = min([len(origin), len(modern), size])\n\n    m_lines = [[\"[CLS]\"] + tokenizer.tokenize(line) + [\"[SEP]\"] for line in origin]\n    m_lines = [line for line in m_lines if len(line) < 128]  #and len(line) > 2\n    random.shuffle(m_lines)\n    f_lines = [[\"[CLS]\"] + tokenizer.tokenize(line) + [\"[SEP]\"] for line in modern]\n    f_lines = [line for line in f_lines if len(line) < 128]  #and len(line) > 2\n    random.shuffle(f_lines)\n\n    m_lines = m_lines[: size]\n    f_lines = f_lines[: size]\n\n    lines = m_lines + f_lines\n    label = np.append(np.zeros(len(m_lines)), np.ones(len(f_lines)))\n\n    ids = [get_ids(token, tokenizer, max_seq_length) for token in lines]\n    masks = [get_masks(token, max_seq_length) for token in lines]\n    segs = [get_segments(token, max_seq_length) for token in lines]\n\n    # Shuffle\n    perm = np.random.permutation(len(lines))\n    ids = np.array(ids)\n    masks = np.array(masks)\n    segs = np.array(segs)\n    ids = ids[perm, :]\n    masks = masks[perm, :]\n    segs = segs[perm, :]\n    label = label[perm]\n\n    return ids, masks, segs, label\n\n# Parameters\ntotal_size = 3000\nbatch_size = 128\n\nids, masks, segs, labels = data_filter(origin, modern, total_size, tokenizer, max_seq_length)\ntrain_data = batch_iter(ids, masks, segs, labels, batch_size, max_seq_length)\n\nfor i, train_input in enumerate(train_data):\n    print(\"No.\", i, \"iteration\")\n    input_id, input_mask, input_seg, label = train_input\n    seq_data, word_data = model.predict([input_id, input_mask, input_seg])\n    train_data_seq.extend(seq_data)\n    train_data_word.extend(word_data)\n    train_label.extend(label)\n    print(len(train_label))\n    print(len(train_data_seq), len(train_data_seq[0]))\n    print(len(train_data_word), len(train_data_word[0]))\n\ntrain_data_seq = np.array(train_data_seq)\ntrain_data_word = np.array(train_data_word)\ntrain_label = np.array(train_label)\nnp.save(\"data_seq.npy\", train_data_seq)\nnp.save(\"data_word.npy\", train_data_word)\nnp.save(\"label.npy\", train_label)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"555570700","text":"from mdeq_lib.evaluate.cls_valid import evaluate_classifier\nfrom mdeq_lib.training.cls_train import train_classifier\nimport numpy as np\nfrom scipy.stats import 
ttest_ind\n\nfrom jean_zay.submitit.general_submissions import train_eval_grid\n\n\njob_name = 'shine_classifier_cifar_large'\nn_gpus = 4\nn_runs = 5\nbase_params = dict(\n model_size='LARGE',\n dataset='cifar',\n n_gpus=n_gpus,\n n_epochs=220,\n save_at=50,\n restart_from=50,\n)\nparameters = []\nfor i_run in range(n_runs):\n parameters += [\n # dict(seed=i_run, **base_params),\n dict(seed=i_run, shine=True, **base_params),\n dict(seed=i_run, fpn=True, **base_params),\n ]\n\nres_all = train_eval_grid(\n job_name,\n train_classifier,\n evaluate_classifier,\n parameters,\n to_grid=False,\n timeout_train=20,\n n_gpus_train=n_gpus,\n timeout_eval=2,\n n_gpus_eval=n_gpus,\n project='shine',\n params_to_ignore=['n_epochs', 'save_at', 'restart_from'],\n torch=True,\n)\n\n\n# perf_orig = [res for (res, params) in zip(res_all, parameters) if not params.get('shine', False) and not params.get('fpn', False)]\nperf_shine = [res for (res, params) in zip(res_all, parameters) if params.get('shine', False)]\nperf_fpn = [res for (res, params) in zip(res_all, parameters) if params.get('fpn', False)]\n\n\n# print('Perf orig', perf_orig)\n# print('Perf shine', perf_shine)\n# print('Perf fpn', perf_fpn)\n#\n# print('Stats test orig vs shine', ttest_ind(perf_orig, perf_shine))\n# print('Stats test orig vs fpn', ttest_ind(perf_orig, perf_fpn))\n\nprint('Descriptive stats')\n# print('Perf orig', np.mean(perf_orig), np.std(perf_orig))\nprint('Perf shine', np.mean(perf_shine), np.std(perf_shine))\nprint('Perf fpn', np.mean(perf_fpn), np.std(perf_fpn))\n","sub_path":"jean_zay/submitit/shine/cifar_large.py","file_name":"cifar_large.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272139846","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# %%\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import svm\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import linear_kernel\r\nfrom sklearn import preprocessing\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\n# %%\r\n#df1=pd.read_csv('pro.csv')\r\ndf2=pd.read_csv('pro.csv')\r\ndf2= df2.iloc[:,1:]\r\n# df1['Index'] = np.arange(len(df1))\r\n# df1.set_index('Index')\r\n# import csv\r\ndf10= df2.iloc[:,:7].join(df2.iloc[:,23:25]).join(df2.iloc[:,26:27]).join(df2.iloc[:,33:34]).join(df2.iloc[:,-1:]) # df1[\"Suggested Job Role\"]\r\n#df10.head()\r\n\r\n# %%\r\nfrom sklearn.utils import shuffle\r\ndf = shuffle(df10)\r\ndf= df.dropna()\r\ndf101=df\r\n\r\n# %%\r\nfitdf= pd.DataFrame(df10.iloc[:,:17])\r\n\r\n# %%\r\nfitdf.head(2)\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabel_encoder = preprocessing.LabelEncoder()\r\ndf['Interested subjects']= label_encoder.fit_transform(df['Interested subjects'])\r\ndf['Interested subjects'].unique()\r\n\r\n# from sklearn.preprocessing import LabelEncoder\r\nlabel_encoder2 = preprocessing.LabelEncoder()\r\ndf['interested career area']= label_encoder2.fit_transform(df['interested career area'])\r\ndf['interested career area'].unique()\r\n\r\nlabel_encoder3 = preprocessing.LabelEncoder()\r\ndf['Suggested Job Role']= label_encoder3.fit_transform(df['Suggested Job Role'])\r\ndf['Suggested Job Role'].unique()\r\n\r\nlabel_encoder3 = 
preprocessing.LabelEncoder()\r\ndf['Type of company want to settle in?']= label_encoder3.fit_transform(df['Type of company want to settle in?'])\r\ndf['Type of company want to settle in?'].unique()\r\n\r\nlabel_encoder4 = preprocessing.LabelEncoder()\r\ndf['Management or Technical']= label_encoder4.fit_transform(df['Management or Technical'])\r\ndf['Management or Technical'].unique()\r\n\r\n#labelencoder = LabelEncoder()\r\n\r\n#df= labelencoder.fit_transform(df)\r\ndf.iloc[:,:].head(2)\r\n\r\n# %%\r\ntrain= df[:df.shape[0]*80//100].dropna()\r\ntest= df[df.shape[0]*80//100:].dropna()\r\n\r\n# %%\r\nX_test= test.iloc[:, :-1]\r\nX_train= train.iloc[:, :-1]\r\ny_test= test.iloc[:,-1:]\r\ny_train= train.iloc[:,-1:]\r\n\r\n\r\n\r\nX_train= np.array(X_train)\r\nX_test= np.array(X_test)\r\ny_train= np.array(y_train)\r\ny_test= np.array(y_test)\r\n\r\n# %%\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn= KNeighborsClassifier(n_neighbors= 10)\r\nknn.fit(X_train, np.ravel(y_train,order='C'))\r\nknn.score(X_test,y_test)*100\r\n\r\n# %%\r\nfrom sklearn import svm\r\nsvc = svm.SVC(kernel='linear', C=1)\r\nsvc.fit(X_train, y_train)\r\nsvc.score(X_test, y_test)*100\r\n\r\n# %%\r\nfrom sklearn.naive_bayes import MultinomialNB\r\n# scaler = preprocessing.MinMaxScaler()\r\n# all_features_minmax = scaler.fit_transform(all_features)\r\nnb = MultinomialNB(alpha=0.1)\r\n# cv_scores = cross_val_score(clf, all_features_minmax, all_classes, cv=10)\r\nnb.fit(X_train, y_train)\r\nnb.score(X_test, y_test)*100\r\n\r\n# %%\r\n#from sklearn.ensemble import RandomForestClassifier\r\n#ranfor = RandomForestClassifier(n_estimators=10, random_state=10)\r\n#ranfor.fit(X_train, y_train)\r\n#ranfor.score(X_test, y_test)*100\r\n\r\n# %%\r\nfrom sklearn.linear_model import LogisticRegression\r\nregressor = LogisticRegression(solver='saga', random_state=0)\r\nregressor.fit(X_train, y_train)\r\nregressor.score(X_test, y_test)*100\r\n\r\n# %%\r\njobs= (df2[df.columns[-1]]).tolist()\r\n\r\n# %%\r\ndef max5(arr,jobs):\r\n    temp=[]\r\n    arr=arr.tolist()\r\n    for i in range(5):\r\n        k=np.argmax(arr)\r\n        temp.append(jobs[k])\r\n        arr[k]=0\r\n    return temp\r\n\r\nimport mysql.connector\r\n\r\nmydb = mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"\",database=\"carrier\")\r\n\r\nmycursor = mydb.cursor()\r\n\r\nmycursor.execute(\"SELECT * FROM subjects\")\r\n\r\nresult = mycursor.fetchall()\r\n\r\nres0 = result[0][0]\r\nres1 = result[0][1]\r\nres2 = result[0][2]\r\nres3 = result[0][3]\r\nres4 = result[0][4]\r\nres5 = result[0][5]\r\nres6 = result[0][6]\r\nres7 = result[0][7]\r\nres8 = result[0][21]\r\nres9 = result[0][22]\r\nres10 = result[0][31]\r\n\r\n\r\n\r\n# %%\r\nlul= int(np.floor(np.random.rand()*17217))\r\nlive= df.iloc[lul:lul+2, :-1]\r\ntl=[res0, res1, res2, res3, res4, res5, res6, res7, res8, res9, res10]\r\n#tl=[1, 0,8, 8, 39,77, 69, 80, 'IOT', 'testing','Management']\r\nlive= pd.concat([live, pd.DataFrame([tl], columns= df.columns[:-1])], sort=False)\r\n\r\nlive_df= live.iloc[:, :8]\r\nfor i, e in enumerate(live.columns[:]):\r\n    if live.dtypes[i] == 'O':\r\n        live_df[live.columns[i]]= live[live.columns[i]].astype('category').cat.codes\r\nknow= ((regressor.predict_proba(live_df)))[2]  # row 2 is the user-entered sample appended above\r\njobs[np.argmax(know)]\r\n\r\n\r\ntop5 = max5(know, jobs)  # rank once instead of re-ranking for every slot\r\nprint(\"Suggested Career Path: \" + str(top5))\r\n\r\nres0, res1, res2, res3, res4 = top5\r\n\r\n
mycursor.execute(\"INSERT INTO job (JOB1, JOB2, JOB3, JOB4, JOB5) VALUES (%s, %s, %s, %s, %s)\",\r\n                 (res0, res1, res2, res3, res4))  # parameterised query instead of string interpolation\r\nmydb.commit()\r\n\r\n","sub_path":"carrier_file.py","file_name":"carrier_file.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"110793459","text":"from django.conf.urls import patterns, url\n\nfrom booking import views\nfrom booking.views import CostListView\n\nurlpatterns = patterns('',\n    url(r'^$', views.index, name='index'),\n    # ex: /kassa/5/\n    url(r'^(?P<kassa_id>\\d+)/$', views.kassa, name='kassa_status'),\n    # ex: /kassa/5/add_cost/\n    url(r'^(?P<kassa_id>\\d+)/add_cost/$', views.add_cost, name='add_cost'),\n\n    url(r'^costs/$', CostListView.as_view(), name='costs'),\n    url(r'^costs/page(?P<page>\\d+)/$', CostListView.as_view()),\n\n    url(r'^cost/(?P<pk>\\d+)$', views.CostView.as_view(), name='cost'),\n    url(r'^cost/(?P<pk>\\d+)/edit$', views.CostEdit.as_view(), name='cost_edit'),\n    url(r'^cost/add$', views.CostCreate.as_view(), name='cost_add'),\n)","sub_path":"booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"334885467","text":"import aiida.orm as orm\nfrom aiida.engine import CalcJob\nfrom aiida.plugins import DataFactory\nfrom aiida.common import datastructures\n\nStructureSet = DataFactory('ce.structures')\n\nclass EnumCalculation(CalcJob):\n    \"\"\"\n    AiiDA calculation plugin wrapping the function enumerate_structures;\n    the wrapped file is genenum.py in ../wrappers/\n    \"\"\"\n\n    @classmethod\n    def define(cls, spec):\n        # yapf: disable\n        super(EnumCalculation, cls).define(spec)\n        spec.input('metadata.options.resources', valid_type=dict, default={'num_machines':1, 'num_mpiprocs_per_machine':1}, non_db=True)\n        spec.input('metadata.options.parser_name', valid_type=str, default='ce.genenum', non_db=True)\n        spec.input('metadata.options.input_filename', valid_type=str, default='aiida.json', non_db=True)\n        spec.input('metadata.options.output_filename', valid_type=str, default='aiida.out', non_db=True)\n        spec.input('structure', valid_type=orm.StructureData, help='prototype structure to expand')\n        spec.input('pbc', valid_type=orm.List, default=orm.List(list=[True, True, True]))\n        spec.input('chemical_symbols', valid_type=orm.List, help='An N-element list; each element holds the possible symbols of that site.')\n        spec.input('min_volume', valid_type=orm.Int, default=orm.Int(1))\n        spec.input('max_volume', valid_type=orm.Int, default=orm.Int(1), help='If None, no hnf cells to be considered.')\n        spec.input('concentration_restrictions', required=False, valid_type=orm.Dict, help='dict indicating the concentration of each element.')\n        \n        spec.output('enumerate_structures', valid_type=StructureSet, help='enumerated structures storing the outputs of the process')\n        spec.output('number_of_structures', valid_type=orm.Int, help='Number of structures of enumerate structures')\n\n        spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')\n\n    def prepare_for_submission(self, folder):\n        \"\"\"Write the input files and describe the job for the engine.\n        \"\"\"\n        self.write_input_files(folder)\n\n        # Code\n        codeinfo = datastructures.CodeInfo()\n        codeinfo.cmdline_params = [self.options.input_filename]\n        codeinfo.code_uuid = self.inputs.code.uuid\n        codeinfo.stdout_name = self.metadata.options.output_filename\n\n        # Prepare a `CalcInfo` to be returned to the engine\n        calcinfo = datastructures.CalcInfo()\n        calcinfo.codes_info = [codeinfo]\n        
retrieve_list = ['cells.raw', 'coordinates.raw', 'nframes.raw', 'atomic_numbers.raw']\n calcinfo.retrieve_list = retrieve_list + [self.metadata.options.output_filename]\n\n return calcinfo\n\n def write_input_files(self, folder):\n import json\n\n # prepare a param.json which contain the parameters needed to\n # run the code. param read from the inputs.\n param = dict()\n ase_structure = self.inputs.structure.get_ase()\n cell = ase_structure.cell.tolist()\n positions = ase_structure.positions.tolist()\n pbc = self.inputs.pbc.get_list()\n param['structure'] = {'cell': cell,\n 'positions': positions,\n 'pbc': pbc}\n\n chemical_symbols = self.inputs.chemical_symbols.get_list()\n minv = self.inputs.min_volume.value\n maxv = self.inputs.max_volume.value\n if maxv > minv:\n sizes = [i for i in range(minv, maxv)]\n else:\n sizes = [minv]\n param['chemical_symbols'] = chemical_symbols\n param['sizes'] = sizes\n\n concentration_restrictions = self.inputs.get('concentration_restrictions', None)\n if concentration_restrictions is not None:\n param['concentration_restrictions'] = concentration_restrictions\n\n param_str = json.dumps(param, indent=4, sort_keys=True)\n with folder.open(self.options.input_filename, 'w', encoding='utf8') as handle:\n handle.write(param_str)\n","sub_path":"aiida_ce/calculations/genenum.py","file_name":"genenum.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185131668","text":"from sys import exit\nimport rgb\nfrom os import listdir, path, environ\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = ''\nimport pygame\nfrom UIManager import ActionManager, TextLine\nimport csv\nimport vlc\nfrom data_parser import get_config, ch_config, get_user_data, update_user_data, get_sys_config, get_achievements, reset_config\n\n\nclass State_Manager():\n\tdef __init__(self, config= get_config()):\n\t\t\n\t\traw_paths= get_sys_config()\n\t\tself.ASSETS_DIR= path.join(*raw_paths[\"Assets\"])\n\t\tself.WAV_DIR = path.join(*raw_paths[\"WAV Directory\"])\n\t\tself.wav_files = {}\n\t\tfor i in listdir(self.WAV_DIR):\n\t\t\tself.wav_files[i.rsplit('.', 1)[0]] = vlc.MediaPlayer(f\"{self.WAV_DIR}{i}\")\n\t\tself.curr_state = None\n\t\tpygame.display.init()\n\t\tpygame.font.init()\n\t\t\n\t\tself.SYSFONT= f\"{self.ASSETS_DIR}ARCADE_R.TTF\"\n\t\t\n\t\tpygame.display.set_caption(\"Prototype 2\")\n\t\tself.icon= pygame.image.load(f\"{self.ASSETS_DIR}quaver.png\")\n\t\tpygame.display.set_icon(self.icon)\n\t\tself.fps_clock = pygame.time.Clock()\n\t\t\n\t\tself.SIZE = self.WIDTH, self.HEIGHT = eval(config[\"Resolution\"][\"Value\"])\n\t\tself.isFullScreen= eval(config[\"Fullscreen\"][\"Value\"])\n\t\tif self.isFullScreen:\n\t\t\tself.screen = pygame.display.set_mode(self.SIZE, pygame.FULLSCREEN)\n\t\telse:\n\t\t\tself.screen = pygame.display.set_mode(self.SIZE)\n\t\tself.FPS = int(config[\"FPS\"][\"Value\"])\n\t\tself.f_t = 1 / self.FPS\n\t\tself.time = 0\n\t\tself.lag = 0\n\t\tself.g_t = 0\n\t\tself.TRACKS_DIR = path.join(*raw_paths[\"CSV Directory\"])\n\t\n\tdef update(self):\n\t\tself.curr_state.update(self.g_t, self.lag)\n\t\tself.curr_state.draw()\n\t\tpygame.display.update()\n\t\tself.time, self.lag = self.chk_slp()\n\t\tself.g_t += self.time\n\t\n\tdef ch_state(self, new_state, args={}):\n\t\tself.curr_state.exit()\n\t\tprint(\"Entering new state\")\n\t\tself.curr_state = None\n\t\tself.curr_state = new_state\n\t\tself.curr_state.enter(args)\n\t\tself.g_t = 0\n\t\n\tdef chk_slp(self):\n\t\tdel_t = 
self.fps_clock.tick_busy_loop(self.FPS) / 1000 - self.f_t\n\t\tif del_t > 0:\n\t\t\tprint(f\"Lag : {del_t}s\")\n\t\t\t\t\n\t\t\treturn self.fps_clock.get_time()/1000, del_t\n\t\telse:\n\t\t\treturn self.fps_clock.get_time()/1000, 0\n\n\nclass BaseState:\n\tdef __init__(self, fsm):\n\t\tself.fsm = fsm\n\t\tself.action_manager = ActionManager()\n\t\tself.background = pygame.image.load(f\"{self.fsm.ASSETS_DIR}background.jpg\").convert()\n\t\n\tdef enter(self, args):\n\t\tpass\n\t\n\tdef exit(self):\n\t\tself.fsm.screen.blit(self.background, (0,0))\n\t\n\tdef update(self, game_time, lag):\n\t\tprint(\"Updating base state\")\n\t\n\tdef draw(self):\n\t\tself.fsm.screen.fill(rgb.BLACK)\n\t\tself.fsm.screen.blit(self.background, (0,0))\n\t\tself.action_manager.draw_buttons(self.fsm.screen)\n\n\nclass MainMenuState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.action_manager.add_button(\"Start (Space)\", (50, 50), (50, 30), ret=\"Start\", key=\"space\")\n\t\tself.action_manager.add_button(\"Options\", (50, 100), (50, 30))\n\t\tself.action_manager.add_button(\"Achievements\", (50, 150), (50, 30))\n\t\tself.action_manager.add_button(\"Storyline\", (50, 200), (50, 30))\n\t\tself.action_manager.add_button(\"About\", (50, self.fsm.HEIGHT - 100), (50, 30))\n\t\tself.action_manager.add_button(\"Exit (Esc)\", (50, self.fsm.HEIGHT - 50), (50, 30), ret=\"Exit\", key=\"escape\")\n\t\t\n\t\tself.font = pygame.font.Font(self.fsm.SYSFONT, 24)\n\t\t\n\t\tself.text_line = TextLine(\"~BACH TO THE FUTURE~\", self.font, (300, 50))\n\t\t\n\tdef update(self, game_time, lag):\n\t\tevents = pygame.event.get()\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Start\":\n\t\t\t\tself.fsm.ch_state(SelectTrackState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Options\":\n\t\t\t\tself.fsm.ch_state(SettingsState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Achievements\":\n\t\t\t\tself.fsm.ch_state(AchievementsState(self.fsm))\n\t\t\t\t\n\t\t\telif action == \"About\":\n\t\t\t\tself.fsm.ch_state(AboutState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Storyline\":\n\t\t\t\tfrom Storyline import StoryState\n\t\t\t\tself.fsm.ch_state(StoryState(self.fsm), {\"file\" : \"storyline.json\"})\n\t\t\t\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tself.text_line.draw(self.fsm.screen)\n\n\nclass AboutState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.font = pygame.font.Font(self.fsm.SYSFONT, 24)\n\t\t\n\t\tself.text_lines= []\n\t\tself.text_lines.append(TextLine(\"Bach to the Future\", self.font, (200, 100)))\n\t\tself.text_lines.append(TextLine(\"Icon made by Freepik from www.flaticon.com\", pygame.font.Font(self.fsm.SYSFONT, 14), (150, 300)))\n\t\t\n\t\tself.action_manager.add_button(\"Back\", (50, 50), (50, 30))\n\t\t\n\t\tself.action_manager.add_button(\"Exit\", (50, self.fsm.HEIGHT- 100), (50, 30))\n\t\t\n\tdef update(self, game_time, lag):\n\t\t\n\t\tactions= self.action_manager.chk_actions(pygame.event.get())\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\telif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tfor line in self.text_lines:\n\t\t\tline.draw(self.fsm.screen)\n\n\t\t\nclass SelectTrackState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.tracks = 
listdir(self.fsm.TRACKS_DIR)\n\t\t\n\t\tfor i, file in enumerate(self.tracks):\n\t\t\tself.action_manager.add_button(file.rsplit('.',1)[0], (375, i * 50 + 50), (50, 30), canScroll=True, ret=file)\n\t\t\n\t\tself.action_manager.scroll_max = self.action_manager.scroll_buttons[-1].rect[1] - (self.fsm.HEIGHT//4)*3\n\t\t\n\t\tself.action_manager.add_button(\"Exit (Esc)\", (50, self.fsm.HEIGHT - 100), (50, 30), ret=\"Exit\", key=\"escape\")\n\t\tself.action_manager.add_button(\"Back (Backspace)\", (50, 50), (50, 30), ret=\"Back\", key=\"backspace\")\n\t\n\tdef update(self, game_time, lag):\n\t\tevents = pygame.event.get()\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\telif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\telif action in self.tracks:\n\t\t\t\tprint(f\"Playing {action}\")\n\t\t\t\tself.fsm.ch_state(PlayGameState(self.fsm), {\"file_name\": action})\n\t\n\tdef draw(self):\n\t\tsuper().draw()\n\n\nclass SettingsState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.action_manager.add_button(\"Back\", (50, 50), (50, 30))\n\t\tself.action_manager.add_button(\"Exit (Esc)\", (50, self.fsm.HEIGHT - 100), (50, 30), ret=\"Exit\", key=\"escape\")\n\t\tself.action_manager.add_button(\"Restore\", (50, 150), (110, 30), ret=\"Restore defaults\")\n\t\tself.action_manager.add_button(\"defaults\", (50, 180), (110, 30), ret=\"Restore defaults\")\n\t\tself.font = pygame.font.Font(self.fsm.SYSFONT, 15)\n\t\n\tdef enter(self, args):\n\t\tself.settings= get_config()\n\t\tself.text= []\n\t\tself.text_lines= []\n\t\t\n\t\tfor i, setting in enumerate(self.settings):\n\t\t\tval = self.settings[setting][\"Value\"]\n\t\t\tself.text_lines.append(TextLine(f\"{setting} : {val}\", self.font, (300, i * 50 + 30, 10, 10)))\n\t\t\tself.action_manager.add_button(\"Change\", (200, i * 50 + 25), (30, 30), ret=setting)\n\t\n\tdef update(self, game_time, lag):\n\t\tevents = pygame.event.get()\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\t\n\t\t\tif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\telif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\telif action in self.settings:\n\t\t\t\tself.fsm.ch_state(ChSettingState(self.fsm), {\"setting\": action, \"value\": self.settings[action]})\n\t\t\t\n\t\t\telif action == \"Restore defaults\":\n\t\t\t\treset_config()\n\t\t\t\tself.fsm.__init__()\n\t\t\t\tself.fsm.curr_state = MainMenuState(self.fsm)\n\t\t\t\tself.fsm.ch_state(SettingsState(self.fsm))\n\t\n\tdef draw(self):\n\t\tsuper().draw()\n\t\t\n\t\tfor text_line in self.text_lines:\n\t\t\ttext_line.draw(self.fsm.screen)\n\t\t\n\nclass ChSettingState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.font = pygame.font.Font(self.fsm.SYSFONT, 20)\n\t\tself.action_manager.add_button(\"Back\", (50, 50), (50, 30))\n\t\n\tdef enter(self, args):\n\t\tself.args= args\n\t\tself.setting = args[\"setting\"]\n\t\tself.setting_text_line= TextLine(self.setting, self.font, (250, 50))\n\t\tself.val = args[\"value\"][\"Value\"]\n\t\tself.val_text_line= TextLine(f\"Current value : {self.val}\", self.font, (250, 100))\n\t\t\n\t\tself.choices= args[\"value\"][\"Choices\"]\n\t\tfor e, i in enumerate(args[\"value\"][\"Choices\"]):\n\t\t\tself.action_manager.add_button(str(i), (250, e*50+150), (50, 30))\n\t\n\tdef update(self, 
game_time, lag):\n\t\tevents = pygame.event.get()\n\t\t\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(SettingsState(self.fsm))\n\t\t\telif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\t\t\t\n\t\t\telif action in self.choices or eval(action) in self.choices:\n\t\t\t\tprint(f\"Choice clicked : {eval(action)}\")\n\t\t\t\tch_config(self.setting, action)\n\t\t\t\tconfig2= get_config()\n\t\t\t\tself.fsm.__init__(config2)\n\t\t\t\tself.fsm.curr_state= MainMenuState(self.fsm)\n\t\t\t\tself.fsm.ch_state(SettingsState(self.fsm))\n\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tself.setting_text_line.draw(self.fsm.screen)\n\t\tself.val_text_line.draw(self.fsm.screen)\n\n\n\nclass AchievementsState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.action_manager.add_button(\"Back\", (50, 50), (50, 30))\n\t\tself.name_font = pygame.font.Font(self.fsm.SYSFONT, 20)\n\t\tself.des_font = pygame.font.Font(self.fsm.SYSFONT, 14)\n\n\t\tself.text_lines= []\n\t\thasAchieved= get_user_data()[\"Achievements\"]\n\t\tprint(hasAchieved)\n\t\tachievements= get_achievements()\n\t\tfor i, achievement in enumerate(achievements):\n\t\t\t\n\t\t\tfont_col= rgb.GREEN if hasAchieved[achievement[\"name\"]] else rgb.WHITE\n\t\t\tself.text_lines.append(TextLine(achievement[\"name\"], self.name_font, (200, i * 80 + 50), font_colour= font_col))\n\t\t\tself.text_lines.append(TextLine(achievement[\"description\"], self.des_font, (200, i * 80 + 85), font_colour= font_col))\n\n\t\n\tdef update(self, game_time, lag):\n\t\tevents = pygame.event.get()\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\telif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\n\tdef draw(self):\n\t\tsuper().draw()\n\n\t\tfor line in self.text_lines:\n\t\t\tline.draw(self.fsm.screen)\n\n\nclass OrbModel:\n\tdef __init__(self, x, y, duration, lane, end_time):\n\t\tself.length = duration * 450 # pixels\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.lane = lane\n\t\tself.end_time = end_time\n\t\n\tdef getTail(self):\n\t\treturn self.y + self.length\n\nclass PlayGameState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tpygame.key.set_repeat(1)\n\t\tself.action_manager.add_button(\"Back\", (self.fsm.WIDTH-100, 50), (50, 30), ret=\"Back\", key=\"backspace\")\n\t\tself.action_manager.add_keystroke(\"Pause\", 'p')\n\t\tself.action_manager.add_keystroke(\"Vol+\", \"up\")\n\t\tself.action_manager.add_keystroke(\"Vol-\", \"down\")\n\t\tself.isPlaying = True\n\t\tself.beatmap = None\n\t\tself.orbs = []\n\t\tself.image = pygame.image.load(f\"{self.fsm.ASSETS_DIR}longrectangle.png\").convert()\n\t\t\n\t\tself.score = 0\n\t\tself.score_font = pygame.font.Font(self.fsm.SYSFONT, 24)\n\t\tself.score_line = TextLine(str(self.score), self.score_font, (750, 50))\n\t\t\n\t\tself.orb_spd= 450\n\t\tself.countdown = self.fsm.FPS * 5\n\t\n\tdef enter(self, args):\n\t\t\n\t\tself.file = args[\"file_name\"]\n\t\tprint(f\"File name = {self.file}\")\n\t\tfile_path = f\"{self.fsm.TRACKS_DIR}{self.file}\"\n\t\twith open(file_path, 'r') as file:\n\t\t\treader = csv.reader(file)\n\t\t\tnext(reader)\n\t\t\tself.beatmap = [row for row in reader]\n\t\tlanes = 4\n\t\tself.positions = [i * 100 for i in range(1, lanes + 
1)]\n\t\t\n\t\t############################################################################\n\t\t\n\t\tself.lane1 = pygame.image.load(f\"{self.fsm.ASSETS_DIR}red.png\").convert()\n\t\tself.lane2 = pygame.image.load(f\"{self.fsm.ASSETS_DIR}green.png\").convert()\n\t\tself.lane3 = pygame.image.load(f\"{self.fsm.ASSETS_DIR}yellow.png\").convert()\n\t\tself.lane4 = pygame.image.load(f\"{self.fsm.ASSETS_DIR}purple.png\").convert()\n\t\tself.laneIcons = [[(self.lane1, (self.positions[0], 490)), False], \\\n\t\t [(self.lane2, (self.positions[1], 490)), False], \\\n\t\t [(self.lane3, (self.positions[2], 490)), False], \\\n\t\t [(self.lane4, (self.positions[3], 490)), False]]\n\t\t\n\t\tself.action_manager.add_sp_keystroke('f', 'f')\n\t\tself.action_manager.add_sp_keystroke('g', 'g')\n\t\tself.action_manager.add_sp_keystroke('h', 'h')\n\t\tself.action_manager.add_sp_keystroke('j', 'j')\n\t\t\n\t\t############################################################################\n\t\t\n\t\treference_note = int(self.beatmap[0][2])\n\t\tlane = 0\n\t\t\n\t\tfor beat in self.beatmap:\n\t\t\tdiff = int(beat[2]) - reference_note\n\t\t\tlane = (lane + diff) % lanes\n\t\t\tx = self.positions[lane]\n\t\t\t\n\t\t\tend_time = float(beat[0])\n\t\t\tduration = float(beat[1])\n\t\t\ty = -(end_time) * self.orb_spd + 498 - self.orb_spd * 0.1\n\t\t\torb = OrbModel(x, y, duration, lane, end_time)\n\t\t\tself.orbs.append(orb)\n\t\t\treference_note = int(beat[2])\n\t\t\n\t\twav_file = self.file.rsplit('.', 1)[0]\n\t\tself.player = self.fsm.wav_files[wav_file]\n\t\tself.volume= int(get_config()[\"Default Volume\"][\"Value\"])\n\t\tprint(self.volume)\n\t\tself.player.audio_set_volume(self.volume)\n\t\tself.player.play()\n\t\n\tdef update(self, game_time, lag):\n\t\tactions = self.action_manager.chk_actions(pygame.event.get())\n\t\t\n\t\torbsONSCREEN = [orb for orb in self.orbs if orb.getTail() > 0]\n\t\t\n\t\tfor action in actions:\n\t\t\t\n\t\t\tif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Back\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Pause\":\n\t\t\t\tself.isPlaying = not self.isPlaying\n\t\t\t\tself.player.pause()\n\t\t\t\tif pygame.key.get_repeat() == 0:\n\t\t\t\t\tpygame.key.set_repeat(1)\n\t\t\t\telse:\n\t\t\t\t\tpygame.key.set_repeat(0)\n\t\t\t\n\t\t\telif action == \"Vol+\":\n\t\t\t\tself.volume += 1\n\t\t\t\tself.player.audio_set_volume(self.volume)\n\t\t\t\tprint(f\"Volume : {self.player.audio_get_volume()}\")\n\t\t\t\n\t\t\telif action == \"Vol-\":\n\t\t\t\tself.volume= max(0, self.volume-1)\n\t\t\t\tself.player.audio_set_volume(self.volume)\n\t\t\t\tprint(f\"Volume : {self.player.audio_get_volume()}\")\n\t\t\t\n\t\t\telif action == \"f (down)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif abs(orb.getTail() - 500) < 10 and orb.lane == 0:\n\t\t\t\t\t\tself.score += 1\n\t\t\t\t\t\tprint(\"Score += 1\")\n\t\t\t\tself.laneIcons[0][1] = True\n\t\t\t\n\t\t\telif action == \"f (up)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif orb.lane == 0:\n\t\t\t\t\t\tpenalty = round(0.1*max(orb.y - 500, 0))\n\t\t\t\t\t\tself.score -= penalty\n\t\t\t\t\t\tprint(f\"Penalty to score : -{penalty}\")\t\t\n\t\t\t\tself.laneIcons[0][1] = False\n\t\t\t\n\t\t\telif action == \"g (down)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif abs(orb.getTail() - 500) < 10 and orb.lane == 1:\n\t\t\t\t\t\tself.score += 1\n\t\t\t\t\t\tprint(\"Score += 1\")\n\t\t\t\tself.laneIcons[1][1] = True\n\t\t\t\n\t\t\telif action == \"g 
(up)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif orb.lane == 1:\n\t\t\t\t\t\tpenalty = round(0.1*max(orb.y - 500, 0))\n\t\t\t\t\t\tself.score -= penalty\n\t\t\t\t\t\tprint(f\"Penalty to score : -{penalty}\")\t\t\t\t\n\t\t\t\tself.laneIcons[1][1] = False\n\t\t\t\n\t\t\telif action == \"h (down)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif abs(orb.getTail() - 500) < 10 and orb.lane == 2:\n\t\t\t\t\t\tself.score += 1\n\t\t\t\t\t\tprint(\"Score += 1\")\n\t\t\t\tself.laneIcons[2][1] = True\n\t\t\t\n\t\t\telif action == \"h (up)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif orb.lane == 2:\n\t\t\t\t\t\tpenalty = round(0.1*max(orb.y - 500, 0))\n\t\t\t\t\t\tself.score -= penalty\n\t\t\t\t\t\tprint(f\"Penalty to score : -{penalty}\")\t\t\n\t\t\t\tself.laneIcons[2][1] = False\n\t\t\t\n\t\t\telif action == \"j (down)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif abs(orb.getTail() - 500) < 10 and orb.lane == 3:\n\t\t\t\t\t\tself.score += 1\n\t\t\t\t\t\tprint(\"Score += 1\")\n\t\t\t\tself.laneIcons[3][1] = True\n\t\t\t\n\t\t\telif action == \"j (up)\":\n\t\t\t\tfor orb in orbsONSCREEN:\n\t\t\t\t\tif orb.lane == 3:\n\t\t\t\t\t\tpenalty = round(0.1*max(orb.y - 500, 0))\n\t\t\t\t\t\tself.score -= penalty\n\t\t\t\t\t\tprint(f\"Penalty to score : -{penalty}\")\t\t\n\t\t\t\tself.laneIcons[3][1] = False\n\n\t\t\n\t\tif self.isPlaying: # pause handling\n\t\t\t# iterate over a copy: removing from self.orbs while iterating over it would skip orbs\n\t\t\tfor i in list(self.orbs):\n\t\t\t\ti.y += self.orb_spd * (self.fsm.fps_clock.get_time() / 1000)\n\t\t\t\tif i.y > self.fsm.HEIGHT:\n\t\t\t\t\tself.orbs.remove(i)\n\t\t\n\t\tif len(self.orbs) == 0:\n\t\t\tself.countdown -= 1\n\t\t\tif self.countdown <= 0:\n\t\t\t\tprint(\"Track Completed!\")\n\t\t\t\tself.fsm.ch_state(GameOverState(self.fsm), {\"file_name\": self.file, \"score\": self.score})\n\t\t\n\t\tself.score_line = TextLine(str(self.score), self.score_font, (550, 50))\n\t\n\tdef exit(self):\n\t\tself.player.stop()\n\t\tpygame.key.set_repeat(0)\n\t\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tself.score_line.draw(self.fsm.screen)\n\t\tpygame.draw.line(self.fsm.screen, rgb.GREY, (0,500), (800,500), 5)\n\t\t\n\t\tfor pos in self.positions:\n\t\t\tx = pos - 35\n\t\t\tpygame.draw.line(self.fsm.screen, rgb.GREEN, (x, 0), (x, self.fsm.HEIGHT), 5)\n\t\tpygame.draw.line(self.fsm.screen, rgb.GREEN, (self.positions[-1] + 60, 0), (self.positions[-1] + 60, self.fsm.HEIGHT), 5)\n\t\t\n\t\tfor i in self.orbs:\n\t\t\tself.fsm.screen.blit(self.image, (i.x, round(i.y + i.length * 0.1)), (0, 0, 30, round(i.length * 0.9)))\n\t\t\t\n\t\tfor args, boolean in self.laneIcons:\n\t\t\tif boolean:\n\t\t\t\tself.fsm.screen.blit(*args)\n\nclass GameOverState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\tself.action_manager.add_button(\"Retry\", (200, 400), (50, 30))\n\t\tself.action_manager.add_button(\"Back to Main Menu\", (500, 400), (50, 30), ret=\"Main Menu\")\n\t\tself.action_manager.add_button(\"Back to Start\", (300, 400), (50, 30), ret=\"Start\")\n\t\tself.action_manager.add_keystroke(\"Exit\", \"escape\")\n\t\tself.high_scores= get_user_data()[\"Highscores\"]\n\t\tself.score_font = pygame.font.Font(self.fsm.SYSFONT, 24)\n\t\tself.high_score_text= TextLine(\"High Score achieved!\", self.score_font, (250, 300))\n\t\n\tdef enter(self, args):\n\t\tself.args = args\n\t\t\n\t\tself.score = args[\"score\"]\n\t\tself.track= args[\"file_name\"].rsplit('.', 1)[0]\n\t\t\n\t\tctr= self.fsm.WIDTH // 2\n\n\t\tself.score_line = TextLine(f\"Score : {self.score}\", self.score_font, (ctr, 150)).align_ctr()\n\n\t\tself.track_line= 
TextLine(self.track, self.score_font, (ctr, 50)).align_ctr()\n\n\t\tif self.high_scores[self.track] < self.score:\n\t\t\tprint(\"High Score achieved!\")\n\t\t\tself.isHighScore= True\n\t\t\tupdate_user_data((\"Highscores\", args[\"file_name\"].rsplit('.', 1)[0]), args[\"score\"])\n\t\telse:\n\t\t\tprint(\"High Score not achieved\")\n\t\t\tself.isHighScore= False\t\t\t\n\t\n\tdef update(self, game_time, lag):\n\t\tevents = pygame.event.get()\n\t\t\n\t\tactions = self.action_manager.chk_actions(events)\n\t\t\n\t\tfor action in actions:\n\t\t\tif action == \"Retry\":\n\t\t\t\tself.fsm.ch_state(PlayGameState(self.fsm), self.args)\n\t\t\t\n\t\t\telif action == \"Main Menu\":\n\t\t\t\tself.fsm.ch_state(MainMenuState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Start\":\n\t\t\t\tself.fsm.ch_state(SelectTrackState(self.fsm))\n\t\t\t\n\t\t\telif action == \"Exit\":\n\t\t\t\tself.fsm.ch_state(ExitState(self.fsm))\n\t\n\tdef draw(self):\n\t\tsuper().draw()\n\t\tself.score_line.draw(self.fsm.screen)\n\t\tself.track_line.draw(self.fsm.screen)\n\t\tif self.isHighScore:\n\t\t\tself.high_score_text.draw(self.fsm.screen)\n\n\nclass ExitState(BaseState):\n\tdef __init__(self, fsm):\n\t\tsuper().__init__(fsm)\n\t\n\tdef enter(self, args):\n\t\tprint(\"Exiting program\")\n\t\tpygame.quit()\n\t\texit()\n\n\nif __name__ == \"__main__\":\n\t\n\tfsm = State_Manager()\n\tfsm.curr_state = MainMenuState(fsm)\n\twhile True:\n\t\tfsm.update()\n","sub_path":"Prototype 3/state_manager.py","file_name":"state_manager.py","file_ext":"py","file_size_in_byte":18883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519158674","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n \nimport roslib;\nimport rospy \nimport actionlib \nimport tf\nfrom std_msgs.msg import String\nfrom actionlib_msgs.msg import * \nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist \nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal \nfrom random import sample \nfrom math import pow, sqrt \n\nclass NavTest(): \n    def __init__(self): \n        rospy.init_node('robot2_navigation', anonymous=True) \n        rospy.on_shutdown(self.shutdown) \n\n        self.state=0\n        self.plan = 0\n        self.door_plan = 0\n        self.open_state = 0\n\n        self.tf_listener = tf.TransformListener()\n        \n        # Time to pause at each goal location \n        self.rest_time = rospy.get_param(\"~rest_time\", 0.01) \n\n\n        self.sub = rospy.Subscriber('door_state', String, self.callback)\n\n        # Possible states of reaching a goal \n        goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED', \n                       'SUCCEEDED', 'ABORTED', 'REJECTED', \n                       'PREEMPTING', 'RECALLING', 'RECALLED', \n                       'LOST'] \n        \n        # Set the locations of the goal points \n        # To get the coordinates of a point, click the 2D Nav Goal button in rviz, then click a point on the map \n        # The coordinates will then be shown in the terminal \n        self.locations = dict() \n\n        self.locations['p1'] = Pose(Point(6.000, 0.000, 0.000), Quaternion(0.000, 0.000, 0.707, 0.707)) \n        self.locations['p4'] = Pose(Point(5.000,3.000, 0.000), Quaternion(0.000, 0.000, 1.000, 0.000)) \n        self.locations['p2'] = Pose(Point(3.500, 1.000, 0.000), Quaternion(0.000, 0.000, -0.707, 0.707)) \n        self.locations['p3'] = Pose(Point(5.000, -3.5000, 0.000), Quaternion(0.000, 0.000, 0.000, 1.000)) \n        \n        \n        # Publisher for the messages that control the robot \n        self.cmd_vel_pub = rospy.Publisher('/robot2/cmd_vel', Twist, queue_size=5) \n        \n        # Subscribe to the move_base action server \n        self.move_base = actionlib.SimpleActionClient(\"/robot2/move_base\", MoveBaseAction) \n\n        rospy.loginfo(\"Waiting for move_base action server...\") \n\n        # 60-second wait limit \n        self.move_base.wait_for_server(rospy.Duration(60)) \n        rospy.loginfo(\"Connected to move base server\") \n\n        # 
Save the robot's initial pose from rviz \n        initial_pose = PoseWithCovarianceStamped() \n\n        # Variables to track the success rate, running time, and distance \n        self.n_locations = len(self.locations) \n        self.n_goals = 0 \n        self.n_successes = 0 \n        self.i = self.n_locations \n        \n        start_time = rospy.Time.now() \n        running_time = 0 \n        self.location = \"\" \n        self.last_location = \"\" \n\n\n        # Make sure we have an initial pose \n        while initial_pose.header.stamp == \"\": \n            rospy.sleep(1) \n\n        rospy.loginfo(\"Starting navigation test\") \n\n        \n\n\n    def update_initial_pose(self, initial_pose): \n        self.initial_pose = initial_pose \n\n    def shutdown(self): \n        rospy.loginfo(\"Stopping the robot...\") \n        self.move_base.cancel_goal() \n        rospy.sleep(2) \n        self.cmd_vel_pub.publish(Twist()) \n        rospy.sleep(1) \n\n    def circle(self):\n        \n        # If all the points have been visited, start the sequence over \n        if self.i == self.n_locations: \n            self.i = 0 \n            #sequence = sample(locations, n_locations) \n            #print(sequence)\n            self.sequence=list(self.locations)\n            print(self.sequence)\n            # If the last point is the same as the first point, skip it \n            if self.sequence[0] == self.last_location: \n                self.i = 1 \n\n        # Get the next goal point from the current sequence \n        location = self.sequence[self.i] \n\n\n        \n        \n\n        # Set the next goal point \n        self.goal = MoveBaseGoal() \n        self.goal.target_pose.pose = self.locations[location] \n        self.goal.target_pose.header.frame_id = '/map' \n        self.goal.target_pose.header.stamp = rospy.Time.now() \n\n        \n        \n        \n        if self.plan == 0 :\n            # Head to the next location \n            self.move_base.send_goal(self.goal)\n            # Let the user know the next location \n            rospy.loginfo(\"Going to: \" + str(location)) \n\n            self.plan=1 \n\n\n\n        \n        state = self.move_base.get_state() \n        if state == GoalStatus.SUCCEEDED: \n            rospy.loginfo(\"Goal succeeded!\") \n            self.n_successes += 1 \n            self.plan=0 \n            self.i += 1 \n            self.n_goals += 1 \n\n        rospy.loginfo(\"State:\" + str(state)) \n\n    def open_door(self):\n        goal_pose=Pose(Point(8.000, 3.000, 0.000), Quaternion(0.000, 0.000, -0.707, 0.707)) \n\n        self.goal = MoveBaseGoal() \n        self.goal.target_pose.pose = goal_pose \n        self.goal.target_pose.header.frame_id = '/map' \n        self.goal.target_pose.header.stamp = rospy.Time.now()\n\n        \n\n        if self.open_state == 0 and self.door_plan == 0:\n            self.move_base.send_goal(self.goal) \n            self.door_plan=1\n            rospy.loginfo(\"plan door\")\n        # Five-minute time limit \n        \n        state = self.move_base.get_state() \n        if state == GoalStatus.SUCCEEDED: \n            \n            \n            self.open_state=1\n            self.door_plan=0\n            rospy.loginfo(\"open door\")\n\n    def callback(self,data):\n\n        if data.data == 'open':\n            self.state=1\n        \n        \n        elif data.data == '2':\n            self.state=0\n            self.open_state=0\n        \n\n    def get_pos(self): \n        try: \n            (trans, rot) = self.tf_listener.lookupTransform('/map', '/robot2/base_link', rospy.Time(0)) \n        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): \n            rospy.loginfo(\"tf Error\") \n            return None \n        \n        \n        \n        x = trans[0] \n        y = trans[1] \n        \n        return (x, y)\n        \n\ndef trunc(f, n): \n    slen = len('%.*f' % (n, f)) \n\n    return float(str(f)[:slen]) \n\n\n\n\nif __name__ == '__main__': \n    try: \n        robot1=NavTest() \n        while not rospy.is_shutdown():\n            if robot1.state == 1:\n                robot1.open_door()\n            else:\n                robot1.circle()\n\n        \n        \n        rospy.spin() \n\n    except rospy.ROSInterruptException: \n        rospy.loginfo(\"Random navigation finished.\")\n","sub_path":"simulation/gazebo/pioneer3at_navigation/scripts/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500236041","text":"# Counting DNA Nucleotides\n# http://rosalind.info/problems/dna/\n\nwith open('rosalind_dna.txt','r') as open_file:\n\tline = 
open_file.readline()\nprint(line)\nA, C, G, T = 0, 0, 0, 0\n\nfor i in line:\n\tif i=='A': A+=1\n\telif i=='C': C+=1\n\telif i=='G': G+=1\n\telif i=='T': T+=1\nprint(A,C,G,T)\n","sub_path":"rosalind.info/DNA.py","file_name":"DNA.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155147607","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# patch.py\n# \n# Copyright 2014 \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \nimport cv2\n\ncap = cv2.VideoCapture(-1)\n\nif(not cap.isOpened()):\n print(\"Cannot open camera\")\nelse:\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)\n \n spotFilter = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n maskMorph = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)) \n\n lowH = 20\n highH = 59;\n\n lowS = 150\n highS = 255\n\n lowV = 0\n highV = 120\n\n while(1):\n frameCount = 0\n while frameCount < 5:\n cap.read()\n frameCount = frameCount + 1\n success, frame = cap.read()\n if (not success):\n print(\"Cannot read a frame from the camera\")\n else:\n imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n mask = cv2.inRange(imgHSV, (lowH, lowS, lowV), (highH, highS, highV))\n\n # Remove spots in image \n mask = cv2.erode(mask, spotFilter)\n # Create mask\n mask = cv2.dilate(mask, maskMorph)\n \n # Find the contours in the mask\n contours, hierarchy = cv2.findContours(mask, 1, 2)\n\n # Find the contour with the greatest area\n area = 0.0\n contour = None\n for candidateContour in contours:\n candidateArea = cv2.contourArea(candidateContour)\n if candidateArea > area:\n area = candidateArea\n contour = candidateContour\n\n # Get the bounding rectangle for the contour\n x = y = w = h = 0\n if len(contours) > 0:\n x, y, w, h = cv2.boundingRect(contour)\n\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\n cv2.imshow(\"Patch area\", frame)\n if(cv2.waitKey(1) == 27):\n break\n cap.release()\ncv2.destroyAllWindows()\n\n","sub_path":"src/gz_piter/Python/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464141983","text":"from django.core.management.base import NoArgsCommand, CommandError\nfrom sentry.models import Message, GroupedMessage\n\n\nclass Command(NoArgsCommand):\n help = \"WARNING: This completely wipes out all Sentry data.\"\n\n def handle_noargs(self, **options):\n grouped_messages = GroupedMessage.objects.all()\n messages = Message.objects.all()\n\n # Count these... 
for output\n num_grouped = grouped_messages.count()\n num_messages = messages.count()\n\n if num_grouped == 0 and num_messages == 0:\n raise CommandError(\"There's nothing to delete!\")\n\n # Delete\n grouped_messages.delete()\n messages.delete()\n\n # Confirmation\n w = self.stdout.write\n w(\"\\n- Deleted {0} GroupedMessages\".format(num_grouped))\n w(\"\\n- Deleted {0} Messages\\n\\n\".format(num_messages))\n","sub_path":"sentry/management/commands/nuke_sentry.py","file_name":"nuke_sentry.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105846654","text":"from torch.utils.data.dataloader import DataLoader\n\nfrom data.datasets.voc import VOCDataset\nfrom data.transforms.yolo import YOLOTransform\nfrom models.detect_model import DetectModel\nfrom utils import image_util\n\n\ndef main():\n model = DetectModel()\n voc_ds = VOCDataset(\"D:\\\\Datasets\", \"2007\")\n yolo_trans = YOLOTransform(voc_ds.classes_list())\n val_ds = voc_ds.build_dataset(\"val\", image_transform=yolo_trans.image_transform, target_transform=yolo_trans.target_transform)\n val_dl = DataLoader(val_ds, batch_size=1, shuffle=True, num_workers=0)\n for images, labels in val_dl:\n predicts = model(images)\n image = image_util.tensor_to_image(images[0])\n label = labels[0].numpy()\n predict = predicts[0].detach().numpy()\n to_show = image.copy()\n image_util.draw_label(to_show, label)\n image_util.imshow(to_show)\n to_show = image.copy()\n image_util.draw_predict(to_show, predict, threshold=0)\n image_util.imshow(to_show)\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85538611","text":"DEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'template1',\n 'USER': 'DOTCLOUD_DB_SQL_LOGIN',\n 'PASSWORD': 'DOTCLOUD_DB_SQL_PASSWORD',\n 'HOST': 'DOTCLOUD_DB_SQL_HOST',\n 'PORT': int('12345'),\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","sub_path":"src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602189941","text":"from rest_framework import serializers\nfrom django.contrib.auth import get_user_model\nfrom rest_auth.registration.serializers import RegisterSerializer\nimport re\nfrom .models import Image\n\n\nclass MyCustomRegisterSerializer(RegisterSerializer):\n MALE = 'M'\n FEMALE = 'F'\n GENDER_CHOICES = (\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n )\n\n BASIC = 'Basic'\n VIP = 'VIP'\n PREMIUM = 'Premium'\n SUBSCRIPTION_CHOICES =(\n (BASIC, 'Basic'),\n (VIP, 'VIP'),\n (PREMIUM, 'Premium'),\n )\n user_nickname = serializers.CharField(\n required=False,\n max_length=30,\n )\n gender = serializers.ChoiceField(\n choices=GENDER_CHOICES,\n )\n subscription = serializers.ChoiceField(\n choices=SUBSCRIPTION_CHOICES,\n default=BASIC,\n )\n premium_distance = serializers.IntegerField(allow_null=True)\n profile_image = serializers.ImageField(\n 
max_length=None,\n required=True,\n use_url=True\n )\n additional_image = serializers.ImageField(\n max_length=None,\n required=False,\n use_url=True\n )\n\n def get_cleaned_data(self):\n data_dict = super().get_cleaned_data()\n data_dict['user_nickname'] = self.validated_data.get('user_nickname', '')\n data_dict['gender'] = self.validated_data.get('gender', '')\n data_dict['subscription'] = self.validated_data.get('subscription', '')\n data_dict['premium_distance'] = self.validated_data.get('premium_distance', '')\n data_dict['profile_image'] = self.validated_data.get('profile_image', '')\n data_dict['additional_image'] = self.validated_data.get('additional_image', '')\n return data_dict\n\n\nclass ImageSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Image\n fields = ('id', 'profile_image', 'additional_image')\n\n\nclass MyUserListSerializer(serializers.ModelSerializer):\n images = ImageSerializer(read_only=True, many=True)\n\n class Meta:\n model = get_user_model()\n fields = ('url', 'username', 'images', 'email', 'user_nickname', 'match', 'latitude', 'longitude')\n\n\nclass MyUserDetailSerializer(serializers.ModelSerializer):\n images = ImageSerializer(read_only=True, many=True)\n like = serializers.BooleanField(default=False, write_only=True)\n\n class Meta:\n model = get_user_model()\n fields = ('username', 'images', 'email', 'user_nickname', 'my_likes', 'whom_I_liked',\n 'match', 'conversation', 'like')\n extra_kwargs = {\n 'username': {'read_only': True},\n 'email': {'read_only': True},\n 'my_likes': {'read_only': True},\n 'whom_I_liked': {'read_only': True},\n 'match': {'read_only': True},\n 'conversation': {'read_only': True},\n 'user_nickname': {'read_only': True}\n }\n\n # Method is overridden to hide field conversation and its link in case there is no match between users.\n def __init__(self, *args, **kwargs):\n try:\n if len(args) != 0:\n if kwargs['context']['request'].user.id not in args[0].match:\n del self.fields['conversation']\n else:\n if kwargs['context']['request'].user.id not in kwargs['instance'].match:\n del self.fields['conversation']\n except AttributeError:\n pass\n super().__init__(*args, **kwargs)\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n images = ImageSerializer(read_only=True, many=True)\n profile_image = serializers.ImageField(max_length=None, required=False, write_only=True)\n additional_image = serializers.ImageField(max_length=None, required=False, write_only=True)\n\n class Meta:\n model = get_user_model()\n fields = ('id', 'url', 'username', 'images', 'email', 'user_nickname', 'gender', 'subscription',\n 'premium_distance', 'my_likes', 'whom_I_liked', 'match', 'latitude', 'longitude',\n 'profile_image', 'additional_image')\n extra_kwargs = {\n 'my_likes': {'read_only': True},\n 'whom_I_liked': {'read_only': True},\n 'match': {'read_only': True},\n }\n\n # Check that latitude and longitude are valid coordinates (if match 'NN.NNNNN' pattern). 
We also\n    # clean the coordinates of user mistakes (for input 'fgdfgNN.NNNNN' this will return 'NN.NNNNN')\n    def validate(self, data):\n        latitude = data['latitude']\n        longitude = data['longitude']\n        pattern = r'[0-9]{2}\\.[0-9]{5,9}'\n        cleaned_latitude = re.findall(pattern, latitude)\n        cleaned_longitude = re.findall(pattern, longitude)\n        if len(cleaned_longitude) == 0 or len(cleaned_latitude) == 0:\n            raise serializers.ValidationError(\"invalid coordinates\")\n        else:\n            data['latitude'] = cleaned_latitude[0]\n            data['longitude'] = cleaned_longitude[0]\n\n        subscription = data['subscription']\n        premium_distance = data['premium_distance']\n        if subscription == 'Premium' and premium_distance is None:\n            raise serializers.ValidationError(\"invalid premium distance\")\n        return data\n","sub_path":"social/profiles/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483298003","text":"# encoding=utf-8\n# Initialize the sites and network segments, and the starting entry point\nfrom backend.db.simulate.prepare_data import site_data, netseg_data\nfrom backend.db.simulate.step1.step_config import operation\nfrom time import sleep\n\nsites = site_data\noperation.set_simulate_data(sites).catch_sites()\nsleep(1)\n\nsite_net = netseg_data\noperation.set_simulate_data(netseg_data).catch_netsegs()\n\nentry_host = [\n    {u'网段': u'192.168.1.0/24', u'操作系统': u'Windows 7', u'Attacked': 0,\n     u'IP': u'192.168.1.50', u'端口': u'tcp:80', u'业务类型': None,\n     u'Mac': u'48-11-5F-82-AC-EF', u'进程': u'notepad.exe',\n     u'目标类型': u'主机', u'KeyFlag': 0, u'SiteNet': u'192.168.10.0',\n     u'入口点': 1}\n]\noperation.set_simulate_data(entry_host).catch_hosts()\n","sub_path":"backend/db/simulate/step1/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530395157","text":"from django.urls import path, include\n\nfrom . 
import views\n\n# Cart Urls\nurlpatterns = [\n path('', views.Homepage.as_view(), name=\"homepage\"),\n path('cart/', views.ListCart.as_view(), name='list-carts'),\n path('cart//', views.DetailCart.as_view(), name='detail-cart'),\n path('cart/order/', views.OrderCart.as_view(), name='order-cart'),\n path('cart//update/', views.Updatecart.as_view(), name='update-cart'),\n path('cart//delete/', views.DeleteCart.as_view(), name='delete-cart'),\n path('cart/create/', views.CreateNewCart.as_view(), name='create-new-cart')\n]\n\n\n\n","sub_path":"skipship/cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544870246","text":"import bpy, sys, os\n\nmodel = sys.argv[4]\nhmap = sys.argv[5]\n\nw = int(sys.argv[6])\nh = int(sys.argv[7])\n\nhm = float(sys.argv[8])\n\n# Delete all old objects, so we start with a clean slate.\nscn = bpy.context.scene\nfor ob in scn.objects:\n scn.objects.active = ob\n print(\"Delete\", ob, bpy.context.object)\n bpy.ops.object.mode_set(mode='OBJECT')\n scn.objects.unlink(ob)\n del ob\n\nbpy.ops.mesh.primitive_plane_add()\nbpy.ops.transform.resize(value=(w/2,-h/2,1))\nbpy.ops.object.mode_set(mode='EDIT')\nbpy.ops.mesh.subdivide(number_cuts=max(w, h))\nbpy.ops.object.mode_set(mode='OBJECT')\n\ntex = bpy.data.textures.new('GroundTex', 'IMAGE')\ntex.image = bpy.data.images.load(hmap)\n#tex.image.colorspace_settings = 'RAW'\n\nbpy.ops.object.modifier_add(type='DISPLACE')\nbpy.data.objects['Plane'].modifiers['Displace'].mid_level = 0\nbpy.data.objects['Plane'].modifiers['Displace'].texture = tex\nbpy.data.objects['Plane'].modifiers['Displace'].strength = hm\nbpy.ops.object.modifier_apply(modifier='Displace')\n\nbpy.ops.object.modifier_add(type='DECIMATE')\nbpy.data.objects['Plane'].modifiers['Decimate'].decimate_type = 'DISSOLVE'\nbpy.ops.object.modifier_apply(modifier='Decimate')\n\nbpy.ops.object.shade_smooth()\n\nbpy.ops.wm.collada_import(filepath=model, import_units=True)\n\n#bpy.ops.wm.collada_export(filepath='toto.dae', check_existing=False)\noutfile = os.path.splitext(model)[0] + '.blend'\nbpy.ops.wm.save_as_mainfile(filepath=outfile, check_existing=False)\n\nbpy.ops.wm.quit_blender()\n","sub_path":"layout2dae/gen_terrain.py","file_name":"gen_terrain.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529461050","text":"#This program calculates a user's current age in days\n\n#CONSTANTS\nMONTHS = 0\nDAYS = 1\nYEARS = 2\n\n\ndef calculate_day_of_year(m, d):\n months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n days = d\n if m == 0:\n return days\n for i in range(m):\n days += months[i]\n return days\n\n\ndef get_a_date():\n\n date = input(\"Please enter the date in the form mm/dd/yyyy\")\n date_list = date.split('/')\n for i in range(len(date_list)):\n# print(date_list[i])\n date_list[i] = int(date_list[i])\n days = calculate_day_of_year (date_list[MONTHS], date_list[DAYS])\n day = [days, date_list[YEARS]]\n return day\n\n\ndef calculate_age(birthday, today):\n days = 365 * (today[1] - birthday[1]) + today[0] - birthday[0]\n return days\n\n\n\nif __name__ == \"__main__\":\n print(\"This program calculates your current age in days\")\n print(\"Enter today's date and then your birthday and you'll see the answer\")\n today = get_a_date()\n birthday = get_a_date()\n answer = calculate_age(birthday, today)\n print (\"Your age in days is \", 
answer)\n","sub_path":"Spring_2021_code_samples/March_10_scope.py","file_name":"March_10_scope.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170238373","text":"cases = int(input())\n\ndef mymod(a, n):\n    k = a % n\n    return k if k != 0 else n\n\ndef paired(n, d, a):\n    base = [d,a]\n    for i in range(1,n+1):\n        if i == d or i == a:\n            continue\n        base.append(i)\n    mat = [[base[(i - j) % n] for i in range(n)] for j in range(n)]\n\n    mat[n-3][n-2] = base[n-1]\n    mat[n-3][0] = a\n    for i in range(1,n-3):\n        mat[n-3][i] = base[i+2]\n    mat[n-1][n-1] = base[1]\n    mat[n-2][n-2] = base[1]\n    mat[n-1][n-2] = base[0]\n    mat[n-2][n-1] = base[0]\n    for i in range(0, n-2):\n        low = n-2 + (i%2)\n        hi = n-1 - (i%2)\n        mat[low][i] = base[i+1] if i > 0 else base[2]\n        mat[hi][i] = base[i+3] if i < n-3 else base[n-1]\n    return mat\n\ndef unpaired(n, d, a, b):\n    base = [d,a]\n    for i in range(1,n+1):\n        if i == d or i == a or i == b:\n            continue\n        base.append(i)\n    base.append(b)\n    mat = [[base[(i - j) % n] for i in range(n)] for j in range(n)]\n    for i in range(n):\n        mat[i][n-2], mat[i][n-1] = mat[i][n-1], mat[i][n-2]\n    return mat\n\nfor case in range(1, cases + 1):\n    n, k = map(int, input().split())\n    ok = True\n    mat = []\n    if k < n or k > n*n or k == n+1 or k == n*n - 1:\n        ok = False\n    elif k % n == 0:\n        diag = k // n\n        mat = [[mymod(diag + j - i, n) for j in range(n)] for i in range(n)]\n    elif n == 3:\n        ok = False\n    else:\n        # find a partition of form dd...daa or dd...dab\n        for i in range(1, n + 1):\n            delta = k - i*(n-2)\n            if delta >= 2 and delta < 2*n:\n                d = i\n                a = delta//2\n                b = delta - a\n                if a == d or b == d:\n                    # a=1,d=1 is either n or n+1, already handled\n                    # a=1,b=2,d=2 will be treated as 11...1xy\n                    a -= 1\n                    # b=n,d=n is either n*n or n*n-1, handled\n                    # b=n,a=n-1,d=n-1 has to be n, so continue\n                    if b == n:\n                        continue\n                    b += 1\n                break\n        if a == b:\n            mat = paired(n, d, a)\n        else:\n            mat = unpaired(n, d, a, b)\n    if ok:\n        print(\"Case #{}: POSSIBLE\".format(case))\n        for r in mat:\n            print(' '.join(str(x) for x in r))\n    else:\n        print(\"Case #{}: IMPOSSIBLE\".format(case))\n","sub_path":"python_works/codejam2020/indicium.py","file_name":"indicium.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154837888","text":"#@Author: Tharyck Vasconcelos\n\n#Copy your dirlididi log, making sure it covers the desired date and time. 
\n#(CAREFUL: dirlididi works in GMT 0, remember to always add 3 extra hours)\n#create a file called log.txt (preferably in Sublime), paste your log, and save it in the same directory as this script.\n#Run the script.\n#An output file called notas.txt will be generated, containing each student's email and their respective grade.\n#buy me a beer for having saved you hours of grading a lab!\n\n\n#If you find any errors, or want to edit the code, feel free!\n\nimport collections\nref_arquivo = open(\"log.txt\",\"r\")\nlinha = ref_arquivo.readline()\nturma = []\nfinal = []\n\n#Array with the key of each question\nquestoes = []\n\n#get the number of questions and the key of each one\nfor i in range(int(raw_input(\"Informe a quantidade de questoes: \"))):\n\tquestoes.append(raw_input(\"Informe o Key da \" + str(i + 1) + \" Questao: \"))\n\n\n#filter the questions that passed\nwhile linha:\n\t#take each line of the file and split it\n    valores = linha.split()\n\n    #keep only the questions that passed\n    if('true' in linha):\n        #check whether the passing question belongs to the lab\n        if(valores[2] in questoes):\n            novalinha = valores[1] + ' ' + valores[2]\n            #check whether the question passed more than once\n            if(novalinha not in turma):\n                turma.append(novalinha)\n    linha = ref_arquivo.readline()\n\n#count how many questions each student got right and convert that into a \"grade\"\nfor i in range(len(turma)):\n    contador = 0\n    valores = turma[i].split()\n    for j in range(len(turma)):\n        valor = turma[j].split()\n        if(valores[0] == valor[0]):\n            contador+=1\n\n\t#Grade\n    nota = (10.0/len(questoes))*contador\n\n    #alunoNota = student's email + lab grade\n    alunoNota = valores[0] + ' ' + str(nota)\n    if(alunoNota not in final):\n        final.append(alunoNota)\n\n#write to the output file notas.txt\nref_arquivo = open(\"notas.txt\", \"w\")\n#sort the class before writing to the output\nturma = sorted(final)\n#write to the output\nfor i in range(len(turma)):\n    ref_arquivo.writelines(turma[i] + '\\n')\n\n#close the notas.txt file\nref_arquivo.close()","sub_path":"dirlididi_script.py","file_name":"dirlididi_script.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296253749","text":"# coding: utf-8\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport logging\nfrom collections import namedtuple\nimport random\nfrom PIL import Image\n\nfrom zSAC.Trainer_SAC import Model\n\nfrom nes_py.wrappers import JoypadSpace\nimport gym_super_mario_bros\nfrom gym_super_mario_bros.actions import SIMPLE_MOVEMENT\n\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\nSeg = namedtuple(\"Seg\", [\"s\", \"a\", \"a_logits\", \"prev_a\", \"r\", \"s1\", \"gaes\", \"ret\", \"v_cur\", \"v_tar\", \"c_in\", \"h_in\"])\n\n\nclass Env(object):\n    def __init__(self, act_space, act_repeats, frames, game):\n        self.act_space = act_space\n        self.act_repeats = act_repeats\n        self.act_repeat = random.choice(self.act_repeats)\n        self.frames = frames\n\n        self.max_pos = -10000\n\n        self.count = 0\n\n        env = gym_super_mario_bros.make(game)\n        self.env = JoypadSpace(env, SIMPLE_MOVEMENT)\n\n        s_t = self.resize_image(self.env.reset())\n\n        self.s_t = np.tile(s_t, [1, 1, frames])\n        self.s_t1 = None\n        self.s = [self.s_t]\n\n        self.a_t = random.randint(0, act_space - 1)\n        self.a = []\n        self.a_logits = []\n        self.a0 = []\n        self.r = []\n        self.pos = []\n\n        self.v_cur = []\n\n        self.c_in = []\n        self.h_in = []\n        self.c_in_t = np.zeros(128 * 4, dtype=np.float32)\n        self.h_in_t = np.zeros(128 * 4, 
dtype=np.float32)\n\n self.done = False\n\n def step(self, a, c_in, h_in):\n prev_a = self.a_t\n self.count += 1\n if self.count % self.act_repeat == 0:\n self.a_t = a\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n gs_t1, gr_t, gdone, ginfo = self.env.step(self.a_t)\n self.env.render()\n if not gdone:\n s_t1, r_t, done, info = self.env.step(self.a_t)\n r_t += gr_t\n r_t /= 2.\n else:\n s_t1 = gs_t1\n r_t = gr_t\n done = gdone\n info = ginfo\n r_t /= 15.\n s_t1 = self.resize_image(s_t1)\n channels = s_t1.shape[-1]\n self.s_t1 = np.concatenate([s_t1, self.s_t[:, :, :-channels]], axis=-1)\n\n self.s.append(self.s_t1)\n self.a.append(self.a_t)\n self.a0.append(prev_a)\n self.r.append(r_t)\n self.max_pos = max(self.max_pos, info[\"x_pos\"])\n self.pos.append(info[\"x_pos\"])\n if (len(self.pos) > 500) and (\n info[\"x_pos\"] - self.pos[-500] < 5) and (\n self.pos[-500] - info[\"x_pos\"] < 5):\n done = True\n self.done = done\n\n self.c_in.append(self.c_in_t)\n self.h_in.append(self.h_in_t)\n self.c_in_t = c_in\n self.h_in_t = h_in\n\n self.s_t = self.s_t1\n\n def update_v(self, v_cur):\n self.v_cur.append(v_cur)\n\n def reset(self, force=False):\n if self.done or force:\n self.count = 0\n self.act_repeat = random.choice(self.act_repeats)\n\n s_t = self.resize_image(self.env.reset())\n\n self.s_t = np.tile(s_t, [1, 1, self.frames])\n self.s_t1 = None\n self.s = [self.s_t]\n\n self.a_t = random.randint(0, self.act_space - 1)\n self.a = []\n self.a_logits = []\n self.a0 = []\n self.r = []\n self.pos = []\n\n self.v_cur = []\n\n self.c_in = []\n self.h_in = []\n self.c_in_t = np.zeros(128 * 4, dtype=np.float32)\n self.h_in_t = np.zeros(128 * 4, dtype=np.float32)\n\n self.done = False\n\n def get_state(self):\n return self.s_t\n\n def get_act(self):\n return self.a_t\n\n def get_max_pos(self):\n return self.max_pos\n\n def reset_max_pos(self):\n self.max_pos = -10000\n\n def get_c_in(self):\n return self.c_in_t\n\n def get_h_in(self):\n return self.h_in_t\n\n def get_history(self, force=False):\n if self.done or force:\n s = self.s[:-1]\n s1 = self.s[1:]\n a = self.a\n a0 = self.a0\n r = self.r\n seg = Seg(s, a, a0, r, s1, self.c_in, self.h_in)\n return seg\n return None\n\n @staticmethod\n def resize_image(image, size=84):\n image = Image.fromarray(image)\n image = image.convert(\"L\")\n image = image.resize((size, size))\n image = np.array(image)\n image = image / 255.\n image = np.array(image, np.float32)\n return image[:, :, None]\n\n\ndef run():\n CKPT_DIR = \"ckpt/sac0\"\n\n frames = 1\n action_repeats = [1]\n MAX_STEPS = 320000\n CLIP = 1.0\n\n sess = tf.Session()\n\n phs = dict()\n\n phs[\"s\"] = tf.placeholder(dtype=tf.float32, shape=[None, None, 84, 84, frames])\n phs[\"prev_a\"] = tf.placeholder(dtype=tf.int32, shape=[None, None])\n phs[\"a\"] = tf.placeholder(dtype=tf.int32, shape=[None, None])\n phs[\"r\"] = tf.placeholder(dtype=tf.float32, shape=[None, None])\n phs[\"bootstrap_s\"] = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, frames])\n phs[\"c_in\"] = tf.placeholder(dtype=tf.float32, shape=[None, 128 * 4])\n phs[\"h_in\"] = tf.placeholder(dtype=tf.float32, shape=[None, 128 * 4])\n phs[\"slots\"] = tf.placeholder(dtype=tf.float32, shape=[None, None])\n phs[\"bootstrap_slots\"] = tf.placeholder(dtype=tf.float32, shape=[None])\n\n with tf.variable_scope(\"p_lstm\", reuse=tf.AUTO_REUSE):\n plstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"p_lstm\", reuse=tf.AUTO_REUSE):\n 
qlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"v_lstm\", reuse=tf.AUTO_REUSE):\n vlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"v_tar_lstm\", reuse=tf.AUTO_REUSE):\n vtarlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n model = Model(7, plstm, qlstm, vlstm, vtarlstm, \"agent\", **phs)\n\n saver = tf.train.Saver(max_to_keep=None, keep_checkpoint_every_n_hours=6)\n\n ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n saver.restore(sess, os.path.join(CKPT_DIR, ckpt.model_checkpoint_path.split(\"/\")[-1]))\n\n envs = []\n games = [\"SuperMarioBros-1-1-v0\",\n \"SuperMarioBros-2-1-v0\",\n \"SuperMarioBros-4-1-v0\",\n \"SuperMarioBros-5-1-v0\"]\n # games = [\"SuperMarioBros-2-3-v0\",\n # \"SuperMarioBros-5-2-v0\",\n # \"SuperMarioBros-7-1-v0\",\n # \"SuperMarioBros-7-3-v0\",\n # \"SuperMarioBros-8-1-v0\",\n # \"SuperMarioBros-8-2-v0\",\n # \"SuperMarioBros-8-3-v0\"]\n for i in range(len(games)):\n env = Env(7, action_repeats, frames, games[i])\n envs.append(env)\n\n while True:\n for i in range(MAX_STEPS):\n _s_t_batch = [env.get_state()[None, :, :, :] for env in envs]\n _a_t_batch = [[env.get_act()] for env in envs]\n _c_in_batch = [env.get_c_in() for env in envs]\n _h_in_batch = [env.get_h_in() for env in envs]\n\n _a_t_new, _a_t_logits, _c_out_batch, _h_out_batch = sess.run(\n [model.get_current_act(),\n model.get_current_act_logits(),\n model.c_out,\n model.h_out],\n feed_dict={model.s_t: _s_t_batch,\n model.previous_actions: _a_t_batch,\n model.c_in: _c_in_batch,\n model.h_in: _h_in_batch})\n if random.random() > 0.5:\n _a_t_new = np.argmax(_a_t_logits, axis=-1)\n\n [env.step(\n _a_t_new[i][0],\n _c_out_batch[i],\n _h_out_batch[i]\n ) for (i, env) in enumerate(envs)]\n\n force = False\n if i == MAX_STEPS - 1:\n force = True\n\n [env.reset(force) for env in envs]\n\n\nif __name__ == '__main__':\n run()\n pass\n","sub_path":"zSAC/Test_SAC.py","file_name":"Test_SAC.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455716019","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom enum import Enum\nfrom random import randint\n\ngame_info = '1.Rock\\t2.Scissors\\t3.Paper\\nChoose a variant: '\n\n\nclass GameChoice(Enum):\n Rock = 1\n Scissors = 2\n Paper = 3\n\n def win(self, other):\n if self.__class__ is other.__class__:\n exp1 = (self == GameChoice.Paper and other == GameChoice.Rock)\n exp2 = (self == GameChoice.Rock and other == GameChoice.Scissors)\n exp3 = (self == GameChoice.Scissors and other == GameChoice.Paper)\n return exp1 or exp2 or exp3\n return NotImplemented\n\n\ndef choose(num):\n return GameChoice(int(num))\n\n\ndef invalid_input(i):\n return not (int(i) in range(1, 4))\n\n\ndef user_choice():\n i = input(game_info)\n while invalid_input(i):\n print('Choose number from range(1, 3): ')\n i = input(game_info)\n return choose(i)\n\n\ndef bot_choice():\n return choose(randint(1, 3))\n # return choose(1)\n\n\ndef game(user_step, bot_step):\n if user_step == bot_step:\n print(f'\\nyou = {user_step.name}, bot = {bot_step.name}')\n print('\\nTied, try again')\n game(user_choice(), bot_choice())\n else:\n print(f'\\nyou = {user_step.name}, bot = {bot_step.name}')\n user_win = user_step.win(bot_step)\n if user_win:\n print('\\nYou win')\n else:\n print('\\nYou 
lose')\n\n\nif __name__ == '__main__':\n    bot = bot_choice()\n    user = user_choice()\n    game(user, bot)\n","sub_path":"5_3.py","file_name":"5_3.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299980309","text":"import functools\nimport time\nimport urllib\n\nimport xbmc\nimport xbmcgui\nimport xbmcplugin\n\nfrom resources.lib import metadata, movies\nfrom resources.lib.apis import tmdb, trakt\nfrom resources.lib.router import router\n\n\n\n@router.route('/tv/trakt/popular')\ndef tv_popular(page=1):\n    for show in metadata.trakt_shows(page=page):\n        li = xbmcgui.ListItem(show['title'])\n        xbmcplugin.addDirectoryItem(router.handle,\n                                    '',\n                                    li,\n                                    False)\n    xbmcplugin.addDirectoryItem(router.handle,\n                                router.build_url(tv_popular,\n                                                 page=int(page)+1),\n                                xbmcgui.ListItem('Next'),\n                                True)\n    xbmcplugin.endOfDirectory(router.handle)\n\n\n@router.route('/app/tv')\ndef tv():\n    router.gui_dirlist([(tv_popular, 'Popular TV')],\n                       dirs=True)\n\n\n@router.route('/')\ndef root():\n    xbmcplugin.addDirectoryItem(router.handle,\n                                router.build_url(movies.root),\n                                xbmcgui.ListItem('Movies'),\n                                True)\n    xbmcplugin.addDirectoryItem(router.handle,\n                                router.build_url(authenticate_trakt),\n                                xbmcgui.ListItem('Auth Trakt'),\n                                False)\n    xbmcplugin.endOfDirectory(router.handle)\n\n\n@router.route('/auth_trakt')\ndef authenticate_trakt():\n    init = trakt.authenticate()\n    dialog = xbmcgui.DialogProgress()\n    dialog.create('FoxyMeta: Authorize Trakt',\n                  ('Enter the following code at:\\n'\n                   '{url}\\n\\n'\n                   '{code}').format(url=init['verification_url'],\n                                    code=init['user_code']))\n    expires = time.time() + init['expires_in']\n    while True:\n        time.sleep(init['interval'])\n        try:\n            token = trakt.authenticate(init['device_code'])\n        except Exception:\n            pct_timeout = (time.time() - expires) / init['expires_in'] * 100\n            pct_timeout = 100 - int(abs(pct_timeout))\n            if pct_timeout >= 100 or dialog.iscanceled():\n                dialog.close()\n                xbmcgui.Dialog().notification('FoxyMeta',\n                                              'Trakt Authorization Failed')\n                return\n            dialog.update(int(pct_timeout))\n        else:\n            dialog.close()\n            save_trakt_auth(token)\n            xbmcgui.Dialog().notification('FoxyMeta',\n                                          'Trakt Authorization Succeeded')\n            return\n\n\n@router.route('/revoke_trakt')\ndef revoke_trakt():\n    dialog = xbmcgui.Dialog()\n    choice = dialog.yesno('FoxyMeta: Revoke Trakt',\n                          ('Are you sure you want to revoke authorization with'\n                           ' Trakt.tv?'))\n    if not choice:\n        return\n    result = trakt.revoke(router.addon.getSettingString('trakt.access_token'))\n    if result.status_code == 200:\n        router.addon.setSettingString('trakt.access_token', '')\n        router.addon.setSettingString('trakt.refresh_token', '')\n        router.addon.setSettingString('trakt.username', '')\n        router.addon.setSettingInt('trakt.expires', 0)\n        xbmcgui.Dialog().notification('FoxyMeta', 'Trakt Authorization Revoked')\n\n\ndef save_trakt_auth(response):\n    username = trakt.get('users/me', response['access_token'])['username']\n    router.addon.setSettingString('trakt.access_token',\n                                  response['access_token'])\n    router.addon.setSettingString('trakt.refresh_token',\n                                  response['refresh_token'])\n    expires = response['created_at'] + response['expires_in']\n    router.addon.setSettingInt('trakt.expires', expires)\n    router.addon.setSettingString('trakt.username', username)\n\n\nif __name__ == '__main__':\n    xbmcplugin.setContent(router.handle, 'movies')\n    
router.run()\n","sub_path":"plugin.video.foxymeta/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"144860915","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\n__name__ = 'simple_history'\n__author__ = 'Steven Klass'\n__version_info__ = (1, 3, 6)\n__version__ = '.'.join(map(str, __version_info__))\n__date__ = '2010/10/22 4:47:00 PM'\n__credits__ = ['Steven Klass', 'Marty Alchin', 'George Vilches', 'Corey Betram']\n__license__ = 'See the file LICENSE.txt for licensing information.'\n","sub_path":"simple_history/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"133770978","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 11 20:30:27 2018\n\n@author: pmann\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('Kickstarter_2018-01.csv', error_bad_lines=False)\n\nselected_cols = ['backers_count',\n                 'blurb',\n                 'category.id',\n                 'category.name',\n                 'category.parent_id',\n                 'category.slug',\n                 'country',\n                 'created_at',\n                 'deadline',\n                 'goal',\n                 'launched_at',\n                 'name',\n                 'spotlight',\n                 'staff_pick',\n                 'state',\n                 'usd_pledged',\n                 'usd_type']\n \ndata = data[selected_cols]\n\n#because there are some \"bad lines\" a.k.a lines that are shifted to the right due to bad parsing\n#subset rows that are correctly parsed\n# correctly parsed rows should have a legit entry in the domestic col\n#domestic = data['usd_type'] == \"domestic\"\n#intl = data['usd_type'] == \"international\"\n#blank = data['usd_type'].isnull().any()\n#drop data with empty blurb or empty name entries\n#given more time, would have webscraped missing entries\ndata = data.dropna() \n \n#select only data with known status\nsuccessful = data['state'] == \"successful\"\nfailed = data['state'] == \"failed\"\ncancelled = data['state'] == \"cancelled\"\nsuspended = data['state'] == \"suspended\"\n\ndata = data.loc[failed | successful | cancelled | suspended]\n\n#correct class of columns\ncategorical_cols = ['category.id',\n                    'category.parent_id',\n                    'country',\n                    'spotlight',\n                    'staff_pick',\n                    'state',\n                    'usd_type']\n\ndata[categorical_cols] = data[categorical_cols].astype('category')\n\nnum_cols = ['usd_pledged',\n            'deadline',\n            'created_at',\n            'launched_at']\n\ndata[num_cols] = data[num_cols].apply(pd.to_numeric, errors='coerce')\n\n#drop rows where the numeric coercion above produced NaN values\ndata = data.dropna()\n\ndata['created_at'] = pd.to_datetime(data['created_at'],unit='s')\ndata['launched_at'] = pd.to_datetime(data['launched_at'],unit='s')\ndata['deadline'] = pd.to_datetime(data['deadline'],unit='s')\n\n#Check data\ndata.info()\ndata.describe()\ndata.columns[data.isnull().any()]\n","sub_path":"success_predict.py","file_name":"success_predict.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"151147414","text":"import hashlib\nimport logging\nimport os\nimport os.path\nimport tempfile\nimport 
zipfile\nfrom collections import defaultdict\nfrom functools import cached_property, lru_cache\n\nimport r2pipe\nfrom quark.android.axml import AxmlReader\nfrom quark.common.bytecode import Bytecode\nfrom quark.common.method import MethodId\n\n\nclass Apkinfo(object):\n    \"\"\"\n    Information about apk based on radare2 analysis.\n    \"\"\"\n\n    def __init__(self, apk_file, tmp_dir=None):\n        \"\"\"\n        Load an apk and extract all its contents to a temporary directory.\n\n        :param apk_file: path leading to an apk\n        :type apk_file: str\n        :param tmp_dir: a path where the temporary directory will be created, defaults to the system temp\n        :type tmp_dir: str, optional\n        \"\"\"\n        self._filename = os.path.basename(apk_file)\n        self._filepath = apk_file\n\n        self._tmp_dir = tempfile.mkdtemp() if tmp_dir is None else tmp_dir\n\n        # Extract file to tmp dir\n        with zipfile.ZipFile(self._filepath) as apk:\n            # Extract manifest\n            apk.extract('AndroidManifest.xml', path=self._tmp_dir)\n            # Extract all dex files\n            self._dex_list = []\n            for dex in filter(lambda f: f.startswith(\n                    'classes') and f.endswith('.dex'), apk.namelist()):\n                apk.extract(dex, path=self._tmp_dir)\n                self._dex_list.append(os.path.join(self._tmp_dir, dex))\n\n    @lru_cache()\n    def _get_r2(self, index):\n        r2 = r2pipe.open(self._dex_list[index])\n        r2.cmd('aa')\n        return r2\n\n    def r2_escape(self, string: str) -> str:\n        escapeList = ['>']\n\n        result = ''\n        for char in string:\n            if char in escapeList:\n                result = result + '\\\\'\n\n            result = result + char\n\n        return result\n\n    @property\n    def md5(self):\n        md5 = hashlib.md5()\n        with open(self.filepath, \"rb\") as f:\n            for chunk in iter(lambda: f.read(4096), b\"\"):\n                md5.update(chunk)\n        return md5.hexdigest()\n\n    @property\n    def filename(self):\n        return self._filename\n\n    @property\n    def filepath(self):\n        return self._filepath\n\n    @property\n    def filesize(self):\n        return os.path.getsize(self.filepath)\n\n    @property\n    def number_of_dex(self):\n        return len(self._dex_list)\n\n    @property\n    def manifest(self):\n        \"\"\"\n        Return a path to the extracted manifest file.\n        \"\"\"\n        return os.path.join(self._tmp_dir, 'AndroidManifest.xml')\n\n    @property\n    def dex_list(self):\n        \"\"\"\n        Return a list of paths to the extracted dex files.\n        \"\"\"\n        return self._dex_list\n\n    @cached_property\n    def permissions(self):\n        \"\"\"\n        Return the set of app permissions defined in the manifest file.\n        \"\"\"\n        axml = AxmlReader(self.manifest)\n        permission_list = set()\n\n        for tag in axml:\n            label = tag.get('Name')\n            if label and axml.get_string(label) == 'uses-permission':\n                attrs = axml.get_attributes(tag)\n\n                if attrs:\n                    permission = axml.get_string(attrs[0]['Value'])\n                    permission_list.add(permission)\n\n        return permission_list\n\n    def find_methods_by_addr(self, dex_index, address):\n        \"\"\"\n        Return a method object according to the given address.\n\n        :param address: an address\n        :type address: number-like object\n        :return: a method object or None\n        :rtype: MethodId\n        \"\"\"\n        if address < 0:\n            return None\n\n        r2 = self._get_r2(dex_index)\n        section = r2.cmdj(f'iSj. @ {address}')\n        if section is None or (section.get('name') != 'constpool' and section.get('name') != 'code'):\n            return None\n\n        symbol = r2.cmdj(f'isj. 
@ {address}')\n        if symbol['type'] != 'FUNC':\n            return None\n\n        signature = symbol['realname']\n        classname = signature[:signature.index('.method.')] + ';'\n        methodname = signature[signature.index(\n            '.method.') + 8:signature.index('(')]\n        descriptor = signature[signature.index('('):]\n\n        return MethodId(symbol['vaddr'], dex_index, classname, methodname, descriptor, symbol['is_imported'])\n\n    @lru_cache\n    def get_all_methods_classified(self, dexindex):\n        r2 = self._get_r2(dexindex)\n\n        method_json_list = r2.cmdj('isj')\n        method_dict = defaultdict(list)\n        for json_obj in method_json_list:\n            if json_obj.get('type') != 'FUNC':\n                continue\n\n            full_name = json_obj['realname']\n            classname, method_descriptor = full_name.split(\n                '.method.', maxsplit=1)\n            classname = classname + ';'\n\n            methodname = method_descriptor[:method_descriptor.index('(')]\n            descriptor = method_descriptor[method_descriptor.index('('):]\n\n            is_imported = json_obj['is_imported']\n\n            method = MethodId(\n                json_obj['vaddr'], dexindex, classname, methodname, descriptor, is_imported)\n            method_dict[classname].append(method)\n\n        return method_dict\n\n    def find_methods(self, classname, methodname='', descriptor='', dex_index=None):\n        \"\"\"\n        Find all methods matching the given information.\n\n        NOTE: finding a method with only the descriptor provided is currently not accurate.\n\n        :param classname: Name of the class the methods belong to\n        :type classname: str\n        :param methodname: Name of the method for matching, defaults to ''\n        :type methodname: str, optional\n        :param descriptor: Descriptor of the method for matching, defaults to ''\n        :type descriptor: str, optional\n        :param dex_index: Indicates which dex the given method is in, defaults to None\n        :type dex_index: non-negative number, optional\n        :return: a list of methods\n        :rtype: list of MethodId objects\n        \"\"\"\n\n        def method_filter(method: MethodId):\n            return (not methodname or methodname == method.methodname) and (not descriptor or descriptor == method.descriptor)\n\n        if dex_index is not None:\n            dex_list = [dex_index]\n        else:\n            dex_list = range(self.number_of_dex)\n\n        for dex_index in dex_list:\n            method_dict = self.get_all_methods_classified(dex_index)\n            filtered_methods = filter(method_filter, method_dict[classname])\n            yield from filtered_methods\n\n    def find_upper_methods(self, method: MethodId):\n        \"\"\"\n        Return the corresponding xref methods for the given method. 
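(This walks the JSON\n        cross-reference list that radare2's axtj command returns for the\n        method's address, keeps only CALL-type entries, and pairs each xref\n        address with the calling method resolved via find_methods_by_addr.)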
\n\n        :param method: a method object\n        :type method: MethodId\n        :yield: all xref methods\n        :rtype: a generator of MethodId objects\n        \"\"\"\n\n        r2 = self._get_r2(method.dexindex)\n\n        xrefs = r2.cmdj(f'axtj @ {method.address}')\n\n        for xref in xrefs:\n            if xref['type'] != 'CALL':\n                continue\n\n            if 'from' in xref:\n                yield (xref['from'], self.find_methods_by_addr(method.dexindex, xref['from']))\n            else:\n                logging.debug(\n                    f'Key \"from\" was not found while searching upper methods for {method}.')\n\n    def find_bytecode_by_addr(self, dex_index, offset):\n        r2 = self._get_r2(dex_index)\n\n        ins_json = r2.cmdj(f'pdj 1 @ {offset}')\n\n        if ins_json and 'disasm' in ins_json[0]:\n            ins = ins_json[0]\n            return Bytecode.get_by_smali(ins['offset'], ins['disasm'])\n        else:\n            return None\n\n    def get_function_bytecode(self, function: MethodId, start_offset=-1, end_offset=-1):\n        \"\"\"\n        Return the corresponding bytecode according to the address of the function in the given MethodId object.\n\n        :param function: a MethodId object\n        :type function: MethodId\n        :yield: all bytecode instructions\n        :rtype: a generator of quark-engine Bytecode objects\n        \"\"\"\n\n        if not function.is_import:\n\n            r2 = self._get_r2(function.dexindex)\n\n            instruct_flow = r2.cmdj(f'pdfj @ {function.address}')['ops']\n\n            if instruct_flow:\n\n                for ins in instruct_flow:\n                    if ins['offset'] < start_offset:\n                        continue\n                    if ins['offset'] >= end_offset >= 0:\n                        break\n\n                    yield Bytecode.get_by_smali(ins['offset'], ins['disasm'])\n\n    def check_valid(self):\n        pass\n\n    def __del__(self):\n        \"\"\"\n        Clean up all the extracted files.\n        \"\"\"\n        try:\n            for dirpath, _, files in os.walk(self._tmp_dir, False):\n                for filename in files:\n                    os.remove(os.path.join(dirpath, filename))\n            os.rmdir(self._tmp_dir)\n        except:\n            pass\n","sub_path":"quark/android/apk.py","file_name":"apk.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"493533249","text":"# Daniel Peek qer419\n# Michael Canas ohh135\n# CS3793 Assignment 04\n# 10/19/2018\n# neuralnet.py based on Dr. 
O'Hara's bp.c\n\nimport random\nimport neuralnet as nn\n\n\n# An individual card\nclass Card:\n def __init__(self, face):\n self.face = face\n if face.isdigit():\n self.value = int(face)\n elif face == \"J\" or face == \"Q\" or face == \"K\":\n self.value = 10\n elif face == \"A\":\n self.value = 1\n else:\n self.value = -1\n\n\n# Class for the dealer\nclass Dealer:\n def __init__(self):\n self.face_up_card = None\n self.hand = []\n self.total = 0\n self.stay = False\n self.bust = False\n\n # draw a card into the face down hand of the dealer\n def draw(self, deck):\n self.hand.append(deck.pop())\n self.total += self.hand[-1].value\n if self.total >= 17:\n self.stay = True\n if self.total > 21:\n self.bust = True\n\n # draw a card face up\n def draw_face_up(self, deck):\n self.face_up_card = deck.pop()\n self.total += self.face_up_card.value\n if self.total >= 17:\n self.stay = True\n\n # reset to initial values, used between games\n def clear(self):\n self.face_up_card = None\n self.hand.clear()\n self.total = 0\n self.stay = False\n self.bust = False\n\n\n# class for player\nclass Player:\n def __init__(self):\n self.hand = []\n self.total = 0\n self.stay = False\n self.bust = False\n\n # draw a card into hand\n def draw(self, deck):\n self.hand.append(deck.pop())\n self.total += self.hand[-1].value\n if self.total > 21:\n self.stay = True\n self.bust = True\n\n # reset to initial values, used between games\n def clear(self):\n self.hand.clear()\n self.total = 0\n self.stay = False\n self.bust = False\n\n\n# create deck as a list containing 4 of each card\ndef make_deck():\n deck = []\n for i in range(4):\n deck.append(Card(\"2\"))\n deck.append(Card(\"3\"))\n deck.append(Card(\"4\"))\n deck.append(Card(\"5\"))\n deck.append(Card(\"6\"))\n deck.append(Card(\"7\"))\n deck.append(Card(\"8\"))\n deck.append(Card(\"9\"))\n deck.append(Card(\"10\"))\n deck.append(Card(\"J\"))\n deck.append(Card(\"Q\"))\n deck.append(Card(\"K\"))\n deck.append(Card(\"A\"))\n return deck\n\n\n# shuffle the deck with 200 random swaps\ndef shuffle(deck):\n for i in range(200):\n x = random.randint(0, len(deck) - 1)\n y = random.randint(0, len(deck) - 1)\n deck[x], deck[y] = deck[y], deck[x]\n\n\n# trainer \"cheats\" by looking at the top card of the deck to determine ideal play\ndef desired_output(player, deck):\n if player.total + deck[-1].value > 21:\n # print(\"current total:\", player.total, \"next card:\", deck[-1].face)\n return 0\n else:\n # print(\"current total:\", player.total, \"next card:\", deck[-1].face)\n return 1\n\n# run a single game\ndef play(player, dealer, deck, net_2, net_3):\n global net2_total_right\n global net2_total_wrong\n global net3_total_right\n global net3_total_wrong\n\n # initial draws that happen every time\n player.draw(deck)\n dealer.draw_face_up(deck)\n player.draw(deck)\n dealer.draw(deck)\n\n # run the two-card version of the neural net\n # choose to stay or draw and adjust weights based on correctness\n sample = [player.hand[0].value, player.hand[1].value, dealer.face_up_card.value]\n guess = net_2.predict(sample)\n best_choice = desired_output(player, deck)\n net_2.adjust_weights(sample, best_choice)\n\n # record results\n if guess == best_choice:\n net2_total_right += 1\n else:\n net2_total_wrong += 1\n\n # set whether or not the net chose to stay\n if guess == 0:\n player.stay = True\n\n # draw the player's third card\n if player.stay is False:\n player.draw(deck)\n # dealer's third card\n if dealer.stay is False:\n dealer.draw(deck)\n\n # run the three card neural net\n 
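# the three-card net sees the player's three cards plus the dealer's up\n    # card and, like the two-card net above, predicts draw (1) or stay (0);\n    # its weights are corrected online against the \"cheating\" oracle that\n    # peeks at the next card in the deck\n    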
if player.stay is False:\n sample = [player.hand[0].value, player.hand[1].value, player.hand[2].value, dealer.face_up_card.value]\n guess = net_3.predict(sample)\n best_choice = desired_output(player, deck)\n net_3.adjust_weights(sample, best_choice)\n\n # record results\n if guess == best_choice:\n net3_total_right += 1\n else:\n net3_total_wrong += 1\n\n if guess == 0:\n player.stay = True\n\n # draw the player's fourth card\n if player.stay is False:\n player.draw(deck)\n # dealer's fourth card\n if dealer.stay is False:\n dealer.draw(deck)\n\n # calculate whether or not to draw 5th card\n if player.stay is False:\n # total value and size of a complete deck\n decision_number = 340\n number_of_cards = 52\n\n # calculate average value of cards in deck\n for card in player.hand:\n decision_number -= card.value\n number_of_cards -= 1\n decision_number -= dealer.face_up_card.value\n number_of_cards -= 1\n decision_number = decision_number / number_of_cards\n\n # if average value is less than or equal to difference between player's total and 21, draw\n if decision_number <= (21 - player.total):\n player.draw(deck)\n # if total is 21 or under, automatic win by 5 card charlie\n if player.total <= 21:\n return 1\n else:\n return 0\n else:\n player.stay = True\n\n # dealer's fifth card\n if dealer.stay is False:\n dealer.draw(deck)\n\n if player.bust is True and dealer.bust is True:\n # print(\"both bust, dealer wins tie\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 0\n elif player.bust is True and dealer.bust is not True:\n # print(\"player busts, dealer wins\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 0\n elif player.bust is False and dealer.bust is True:\n # print(\"dealer busts, player wins\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 1\n elif player.bust is False and dealer.bust is False:\n if player.total > dealer.total:\n # print(\"player wins\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 1\n elif player.total < dealer.total:\n # print(\"dealer wins\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 0\n else:\n # print(\"dealer wins tie\")\n # print(\"player total:\", player.total, \"dealer total:\", dealer.total, \"\\n\")\n return 0\n\n\ndef main():\n global wins\n global losses\n global net2_total_right\n global net2_total_wrong\n global net3_total_right\n global net3_total_wrong\n\n # set number of games and create neural nets\n games = 100000\n two_card_net = nn.NeuralNetwork(3, 12, 2, 0.01)\n three_card_net = nn.NeuralNetwork(4, 16, 2, 0.01)\n\n # set up players and deck\n random.seed()\n deck = make_deck()\n shuffle(deck)\n player = Player()\n dealer = Dealer()\n\n # run all games\n for i in range(games):\n # run a single game\n result = play(player, dealer, deck, two_card_net, three_card_net)\n if result == 0:\n losses += 1\n elif result == 1:\n wins += 1\n\n # reset for next game\n for card in player.hand:\n deck.append(card)\n for card in dealer.hand:\n deck.append(card)\n deck.append(dealer.face_up_card)\n player.clear()\n dealer.clear()\n shuffle(deck)\n\n # print game number and percent correct choices\n if i % 10000 == 0:\n print(\"finished game number\", i)\n if net2_total_wrong > 0 and net2_total_right > 0:\n two_card_percent = net2_total_right/(net2_total_right + net2_total_wrong)\n print(\"two card net right choice: 
%.02f%%\" % (two_card_percent * 100))\n print(\"two card net confidence: %.04f\" % two_card_net.confidence)\n if net3_total_wrong > 0 and net3_total_right > 0:\n three_card_percent = net3_total_right/(net3_total_right + net3_total_wrong)\n print(\"three card net right choice: %.02f%%\" % (three_card_percent * 100))\n print(\"three card net confidence: %.04f\\n\" % three_card_net.confidence)\n\n\nif __name__ == \"__main__\":\n wins = 0\n losses = 0\n net2_total_right = 0\n net2_total_wrong = 0\n net3_total_right = 0\n net3_total_wrong = 0\n main()\n # after all games, print total right guesses/wrong guesses and wins/losses\n print(\"Two card net right\", net2_total_right, \"wrong\", net2_total_wrong)\n print(\"Three card net right\", net3_total_right, \"wrong\", net3_total_wrong)\n print(\"wins\", wins, \"losses\", losses)\n\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":9133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446280345","text":"#!/usr/bin/python3\n# Project:\n# Author: syx10\n# Time 2020/12/30:19:22\n\nimport numpy as np\nimport pyqtgraph as pg\nfrom PyQt5.QtWidgets import QWidget\n\n\nclass WaveGraph(QWidget):\n def __init__(self):\n super().__init__()\n pg.setConfigOptions(antialias=True)\n self.resize(600, 1000)\n self.pw = pg.PlotWidget(self)\n self.pw.resize(400, 1000)\n self.data = []\n self.curve = self.pw.plot(pen='y')\n self.curve.getViewBox().invertY(True)\n\n def handle_data(self, data):\n t = np.arange(len(data))\n self.curve.setData(data, t)\n","sub_path":"wavegraph.py","file_name":"wavegraph.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341481472","text":"\"\"\"\n一只青蛙想要过河。 假定河流被等分为 x 个单元格,并且在每一个单元格内都有可能放有一石子(也有可能没有)。 青蛙可以跳上石头,但是不可以跳入水中。\n\n给定石子的位置列表(用单元格序号升序表示), 请判定青蛙能否成功过河(即能否在最后一步跳至最后一个石子上)。 开始时, 青蛙默认已站在第一个石子上,并可以假定它第一步只能跳跃一个单位(即只能从单元格1跳至单元格2)。\n\n如果青蛙上一步跳跃了 k 个单位,那么它接下来的跳跃距离只能选择为 k - 1、k 或 k + 1个单位。 另请注意,青蛙只能向前方(终点的方向)跳跃。\n\n请注意:\n\n石子的数量 ≥ 2 且 < 1100;\n每一个石子的位置序号都是一个非负整数,且其 < 231;\n第一个石子的位置永远是0。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/frog-jump\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom collections import defaultdict\nclass Solution:\n def canCross(self, stones: List[int]) -> bool:\n # 可以使用暴力法,用递归的思想,每步都选择最多三种跳的方法\n # 递归树往下递归,如果所有的路线都不能跳到最后一个格子,那么\n # 就代表不能不能过河,只要有一个路径可以到达最后一个格子,就代表\n # 可以过河,但时间复杂度是指数级的==\n # 后续补充这种方法\n # 先采用动态规划来求解\n # 三个主要问题:\n # 1,重复子问题-》假设上一个状态能够跳到,\n # 2,状态定义\n # 3,DP方程\n # 利用哈希表来存储每个石头的状态\n dit = defaultdict(set)\n # base case\n for stone in stones:\n dit[stone]\n dit[stones[0]].add(0)\n for item in stones:\n for base_jump in dit[item]:\n for new_jump in [base_jump-1, base_jump, base_jump+1]:\n if new_jump < 1: # 这里改为必须大于等于1,防止出现对其本身的修改,防止出现set changed during iteration 错误\n continue\n if item + new_jump in dit:\n dit[item+new_jump].add(new_jump)\n return True if len(dit[stones[-1]]) > 0 else False","sub_path":"Week_06/frog-jump.py","file_name":"frog-jump.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34039864","text":"import csv\nimport tensorflow as tf\nimport numpy as np\n\n\ndef read_csv(csv_path, features_only=False):\n with open(csv_path, newline='') as f:\n next(f) # skip the first line\n reader = csv.reader(f)\n features, labels = [], []\n for line in reader:\n feature, label = [float(i) for i in line[:-1]], 
int(line[-1])\n # yield feature, label\n features.append(feature)\n labels.append(label)\n\n if features_only:\n return features\n else:\n return features, labels\n\n\ndef get_dataset(mode, features_only=False):\n if mode == 'train':\n # return tf.data.Dataset.from_generator(\n # lambda: csv_yield(r\"../data/iris/iris_training.csv\"),\n # output_types=(tf.float32, tf.int32))\n return tf.data.Dataset.from_tensor_slices(read_csv(r\"./data/iris_training.csv\", features_only))\n\n elif mode == 'eval':\n # return tf.data.Dataset.from_generator(\n # lambda: read_csv(r\"../data/iris/iris_test.csv\"),\n # output_types=(tf.float32, tf.int32))\n return tf.data.Dataset.from_tensor_slices(read_csv(r\"./data/iris_test.csv\", features_only))\n else:\n # data = [[5.1, 5.9, 6.9], [3.3, 3.0, 3.1], [1.7, 4.2, 5.4], [0.5, 1.5, 2.1]]\n # data = np.array(data).T # transposition\n # expected_y = ['Setosa', 'Versicolor', 'Virginica'] # []\n\n data = [[6.4, 2.8, 5.6, 2.2], [5., 2.3, 3.3, 1.], [4.9, 2.5, 4.5, 1.7]]\n expected_y = [2, 1, 2]\n return tf.data.Dataset.from_tensor_slices(data), expected_y\n\n\nif __name__ == '__main__':\n # ds = tf.data.Dataset.from_generator(\n # lambda: csv_yield(r\"../data/iris/iris_training.csv\"),\n # output_types=(tf.float32, tf.int32)).batch(12)\n\n ds = tf.data.Dataset.from_tensor_slices(read_csv(r\"./data/iris_training.csv\"))\n print(ds.output_shapes)\n\n ds_iter = ds.make_one_shot_iterator()\n print(ds_iter.output_shapes)\n\n # each column is a sample\n data = [[5.1, 5.9, 6.9],\n [3.3, 3.0, 3.1],\n [1.7, 4.2, 5.4],\n [0.5, 1.5, 2.1]]\n data = np.array(data).T\n ds2_iter = tf.data.Dataset.from_tensor_slices(data).batch(12).make_one_shot_iterator()\n\n with tf.Session() as sess:\n print(sess.run(ds_iter.get_next()))\n print(sess.run(ds2_iter.get_next()))\n","sub_path":"examples/iris/data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"264547685","text":"# Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.\n# push(x) -- Push element x onto stack.\n# pop() -- Removes the element on top of the stack.\n# top() -- Get the top element.\n# getMin() -- Retrieve the minimum element in the stack.\n# Example:\n# MinStack minStack = new MinStack();\n# minStack.push(-2);\n# minStack.push(0);\n# minStack.push(-3);\n# minStack.getMin(); --> Returns -3.\n# minStack.pop();\n# minStack.top(); --> Returns 0.\n# minStack.getMin(); --> Returns -2.\n\n\nclass MinStack:\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.stack_data = []\n self.min_stack = []\n\n def push(self, x: int) -> None:\n if len(self.min_stack) == 0:\n self.min_stack.append(x)\n else:\n if x <= self.getMin():\n self.min_stack.append(x)\n self.stack_data.append(x)\n\n def pop(self) -> None:\n if self.min_stack[-1] == self.stack_data[-1]:\n self.min_stack.pop()\n self.stack_data.pop()\n\n def top(self) -> int:\n return self.stack_data[-1]\n\n def getMin(self) -> int:\n return self.min_stack[-1]\n\n\n# Your MinStack object will be instantiated and called as such:\nminStack = 
MinStack()\nminStack.push(-2)\nminStack.push(0)\nminStack.push(-3)\nprint(minStack.getMin())\nminStack.pop()\nprint(minStack.top())\nprint(minStack.getMin())\n","sub_path":"Python/155.py","file_name":"155.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"51303014","text":"import datetime\nimport sys; sys.path.append('.')\nimport os; os.chdir('..')\nsys.path.append('.')\n\nimport json\nimport html\nfrom bson import json_util\nfrom datetime import date\nfrom pprint import pformat\n\nfrom flask import Flask, g, request\nfrom flask import Markup\nfrom flask import url_for\n\nimport flask_admin as admin\nfrom flask_admin.model import typefmt\n\nfrom flask_mongoengine import MongoEngine\nfrom flask_admin.form import rules\nfrom flask_admin.contrib.mongoengine import ModelView\n\nimport py.mongo as mongo\n# import mongo\nos.chdir('./flask_ui')\n\n\napp = Flask(__name__)\napp.config['MONGODB_DB'] = 'evo'\napp.secret_key = 'super secret key'\n\ndef pre_format(view, value):\n    return Markup('<pre>{}</pre>
'.format(html.escape(value)))\n\ndef json_format(view, value): \n    # useful for mongoengine dict and list fields\n    return pre_format(view, json.dumps(value, indent=2, default=json_util.default))\n\ndef date_format(view, value):\n    return value.strftime('%d.%m.%Y %H:%M:%S')\n\ndef dict_formatter(view, context, model, name):\n    return json.dumps(getattr(model, name), indent=4, sort_keys=True)\n\nMY_DEFAULT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)\nMY_DEFAULT_FORMATTERS.update({\n    date: date_format,\n    dict: json_format\n})\n\ndef nice_json(old_json):\n    parsed = json.loads(old_json)\n    return json.dumps(parsed, indent=4, sort_keys=True)\n\n# Customized admin views\nclass PointView(ModelView):\n    column_type_formatters = MY_DEFAULT_FORMATTERS\n    column_list = ['step', 'experiment', 'test_mean', 'coordinates']\n    column_filters = ['experiment', 'step']\n    column_formatters = dict(\n        test_mean=lambda v, c, m, p: round(m.test_mean, 2),\n        # exp_conf=lambda v, c, m, p: Markup(pformat(m.experiment.configuration.values.to_dict()))\n    )\n    column_sortable_list = ['test_mean', 'step']\n    can_view_details = True\n    named_filter_urls = True\n    can_delete = False\n    can_create = False\n    can_edit = False\n\n\nclass ExperimentView(ModelView):\n    column_type_formatters = MY_DEFAULT_FORMATTERS\n    column_list = ('code', 'id', 'name', 'status', 'points', 'space', 'finished_at')#, 'configuration')\n    column_filters = ['name', 'status', 'code']\n    column_editable_list = ['status', 'name']\n\n    # {url_for(\"experiment.details_view\", id=m.id)}\n    column_formatters = dict(\n        # name=lambda v, c, m, p: Markup(f'{m.name}'),\n        points=lambda v, c, m, p: Markup(f'{len(m.points)}'),\n        # configuration=lambda v, c, m, p: Markup(f'{m.configuration.name}'),\n        space=dict_formatter,\n        # config_values=lambda v, c, m, p: nice_json(m.configuration.values.to_json())\n    )\n    can_view_details = True\n    named_filter_urls = True\n    column_display_pk = True\n\n\n    # column_searchable_list = ('name')\n\n    # form_ajax_refs = {\n    #     'tags': {\n    #         'fields': ('name',)\n    #     }\n    # }\n\nclass ConfigView(ModelView):\n    column_list = ('id', 'name', 'values')\n    column_editable_list = ['name']\n    column_filters = ['name']\n\n    named_filter_urls = True\n    column_display_pk = True\n    can_view_details = True\n    column_formatters = dict(\n        # name=lambda v, c, m, p: Markup(f'{m.name}'),\n        values=lambda v, c, m, p: nice_json(m.values.to_json())\n    )\n    \nclass ModelsView(ModelView):\n    column_list = ('id', 'model_id', 'code', 'future_code', 'description', 'experiment', 'step_no', 'status')\n    column_filters = ['model_id', 'code', 'experiment', 'status']\n    column_editable_list = ['description', 'status', 'code', 'future_code']\n    column_formatters = dict(\n        experiment=lambda v, c, m, p: Markup(f'{m.experiment.name}')\n    )\n    can_view_details = True\n\n    # configuration=lambda v, c, m, p: Markup(f'{m.configuration.name}'),\n\n@app.route('/')\ndef index():\n    return '<a href=\"/admin/\">Click me to get to Admin!</a>'\n\n@app.before_request\ndef connect_to_db():\n    print('Open connection')\n    g.db_client = mongo.connect_to_mongo(db_name='evo')\n\n@app.teardown_appcontext\ndef teardown_db(error):\n    if hasattr(g, 'db_client'):\n        g.db_client.close()\n        print(\"closing db connection\")\n\nif __name__ == \"__main__\":\n    # Create admin\n    admin = admin.Admin(app, 'Evotrade dashboard')\n\n    # Add views\n    admin.add_view(ExperimentView(mongo.Experiment))\n    admin.add_view(PointView(mongo.Point))\n    admin.add_view(ConfigView(mongo.Configuration))\n    admin.add_view(ModelsView(mongo.ModelCollection))\n\n\n    # Start app\n\n\n    
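# debug=True enables the werkzeug debugger and auto-reloader; the\n    # non-default port presumably avoids clashing with other local Flask\n    # apps on 5000\n    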
app.run(debug=True, port=5005)","sub_path":"flask_ui/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115253028","text":"\"\"\"\nCopyright ©2020. The Regents of the University of California (Regents). All Rights Reserved.\n\nPermission to use, copy, modify, and distribute this software and its documentation\nfor educational, research, and not-for-profit purposes, without fee and without a\nsigned licensing agreement, is hereby granted, provided that the above copyright\nnotice, this paragraph and the following two paragraphs appear in all copies,\nmodifications, and distributions.\n\nContact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue,\nSuite 510, Berkeley, CA 94720-1620, (510) 643-7201, otl@berkeley.edu,\nhttp://ipira.berkeley.edu/industry-info for commercial licensing opportunities.\n\nIN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,\nINCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF\nTHE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED\nOF THE POSSIBILITY OF SUCH DAMAGE.\n\nREGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE\nSOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED\n\"AS IS\". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,\nENHANCEMENTS, OR MODIFICATIONS.\n\"\"\"\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport logging\nfrom smtplib import SMTP\n\nfrom diablo import skip_when_pytest\nfrom diablo.models.sent_email import SentEmail\nfrom flask import current_app as app\n\n\nclass BConnected:\n\n def __init__(self):\n self.bcop_smtp_password = app.config['BCOP_SMTP_PASSWORD']\n self.bcop_smtp_port = app.config['BCOP_SMTP_PORT']\n self.bcop_smtp_server = app.config['BCOP_SMTP_SERVER']\n self.bcop_smtp_username = app.config['BCOP_SMTP_USERNAME']\n\n def send(\n self,\n message,\n recipients,\n subject_line,\n term_id=None,\n section_id=None,\n template_type=None,\n ):\n @skip_when_pytest()\n def _send():\n # Connect to SMTP server\n smtp = SMTP(self.bcop_smtp_server, port=self.bcop_smtp_port)\n # TLS encryption\n smtp.starttls()\n smtp.set_debuglevel(app.logger.level == logging.DEBUG)\n smtp.login(self.bcop_smtp_username, self.bcop_smtp_password)\n\n emails_sent_to = set()\n\n for recipient in recipients:\n mock_message = _get_mock_message(\n recipient['name'],\n recipient['email'],\n subject_line,\n message,\n )\n if app.config['DIABLO_ENV'] == 'test':\n app.logger.info(mock_message)\n else:\n from_address = app.config['EMAIL_DIABLO_SUPPORT']\n to_address = self.get_email_address(user=recipient, subject_line=subject_line)\n\n msg = MIMEMultipart('alternative')\n msg['From'] = from_address\n msg['To'] = to_address\n msg['Bcc'] = app.config['EMAIL_DIABLO_ADMIN']\n\n if app.config['EMAIL_TEST_MODE']:\n # Append intended recipient email address to verify when testing.\n intended_email = recipient['email']\n msg['Subject'] = f'{subject_line} (To: {intended_email})'\n else:\n msg['Subject'] = subject_line\n\n # TODO: 'plain' text version of email?\n msg.attach(MIMEText(message, 'plain'))\n msg.attach(MIMEText(message, 'html'))\n # Send\n smtp.sendmail(from_addr=from_address, to_addrs=to_address, msg=msg.as_string())\n\n 
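# record the address so the summary log line below reports each\n                    # unique recipient only once\n                    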
emails_sent_to.add(to_address)\n\n app.logger.info(f'{len(recipients)} \\'{template_type}\\' emails sent to {\", \".join(list(emails_sent_to))}')\n # Disconnect\n smtp.quit()\n\n # Send emails\n _send()\n\n SentEmail.create(\n recipient_uids=[recipient['uid'] for recipient in recipients],\n section_id=section_id,\n template_type=template_type,\n term_id=term_id or app.config['CURRENT_TERM_ID'],\n )\n\n def ping(self):\n with SMTP(self.bcop_smtp_server, port=self.bcop_smtp_port) as smtp:\n smtp.noop()\n return True\n\n @classmethod\n def get_email_address(cls, user, subject_line=None):\n user_email = user['email']\n if app.config['EMAIL_TEST_MODE']:\n app.logger.info(f'EMAIL_TEST_MODE intended email: {user_email}; subject_line: {subject_line}')\n return app.config['EMAIL_REDIRECT_WHEN_TESTING']\n else:\n return user_email\n\n\ndef _get_mock_message(recipient_name, email_address, subject_line, message):\n return f\"\"\"\n\n To: {recipient_name} <{email_address}>\n Cc: Course Capture Admin <{app.config['EMAIL_DIABLO_ADMIN']}>\n From: {app.config['EMAIL_DIABLO_SUPPORT']}\n Subject: {subject_line}\n\n Message:\n {message}\n\n \"\"\"\n","sub_path":"diablo/externals/b_connected.py","file_name":"b_connected.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}